From 54e9d51f2dc493fa5f6f59ee7dfccbcecb5e3fe9 Mon Sep 17 00:00:00 2001 From: Li Wencheng Date: Thu, 21 Mar 2024 14:44:00 +0800 Subject: [PATCH 01/99] dt-bindings: phytmac: Add bindings for Phytium MAC 1.0 and 2.0 This patch document the DT bindings for the Phytium MAC 1.0 and 2.0 controller. Signed-off-by: Li Wencheng Signed-off-by: Wang Yinfeng Signed-off-by: Wang Zhimin Link: https://github.com/deepin-community/kernel/pull/222 (cherry picked from commit 666a16e29e0106a3e1ac7934fce815c34242b696) Signed-off-by: Wentao Guan --- .../devicetree/bindings/net/phytmac.yaml | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 Documentation/devicetree/bindings/net/phytmac.yaml diff --git a/Documentation/devicetree/bindings/net/phytmac.yaml b/Documentation/devicetree/bindings/net/phytmac.yaml new file mode 100644 index 000000000000..33947bac5225 --- /dev/null +++ b/Documentation/devicetree/bindings/net/phytmac.yaml @@ -0,0 +1,36 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +* Phytium xgmac Ethernet controller + +Required properties: +- compatible: Should be "phytium,gmac-[version]" + Use "phytium,gmac-1.0" for gmac version 1.0 on Phytium SoCs + Use "phytium,gmac-2.0" for gmac version 2.0 on Phytium SoCs + +- reg: Address and length of the register set for the device +- interrupts: Should contain phytmac interrupt +- queue-number: The number of queues for the device +- phy-mode: See ethernet.txt file in the same directory +- fixed-link:See ethernet.txt file in the same directory +- dma-coherent: Boolean property, must only be present if memory + accesses performed by the device are cache coherent. + +The MAC address will be determined using the optional properties +defined in ethernet.txt. 
+ +Examples: + + eth0@36ce0000 { + compatible = "phytium,gmac-1.0"; + reg = <0x00 0x36ce0000 0x00 0x2000>; + interrupts = <0x00 0x20 0x04 0x00 0x21 0x04 0x00 0x22 0x04 0x00 0x23 0x04>; + queue-number = <0x04>; + magic-packet; + dma-coherent; + phy-mode = "usxgmii"; + status = "okay"; + + fixed-link { + speed = <0x2710>; + full-duplex; + }; + }; From dd78c3d2526e457645080ec744e1bc82850c0401 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sat, 30 Mar 2024 14:54:13 +0800 Subject: [PATCH 02/99] crypto: ccp: Introduce hygon specific interface to support driver hygon inclusion category: feature CVE: NA --------------------------- Hygon secure processors provide a lot of security functions, which require a lot of code to support. In order to prevent Hygon function code from invading the driver's native code, we introduce specific files for Hygon. We'll leave the native code unchanged as much as possible. In this patch, we add files as below: a. files for codes to support Hygon secure processor: drivers/crypto/ccp/hygon/sp-dev.h drivers/crypto/ccp/hygon/sp-pci.c drivers/crypto/ccp/hygon/psp-dev.c drivers/crypto/ccp/hygon/psp-dev.h b. header file to define data types and structures for HYGON Platform Security Processor: include/linux/psp-hygon.h c. header file to define userspace interface for HYGON Platform Security Processor: include/uapi/linux/psp-hygon.h We'll add more Hygon specific code in the following commits. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/341 (cherry picked from commit 51e0983a79edd201adeda6cc347edebbdda640e6) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/Makefile --- drivers/crypto/ccp/Makefile | 6 ++-- drivers/crypto/ccp/hygon/psp-dev.c | 19 ++++++++++++ drivers/crypto/ccp/hygon/psp-dev.h | 30 ++++++++++++++++++ drivers/crypto/ccp/hygon/sp-dev.h | 30 ++++++++++++++++++ drivers/crypto/ccp/hygon/sp-pci.c | 49 ++++++++++++++++++++++++++++++ drivers/crypto/ccp/sev-dev.c | 19 ++++++++++++ include/linux/psp-hygon.h | 17 +++++++++++ include/uapi/linux/psp-hygon.h | 14 +++++++++ 8 files changed, 182 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/ccp/hygon/psp-dev.c create mode 100644 drivers/crypto/ccp/hygon/psp-dev.h create mode 100644 drivers/crypto/ccp/hygon/sp-dev.h create mode 100644 drivers/crypto/ccp/hygon/sp-pci.c create mode 100644 include/linux/psp-hygon.h create mode 100644 include/uapi/linux/psp-hygon.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index a9626b30044a..1d781183bd37 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -7,14 +7,16 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-dev-v5.o \ ccp-dmaengine.o ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o -ccp-$(CONFIG_PCI) += sp-pci.o +ccp-$(CONFIG_PCI) += sp-pci.o \ + hygon/sp-pci.o ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ sev-dev.o \ tee-dev.o \ platform-access.o \ dbc.o \ hsti.o \ - sfs.o + sfs.o \ + hygon/psp-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c new file mode 100644 index 000000000000..736f9aaaa37a --- /dev/null +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2024 Hygon Info 
Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include "psp-dev.h" + +/* Function and variable pointers for hooks */ +struct hygon_psp_hooks_table hygon_psp_hooks; diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h new file mode 100644 index 000000000000..ebeade987053 --- /dev/null +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) driver interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __CCP_HYGON_PSP_DEV_H__ +#define __CCP_HYGON_PSP_DEV_H__ + +#include + +#include "sp-dev.h" + +#include "../psp-dev.h" +#include "../sev-dev.h" + +/* + * Hooks table: a table of function and variable pointers filled in + * when psp init. + */ +extern struct hygon_psp_hooks_table { + bool sev_dev_hooks_installed; + struct mutex *sev_cmd_mutex; + int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); +} hygon_psp_hooks; + +#endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-dev.h b/drivers/crypto/ccp/hygon/sp-dev.h new file mode 100644 index 000000000000..e1996fc3b7c6 --- /dev/null +++ b/drivers/crypto/ccp/hygon/sp-dev.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Secure Processor interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + */ + +#ifndef __CCP_HYGON_SP_DEV_H__ +#define __CCP_HYGON_SP_DEV_H__ + +#include +#include + +#include "../ccp-dev.h" +#include "../sp-dev.h" + +#ifdef CONFIG_X86_64 +static inline bool is_vendor_hygon(void) +{ + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON; +} +#else +static inline bool is_vendor_hygon(void) { return false; } +#endif + +extern const struct sp_dev_vdata hygon_dev_vdata[]; + +#endif /* __CCP_HYGON_SP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-pci.c b/drivers/crypto/ccp/hygon/sp-pci.c new file mode 100644 index 000000000000..78e2dab292ec --- /dev/null +++ b/drivers/crypto/ccp/hygon/sp-pci.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Secure Processor interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "sp-dev.h" + +#ifdef CONFIG_CRYPTO_DEV_SP_PSP +static const struct sev_vdata csvv1 = { + .cmdresp_reg = 0x10580, /* C2PMSG_32 */ + .cmdbuff_addr_lo_reg = 0x105e0, /* C2PMSG_56 */ + .cmdbuff_addr_hi_reg = 0x105e4, /* C2PMSG_57 */ +}; + +static const struct psp_vdata pspv1 = { + .sev = &csvv1, + .bootloader_info_reg = 0x105ec, /* C2PMSG_59 */ + .feature_reg = 0x105fc, /* C2PMSG_63 */ + .inten_reg = 0x10610, /* P2CMSG_INTEN */ + .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ +}; + +#endif + +const struct sp_dev_vdata hygon_dev_vdata[] = { + { /* 0 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv1, +#endif + }, + { /* 1 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5b, +#endif + }, +}; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 0d13d47c164b..d1a4806826d9 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -39,6 +39,8 @@ #include "psp-dev.h" #include "sev-dev.h" +#include "hygon/psp-dev.h" + #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" #define SEV_FW_NAME_SIZE 64 @@ -2662,12 +2664,29 @@ static int sev_misc_init(struct sev_device *sev) return 0; } +/* Code to set all of the function and variable pointers */ +static void sev_dev_install_hooks(void) +{ + hygon_psp_hooks.sev_cmd_mutex = &sev_cmd_mutex; + hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; + + hygon_psp_hooks.sev_dev_hooks_installed = true; +} + int sev_dev_init(struct psp_device *psp) { struct device *dev = psp->dev; struct sev_device *sev; int ret = -ENOMEM; + /* + * Install sev-dev related function and variable pointers hooks only + * for Hygon vendor, install these hooks here, even though the + * following initialization fails. 
+ */ + if (is_vendor_hygon()) + sev_dev_install_hooks(); + if (!boot_cpu_has(X86_FEATURE_SEV)) { dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n"); return 0; diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h new file mode 100644 index 000000000000..944db2e2ecc0 --- /dev/null +++ b/include/linux/psp-hygon.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) driver interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __PSP_HYGON_H__ +#define __PSP_HYGON_H__ + +#ifdef CONFIG_CRYPTO_DEV_SP_PSP +#else /* !CONFIG_CRYPTO_DEV_SP_PSP */ +#endif /* CONFIG_CRYPTO_DEV_SP_PSP */ + +#endif /* __PSP_HYGON_H__ */ diff --git a/include/uapi/linux/psp-hygon.h b/include/uapi/linux/psp-hygon.h new file mode 100644 index 000000000000..e1ac9c04dc55 --- /dev/null +++ b/include/uapi/linux/psp-hygon.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Userspace interface for HYGON Platform Security Processor (PSP) + * commands. + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + */ + +#ifndef __PSP_HYGON_USER_H__ +#define __PSP_HYGON_USER_H__ + +#endif /* __PSP_HYGON_USER_H__ */ From a1c0663e07447f4a660cf904012163c33e6b02b0 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 8 Mar 2024 20:17:47 +0800 Subject: [PATCH 03/99] crypto: ccp: Fixup the capability of Hygon PSP during initialization hygon inclusion category: feature CVE: NA --------------------------- The meaning of the data read from feature register of Hygon PSP is not exactly the same as AMD ASP. The bit 1 in feature register is used to indicates TEE in AMD ASP, but not in Hygon PSP, which will cause host to crash during module initialization, as shown below. 
[ 27.898723] BUG: kernel NULL pointer dereference, address: 0000000000000014 [ 27.906503] #PF: supervisor read access in kernel mode [ 27.912242] #PF: error_code(0x0000) - not-present page [ 27.917981] PGD 0 P4D 0 [ 27.920810] Oops: 0000 [#1] PREEMPT SMP NOPTI [ 27.925676] CPU: 67 PID: 1668 Comm: systemd-udevd Not tainted 6.6.7-for-gerrit #3 [ 27.934033] Hardware name: HYGON Hygon65N32/65N32, BIOS A0173036 02/01/2023 [ 27.941807] RIP: 0010:psp_firmware_is_visible+0x3c/0x70 [ccp] [ 27.948240] Code: 00 00 48 85 c0 74 12 48 81 fe e0 54 53 c1 74 2f 48 81 fe c0 54 53 c1 74 03 31 c0 c3 f6 40 70 02 74 f7 48 8b 50 10 48 8b 52 08 <8b> 52 14 85 d2 74 e8 48 03 50 38 48 89 d7 e8 51 71 0a d7 eb 14 48 [ 27.969204] RSP: 0018:ffffc9000b80fa70 EFLAGS: 00010202 [ 27.975039] RAX: ffff888113c2d9a8 RBX: ffffffffc1535460 RCX: 0000000000000124 [ 27.983008] RDX: 0000000000000000 RSI: ffffffffc15354c0 RDI: ffff8888830dc0c0 [ 27.993320] RBP: ffff888883060980 R08: 0000000000000001 R09: 00000006c8df7639 [ 28.005756] R10: ffff888100258278 R11: 0000000000000100 R12: ffff8888830dc0c0 [ 28.019695] R13: 0000000000000001 R14: 0000000000000000 R15: ffffffffc1535490 [ 28.032285] FS: 00007f7c9ba2b880(0000) GS:ffff88885fcc0000(0000) knlGS:0000000000000000 [ 28.044626] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 28.054928] CR2: 0000000000000014 CR3: 0000800106e50000 CR4: 00000000003506e0 [ 28.065028] Call Trace: [ 28.067751] [ 28.070095] ? __die_body+0x1f/0x60 [ 28.073995] ? page_fault_oops+0x15d/0x460 [ 28.078573] ? exc_page_fault+0x78/0x170 [ 28.082956] ? asm_exc_page_fault+0x26/0x30 [ 28.087632] ? psp_firmware_is_visible+0x3c/0x70 [ccp] [ 28.093384] internal_create_group+0xde/0x3a0 [ 28.093392] internal_create_groups.part.0+0x3d/0xa0 [ 28.093396] really_probe+0x197/0x3c0 [ 28.093402] ? __device_attach_driver+0x100/0x100 [[ 0 ;2382.m0 9 3O4K0 5 ] __driver_probe_device+0x78/0x160 [ 28.093409] driver_probe_device+0x1e/0xa0 [ 28.126379] __driver_attach+0xaa/0x160 [ 28.130667] ? 
__device_attach_driver+0x100/0x100 [ 28.135921] bus_for_each_dev+0x75/0xc0 [ 28.142419] bus_add_driver+0x112/0x210 [ 28.149240] driver_register+0x5c/0x110 [ 28.154875] ? 0xffffffffc14a4000 [ 28.160197] sp_mod_init+0x10/0x1000 [ccp] [ 28.166164] do_one_initcall+0x45/0x210 [ 28.170453] ? kmalloc_trace+0x29/0x90 [ 28.174642] do_init_module+0x64/0x240 [ 28.178831] load_module+0x1d84/0x2010 [ 28.183024] ? init_module_from_file+0x8b/0xd0 [ 28.187986] init_module_from_file+0x8b/0xd0 [ 28.192763] do_syscall_64+0x39/0x80 [ 28.206672] entry_SYSCALL_64_after_hwframe+0x63/0xcd [ 28.212318] RIP: 0033:0x7f7c9b91ea3d [ 28.216312] Code: 5b 41 5c c3 66 0f 1f 84 00 00 00 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d c3 a3 0f 00 f7 d8 64 89 01 48 [ 28.237272] RSP: 002b:00007ffe6cee5368 EFLAGS: 00000246 ORIG_RAX: 0000000000000139 [ 28.245725] RAX: ffffffffffffffda RBX: 000055700e302260 RCX: 00007f7c9b91ea3d [ 28.253691] RDX: 0000000000000000 RSI: 00007f7c9ba5cded RDI: 0000000000000006 [ 28.261658] RBP: 0000000000020000 R08: 0000000000000000 R09: 000055700e4d3188 [ 28.269624] R10: 0000000000000006 R11: 0000000000000246 R12: 00007f7c9ba5cded [ 28.277590] R13: 0000000000000000 R14: 000055700e4cb7b0 R15: 000055700e302260 [ 28.285552] [ 28.287995] Modules linked in: k10temp ccp(+) drm_kms_helper ipmi_si(+) ipmi_devintf ipmi_msghandler mac_hid sch_fq_codel parport_pc ppdev lp parport ramoops drm reed_solomon efi_pstore ip_tables x_tables autofs4 btrfs blake2b_generic raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx xor raid6_pq libcrc32c raid1 raid0 multipath linear igb i2c_algo_bit dca ptp crc32_pclmul pps_core ahci libahci i2c_piix4 hid_generic usbhid hid [ 28.288027] CR2: 0000000000000014 [ 28.288031] ---[ end trace 0000000000000000 ]--- [ 28.533899] ipmi_si IPI0001:00: IPMI message handler: Found new BMC (man_id: 0x00d455, prod_id: 0x0202, dev_id: 0x20) [ 28.604507] RIP: 
0010:psp_firmware_is_visible+0x3c/0x70 [ccp] [ 28.604527] Code: 00 00 48 85 c0 74 12 48 81 fe e0 54 53 c1 74 2f 48 81 fe c0 54 53 c1 74 03 31 c0 c3 f6 40 70 02 74 f7 48 8b 50 10 48 8b 52 08 <8b> 52 14 85 d2 74 e8 48 03 50 38 48 89 d7 e8 51 71 0a d7 eb 14 48 [ 28.604530] RSP: 0018:ffffc9000b80fa70 EFLAGS: 00010202 [ 28.604533] RAX: ffff888113c2d9a8 RBX: ffffffffc1535460 RCX: 0000000000000124 [ 28.604535] RDX: 0000000000000000 RSI: ffffffffc15354c0 RDI: ffff8888830dc0c0 [ 28.604536] RBP: ffff888883060980 R08: 0000000000000001 R09: 00000006c8df7639 [ 28.604537] R10: ffff888100258278 R11: 0000000000000100 R12: ffff8888830dc0c0 [ 28.604539] R13: 0000000000000001 R14: 0000000000000000 R15: ffffffffc1535490 [ 28.604540] FS: 00007f7c9ba2b880(0000) GS:ffff88885fcc0000(0000) knlGS:0000000000000000 [ 28.604542] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 28.604543] CR2: 0000000000000014 CR3: 0000800106e50000 CR4: 00000000003506e0 Also, the meaning of bit 7 in the feature register of Hygon PSP is not the same as AMD ASP. The Hygon PSP works only when CSV is configured in feature register. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/341 (cherry picked from commit 86af24a3a7f0a215e9a8c206a42ab9d3c241d93c) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/psp-dev.c --- drivers/crypto/ccp/hygon/psp-dev.c | 11 +++++++++++ drivers/crypto/ccp/hygon/psp-dev.h | 2 ++ drivers/crypto/ccp/psp-dev.c | 13 +++++++++++++ 3 files changed, 26 insertions(+) diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 736f9aaaa37a..dd5285e1ba37 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -17,3 +17,14 @@ /* Function and variable pointers for hooks */ struct hygon_psp_hooks_table hygon_psp_hooks; + +int fixup_hygon_psp_caps(struct psp_device *psp) +{ + /* the hygon psp is unavailable if bit0 is cleared in feature reg */ + if (!(psp->capability & PSP_CAPABILITY_SEV)) + return -ENODEV; + + psp->capability &= ~(PSP_CAPABILITY_TEE | + PSP_CAPABILITY_PSP_SECURITY_REPORTING); + return 0; +} diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index ebeade987053..e187d3f24bdf 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -27,4 +27,6 @@ extern struct hygon_psp_hooks_table { int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); } hygon_psp_hooks; +int fixup_hygon_psp_caps(struct psp_device *psp); + #endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 9e21da0e298a..4ed402370712 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -22,6 +22,8 @@ #include "dbc.h" #include "hsti.h" +#include "hygon/psp-dev.h" + struct psp_device *psp_master; #define PSP_C2PMSG_17_CMDRESP_CMD GENMASK(19, 16) @@ -158,6 +160,17 @@ static unsigned int psp_get_capability(struct psp_device *psp) } psp->capability.raw = val; + /* + * Fix capability of Hygon psp, the meaning of Hygon psp feature + * register is not 
exactly the same as AMD. + * Return -ENODEV directly if hygon psp not configured with CSV + * capability. + */ + if (is_vendor_hygon()) { + if (fixup_hygon_psp_caps(psp)) + return -ENODEV; + } + return 0; } From e080f67163580b458d28c97ace59fda036e365c3 Mon Sep 17 00:00:00 2001 From: Hao Feng Date: Thu, 25 Mar 2021 13:36:31 +0800 Subject: [PATCH 04/99] crypto: ccp: Add support to detect CCP devices on Hygon 2nd and 3rd CPUs hygon inclusion category: feature CVE: NA --------------------------- The are Secure Processor devices with 2 different PCI device IDs on Hygon 2nd and 3rd CPUs, add them in the device list. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/341 (cherry picked from commit 06b1147176d0c22450928395a4a82aed61f746b4) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/sp-pci.c --- drivers/crypto/ccp/sp-pci.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index e7bb803912a6..bede5ac090bc 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -26,6 +26,8 @@ #include "psp-dev.h" #include "hsti.h" +#include "hygon/sp-dev.h" + /* used for version string AA.BB.CC.DD */ #define AA GENMASK(31, 24) #define BB GENMASK(23, 16) @@ -539,6 +541,8 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] }, + { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&hygon_dev_vdata[0] }, + { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&hygon_dev_vdata[1] }, /* Last entry must be zero */ { 0, } }; From 16a57dec746cb34bfa0e9510a960c96c91efcc6e Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Thu, 10 Aug 2023 13:49:52 +0800 Subject: [PATCH 05/99] crypto: ccp: Add support to detect CCP devices on Hygon 4th CPUs hygon inclusion category: feature CVE: NA --------------------------- Since 
Hygon 4th CPUs, there are new Secure Processor devices with 3 different PCI device IDs, add them in the device list. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/341 (cherry picked from commit 4de2441151d362400100cbfc18e2d5fe6ff312fa) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/sp-pci.c | 16 ++++++++++++++++ drivers/crypto/ccp/sp-pci.c | 3 +++ 2 files changed, 19 insertions(+) diff --git a/drivers/crypto/ccp/hygon/sp-pci.c b/drivers/crypto/ccp/hygon/sp-pci.c index 78e2dab292ec..ba3b2448d0b6 100644 --- a/drivers/crypto/ccp/hygon/sp-pci.c +++ b/drivers/crypto/ccp/hygon/sp-pci.c @@ -28,6 +28,13 @@ static const struct psp_vdata pspv1 = { .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ }; +static const struct psp_vdata pspv2 = { + .sev = &csvv1, + .feature_reg = 0x105fc, + .inten_reg = 0x10670, + .intsts_reg = 0x10674, +}; + #endif const struct sp_dev_vdata hygon_dev_vdata[] = { @@ -44,6 +51,15 @@ const struct sp_dev_vdata hygon_dev_vdata[] = { .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_CCP .ccp_vdata = &ccpv5b, +#endif + }, + { /* 2 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &pspv2, #endif }, }; diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index bede5ac090bc..a443d091caba 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -543,6 +543,9 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x17D8), (kernel_ulong_t)&dev_vdata[8] }, { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&hygon_dev_vdata[0] }, { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&hygon_dev_vdata[1] }, + { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&hygon_dev_vdata[2] }, + { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&hygon_dev_vdata[1] }, + { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&hygon_dev_vdata[2] }, /* Last entry must be zero */ { 0, } }; From 
5d0cdb89f3007f9dc31e7d3cc2daba89cb63448d Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Thu, 22 Sep 2022 10:59:03 +0800 Subject: [PATCH 06/99] crypto: ccp: Implement CSV_HGSC_CERT_IMPORT ioctl command hygon inclusion category: feature CVE: NA --------------------------- The CSV_HGSC_CERT_IMPORT command can be used to import hygon general secure cert to the Secure Proccessor, to enable Hygon Secure Functions, such as CSV, TPM, TPCM, TDM. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/342 (cherry picked from commit 0a38ba28b230b918000624ab114e61fe7616652a) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/Makefile --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/hygon/csv-dev.c | 122 +++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 19 +++++ drivers/crypto/ccp/hygon/psp-dev.h | 1 + drivers/crypto/ccp/sev-dev.c | 23 +++++- include/linux/psp-hygon.h | 30 +++++++ include/uapi/linux/psp-hygon.h | 30 +++++++ 7 files changed, 225 insertions(+), 3 deletions(-) create mode 100644 drivers/crypto/ccp/hygon/csv-dev.c create mode 100644 drivers/crypto/ccp/hygon/csv-dev.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 1d781183bd37..c6184de3f46b 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -16,7 +16,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ dbc.o \ hsti.o \ sfs.o \ - hygon/psp-dev.o + hygon/psp-dev.o \ + hygon/csv-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c new file mode 100644 index 000000000000..6f238aaeb434 --- /dev/null +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON CSV interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include + +#include "psp-dev.h" + +int csv_cmd_buffer_len(int cmd) +{ + switch (cmd) { + case CSV_CMD_HGSC_CERT_IMPORT: return sizeof(struct csv_data_hgsc_cert_import); + default: return 0; + } +} + +static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) +{ + struct csv_user_data_hgsc_cert_import input; + struct csv_data_hgsc_cert_import *data; + void *hgscsk_blob, *hgsc_blob; + int ret; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* copy HGSCSK certificate blobs from userspace */ + hgscsk_blob = psp_copy_user_blob(input.hgscsk_cert_address, input.hgscsk_cert_len); + if (IS_ERR(hgscsk_blob)) { + ret = PTR_ERR(hgscsk_blob); + goto e_free; + } + + data->hgscsk_cert_address = __psp_pa(hgscsk_blob); + data->hgscsk_cert_len = input.hgscsk_cert_len; + + /* copy HGSC certificate blobs from userspace */ + hgsc_blob = psp_copy_user_blob(input.hgsc_cert_address, input.hgsc_cert_len); + if (IS_ERR(hgsc_blob)) { + ret = PTR_ERR(hgsc_blob); + goto e_free_hgscsk; + } + + data->hgsc_cert_address = __psp_pa(hgsc_blob); + data->hgsc_cert_len = input.hgsc_cert_len; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_HGSC_CERT_IMPORT, + data, &argp->error); + + kfree(hgsc_blob); +e_free_hgscsk: + kfree(hgscsk_blob); +e_free: + kfree(data); + return ret; +} + +static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct sev_issue_cmd input; + int ret = -EFAULT; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (!psp_master || !psp_master->sev_data) + return -ENODEV; + + if (ioctl != SEV_ISSUE_CMD) + 
return -EINVAL; + + if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) + return -EFAULT; + + if (input.cmd > CSV_MAX) + return -EINVAL; + + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + + switch (input.cmd) { + case CSV_HGSC_CERT_IMPORT: + ret = csv_ioctl_do_hgsc_import(&input); + break; + default: + /* + * If the command is compatible between CSV and SEV, the + * native implementation of the driver is invoked. + * Release the mutex before calling the native ioctl function + * because it will acquires the mutex. + */ + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + return hygon_psp_hooks.sev_ioctl(file, ioctl, arg); + } + + if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) + ret = -EFAULT; + + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return ret; +} + +const struct file_operations csv_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = csv_ioctl, +}; diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h new file mode 100644 index 000000000000..43ca224be610 --- /dev/null +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON CSV driver interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + */ + +#ifndef __CCP_HYGON_CSV_DEV_H__ +#define __CCP_HYGON_CSV_DEV_H__ + +#include + +extern const struct file_operations csv_fops; + +int csv_cmd_buffer_len(int cmd); + +#endif /* __CCP_HYGON_CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index e187d3f24bdf..b984237b4795 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -25,6 +25,7 @@ extern struct hygon_psp_hooks_table { bool sev_dev_hooks_installed; struct mutex *sev_cmd_mutex; int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); + long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; int fixup_hygon_psp_caps(struct psp_device *psp); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index d1a4806826d9..6d603f52162a 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -40,6 +40,7 @@ #include "sev-dev.h" #include "hygon/psp-dev.h" +#include "hygon/csv-dev.h" #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" @@ -203,6 +204,18 @@ static int sev_wait_cmd_ioc(struct sev_device *sev, static int sev_cmd_buffer_len(int cmd) { + /* + * The Hygon CSV command may conflict with AMD SEV command, so it's + * preferred to check whether it's a CSV-specific command for Hygon + * psp. 
+ */ + if (is_vendor_hygon()) { + int r = csv_cmd_buffer_len(cmd); + + if (r) + return r; + } + switch (cmd) { case SEV_CMD_INIT: return sizeof(struct sev_data_init); case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex); @@ -2646,7 +2659,11 @@ static int sev_misc_init(struct sev_device *sev) misc = &misc_dev->misc; misc->minor = MISC_DYNAMIC_MINOR; misc->name = DEVICE_NAME; - misc->fops = &sev_fops; + + if (is_vendor_hygon()) + misc->fops = &csv_fops; + else + misc->fops = &sev_fops; ret = misc_register(misc); if (ret) @@ -2669,6 +2686,7 @@ static void sev_dev_install_hooks(void) { hygon_psp_hooks.sev_cmd_mutex = &sev_cmd_mutex; hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; + hygon_psp_hooks.sev_ioctl = sev_ioctl; hygon_psp_hooks.sev_dev_hooks_installed = true; } @@ -2833,7 +2851,8 @@ static int snp_shutdown_on_panic(struct notifier_block *nb, int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, void *data, int *error) { - if (!filep || filep->f_op != &sev_fops) + if (!filep || filep->f_op != (is_vendor_hygon() + ? 
&csv_fops : &sev_fops)) return -EBADF; return sev_do_cmd(cmd, data, error); diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 944db2e2ecc0..5c7abb06740a 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -10,6 +10,36 @@ #ifndef __PSP_HYGON_H__ #define __PSP_HYGON_H__ +#include + +/*****************************************************************************/ +/***************************** CSV interface *********************************/ +/*****************************************************************************/ + +/** + * Guest/platform management commands for CSV + */ +enum csv_cmd { + CSV_CMD_HGSC_CERT_IMPORT = 0x300, + CSV_CMD_MAX, +}; + +/** + * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters + * + * @hgscsk_cert_address: HGSCSK certificate chain + * @hgscsk_cert_len: len of HGSCSK certificate + * @hgsc_cert_address: HGSC certificate chain + * @hgsc_cert_len: len of HGSC certificate + */ +struct csv_data_hgsc_cert_import { + u64 hgscsk_cert_address; /* In */ + u32 hgscsk_cert_len; /* In */ + u32 reserved; /* In */ + u64 hgsc_cert_address; /* In */ + u32 hgsc_cert_len; /* In */ +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ diff --git a/include/uapi/linux/psp-hygon.h b/include/uapi/linux/psp-hygon.h index e1ac9c04dc55..004ad4c50b70 100644 --- a/include/uapi/linux/psp-hygon.h +++ b/include/uapi/linux/psp-hygon.h @@ -11,4 +11,34 @@ #ifndef __PSP_HYGON_USER_H__ #define __PSP_HYGON_USER_H__ +#include + +/*****************************************************************************/ +/***************************** CSV interface *********************************/ +/*****************************************************************************/ + +/** + * CSV guest/platform commands + */ +enum { + CSV_HGSC_CERT_IMPORT = 201, + + CSV_MAX, +}; + +/** + * struct csv_user_data_hgsc_cert_import - HGSC_CERT_IMPORT command 
parameters + * + * @hgscsk_cert_address: HGSCSK certificate chain + * @hgscsk_cert_len: length of HGSCSK certificate + * @hgsc_cert_address: HGSC certificate chain + * @hgsc_cert_len: length of HGSC certificate + */ +struct csv_user_data_hgsc_cert_import { + __u64 hgscsk_cert_address; /* In */ + __u32 hgscsk_cert_len; /* In */ + __u64 hgsc_cert_address; /* In */ + __u32 hgsc_cert_len; /* In */ +} __packed; + #endif /* __PSP_HYGON_USER_H__ */ From 36d7f9418afcd39837204c27836a8692cfe45e93 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 10:36:36 +0800 Subject: [PATCH 07/99] Documentation/arch/x86: Add HYGON secure virtualization description hygon inclusion category: feature CVE: NA --------------------------- Add the HYGON secure virtualization document describing the secure virtualization features. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/350 (cherry picked from commit e9aaad45f220b897b2395faa901ada7c29ad2378) Signed-off-by: Wentao Guan --- .../arch/x86/hygon-secure-virtualization.rst | 100 ++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 Documentation/arch/x86/hygon-secure-virtualization.rst diff --git a/Documentation/arch/x86/hygon-secure-virtualization.rst b/Documentation/arch/x86/hygon-secure-virtualization.rst new file mode 100644 index 000000000000..3e709af93758 --- /dev/null +++ b/Documentation/arch/x86/hygon-secure-virtualization.rst @@ -0,0 +1,100 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========================== +HYGON Secure Virtualization +=========================== + +China Secure Virtualization (CSV) is a key virtualization feature on Hygon +processors. + +The 1st generation of CSV (CSV for short) is a secure virtualization technology +to provide memory encryption for the virtual machine (VM), each VM's memory is +encrypted by its unique encryption key which is managed by secure processor. 
+ +The 2nd generation of CSV (CSV2 for short) provides security enhancement to CSV +by encrypting not only the VM's memory but also the vCPU's registers of the VM. + +The 3rd generation of CSV (CSV3 for short) is a more advanced secure +virtualization technology, it integrates secure processor, memory encryption and +memory isolation to provide the ability to protect guest's private data. The CSV3 +guest's context like CPU registers, control block and nested page table is accessed +only by the guest itself and the secure processor. Neither other guests nor the +host can tamper with the guest's context. + +The secure processor is a separate processor inside Hygon hardware. The firmware +running inside the secure processor performs activities in a secure way, such as +OVMF encryption, VM launch, secure memory management and nested page table +management etc. For more information, please see CSV spec and CSV3 spec from Hygon. + +A CSV guest is running in the memory that is encrypted with a dedicated encrypt +key which is set by the secure processor. And CSV guest's memory encrypt key is +unique from the others. A low latency crypto engine resides on Hygon hardware +to minimize the negative effect on memory bandwidth. In CSV guest, a guest private +page will be automatically decrypted when read from memory and encrypted when +written to memory. + +CSV3 provides an enhancement technology named memory isolation to improve the +security. A dedicated memory isolation hardware is built in Hygon hardware. Only +the secure processor has privilege to configure the isolation hardware. The VMM +allocates CMA memory and transfers them to secure processor. The secure processor +maps the memory to secure nested page table and manages them as guest's private +memory. Any memory access (read or write) to CSV3 guest's private memory outside +the guest will be blocked by isolation hardware. + +A CSV3 guest may declare some memory regions as shared to share data with the +host. 
When a page is set as shared, read/write on the page will bypass the +isolation hardware and the guest's shared memory can be accessed by the host. A +method named CSV3 secure call command is designed and CSV3 guest sends the secure +call command to the secure processor to change private memory to shared memory. +In the method, 2 dedicated pages are reserved at early stage of the guest. Any +read/write on the dedicated pages will trigger nested page fault. When NPF +happens, the host helps to issue an external command to the secure processor but +cannot tamper with the data in the guest's private memory. Then the secure +processor checks the fault address and handles the command if the address is +exactly the dedicated pages. + +Support for CSV can be determined through the CPUID instruction. The CPUID +function 0x8000001f reports information about CSV:: + + 0x8000001f[eax]: + Bit[1] indicates support for CSV + Bit[3] indicates support for CSV2 + Bit[30] indicates support for CSV3 + +If CSV is supported, MSR 0xc0010131 can be used to determine if CSV is active:: + + 0xc0010131: + Bit[0] 0 = CSV is not active + 1 = CSV is active + Bit[1] 0 = CSV2 is not active + 1 = CSV2 is active + Bit[30] 0 = CSV3 is not active + 1 = CSV3 is active + +All CSV/CSV2's configurations must be enabled in CSV3. Linux can activate CSV3 by +default (CONFIG_HYGON_CSV=y, CONFIG_CMA=y). CSV3 guest's memory is managed by +CMA (Contiguous Memory Allocation). User must specify CSV3 total secure memory on +the linux kernel command line with csv_mem_size or csv_mem_percentage:: + + csv_mem_size=nn[MG] + [KNL,CSV] + Reserve specified CSV3 memory size in CMA. CSV3's memory will be + allocated from these CMAs. + For instance, csv_mem_size=40G, 40G memory is reserved for CSV3. + + csv_mem_percentage=nn + [KNL,CSV] + Reserve specified memory size which is prorated according to the + whole system memory size. CSV3 guest's memory will be allocated + from these CMAs. 
+ For instance, csv_mem_percentage=60, means 60% system memory is + reserved for CSV3. + The maximum percentage is 80. And the default percentage is 0. + +Limitations +The reserved CSV3 memory within CMA cannot be used by kernel or any application that +may pin memory using long term gup during the application's life time. +For instance, if the whole system memory is 64G and 32G is reserved for CSV3 with +kernel command line csv_mem_percentage=50, only 32G memory is available for CSV/CSV2. +As a result, user will fail to run a CSV/CSV2 guest with memory size which exceeds +32G. From 25eb6cca2b658e391c9966ed14e9b5fb3ec335e6 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 14 Jul 2023 17:17:58 +0800 Subject: [PATCH 08/99] x86/mm: Provide a Kconfig entry to build the HYGON memory encryption support into the kernel hygon inclusion category: feature CVE: NA --------------------------- Provide CONFIG_HYGON_CSV to the arch/x86/Kconfig, and build HYGON's specific memory encryption support into the kernel when CONFIG_HYGON_CSV=y. Besides, add arch/x86/include/asm/processor-hygon.h to contains helpers to determine the Hygon CPUs so that we can call functions specific to CSV in the native code and reduce code intruision. Signed-off-by: hanliyang (cherry picked from commit 9a215e273df1f905fc1ae8666dce1e036ccb2e58) Signed-off-by: Wentao Guan --- arch/x86/Kconfig | 23 +++++++++++++++++++++++ arch/x86/include/asm/processor-hygon.h | 23 +++++++++++++++++++++++ arch/x86/mm/Makefile | 2 ++ arch/x86/mm/mem_encrypt_hygon.c | 16 ++++++++++++++++ 4 files changed, 64 insertions(+) create mode 100644 arch/x86/include/asm/processor-hygon.h create mode 100644 arch/x86/mm/mem_encrypt_hygon.c diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index fa3b616af03a..54bca1f6c3d1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1975,6 +1975,29 @@ config EFI_RUNTIME_MAP See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map. 
+config HYGON_CSV + bool "Hygon secure virtualization CSV support" + default y + depends on CPU_SUP_HYGON && AMD_MEM_ENCRYPT + help + Hygon CSV integrates secure processor, memory encryption and + memory isolation to provide the ability to protect guest's private + data. It has evolved from CSV, CSV2 to CSV3. + + For CSV, the guest's memory is encrypted. + + For CSV2, not only the guest's memory, but also the guest's vCPU + registers are encrypted, neither other guests nor the host can tamper + with the vCPU registers. + + For CSV3, the guest's context like vCPU registers, control block and + nested page table is accessed only by the guest itself and the secure + processor. Neither other guests nor the host can tamper with the + guest's context. + + Say Y here to enable support for the whole capabilities of Hygon secure + virtualization on hygon processor. + source "kernel/Kconfig.hz" config ARCH_SUPPORTS_KEXEC diff --git a/arch/x86/include/asm/processor-hygon.h b/arch/x86/include/asm/processor-hygon.h new file mode 100644 index 000000000000..a19bda3ed005 --- /dev/null +++ b/arch/x86/include/asm/processor-hygon.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * The helpers to support Hygon CPU specific code path. + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + */ + +#ifndef _ASM_X86_PROCESSOR_HYGON_H +#define _ASM_X86_PROCESSOR_HYGON_H + +#include + +/* + * helper to determine HYGON CPU + */ +static inline bool is_x86_vendor_hygon(void) +{ + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON; +} + +#endif /* _ASM_X86_PROCESSOR_HYGON_H */ diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 5b9908f13dcf..c0ebdcc1e25b 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -57,3 +57,5 @@ obj-$(CONFIG_X86_MEM_ENCRYPT) += mem_encrypt.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o + +obj-$(CONFIG_HYGON_CSV) += mem_encrypt_hygon.o diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c new file mode 100644 index 000000000000..45919ecad02a --- /dev/null +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Memory Encryption Support + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define DISABLE_BRANCH_PROFILING + +#include From 82cfaa99ab01e8faf2396c73344eed1a8db74bc5 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 5 Aug 2024 17:05:28 +0800 Subject: [PATCH 09/99] crypto: ccp: Fix compile error on csv_cmd_buffer_len() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hygon inclusion category: bugfix CVE: NA --------------------------- The error messages is shown as following: drivers/crypto/ccp/hygon/csv-dev.c:21:5: error: no previous prototype for ‘csv_cmd_buffer_len’ [-Werror=missing-prototypes] 21 | int csv_cmd_buffer_len(int cmd) | ^~~~~~~~~~~~~~~~~~ cc1: all warnings being treated as errors Reported-by: WangYuli Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/351 (cherry picked from commit 47ea01f1301152f3e06a98126235351280165fd4) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 6f238aaeb434..dc3a6dfc86b5 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -15,6 +15,7 @@ #include #include +#include "csv-dev.h" #include "psp-dev.h" int csv_cmd_buffer_len(int cmd) From df30541ba5a224add0a44ef7d0d9cc85a632ff71 Mon Sep 17 00:00:00 2001 From: Wentao Guan Date: Mon, 22 Dec 2025 14:28:58 +0800 Subject: [PATCH 10/99] crypto: ccp: Adapt for kernel >=6.11 The following commit remove some macro, fix it. commit 8609dd25f9b271b3338e38b018fd39e49de1e6ca Author: Mario Limonciello Date: Tue May 28 16:07:08 2024 -0500 crypto: ccp - Represent capabilities register as a union Making the capabilities register a union makes it easier to refer to the members instead of always doing bit shifts. No intended functional changes. 
Acked-by: Tom Lendacky Suggested-by: Yazen Ghannam Signed-off-by: Mario Limonciello Signed-off-by: Herbert Xu Link: https://github.com/deepin-community/kernel/pull/350 Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/psp-dev.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index dd5285e1ba37..8f83e68257bf 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -21,10 +21,10 @@ struct hygon_psp_hooks_table hygon_psp_hooks; int fixup_hygon_psp_caps(struct psp_device *psp) { /* the hygon psp is unavailable if bit0 is cleared in feature reg */ - if (!(psp->capability & PSP_CAPABILITY_SEV)) + if (!psp->capability.sev) return -ENODEV; - psp->capability &= ~(PSP_CAPABILITY_TEE | - PSP_CAPABILITY_PSP_SECURITY_REPORTING); + psp->capability.tee = 0; + psp->capability.security_reporting = 0; return 0; } From d390ec7a734381703e272916340d7d2e56e6c7ed Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 14 Jul 2023 17:17:58 +0800 Subject: [PATCH 11/99] x86/mm: Print CSV info into the kernel log hygon inclusion category: feature CVE: NA --------------------------- Add CSV and CSV2 to the list of memory encryption features. Also print CPU vendor while printing CSV infos. 
Signed-off-by: hanliyang Link: Link: https://github.com/deepin-community/kernel/pull/350 (cherry picked from commit 3a15ccabaa574963ab0ab4cb9d7fcd370daaa37c) Signed-off-by: Wentao Guan Conflicts: arch/x86/include/asm/mem_encrypt.h arch/x86/mm/mem_encrypt.c --- arch/x86/include/asm/mem_encrypt.h | 6 ++++++ arch/x86/mm/mem_encrypt.c | 8 ++++++++ arch/x86/mm/mem_encrypt_hygon.c | 23 +++++++++++++++++++++++ 3 files changed, 37 insertions(+) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index ea6494628cb0..957e92a9c656 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -114,6 +114,12 @@ void add_encrypt_protection_map(void); extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[]; +#ifdef CONFIG_HYGON_CSV +extern void print_hygon_cc_feature_info(void); +#else /* !CONFIG_HYGON_CSV */ +static inline void print_hygon_cc_feature_info(void) { } +#endif /* CONFIG_HYGON_CSV */ + #endif /* __ASSEMBLER__ */ #endif /* __X86_MEM_ENCRYPT_H__ */ diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 95bae74fdab2..82f22a37537b 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -16,6 +16,8 @@ #include +#include + /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */ bool force_dma_unencrypted(struct device *dev) { @@ -46,6 +48,11 @@ static void print_mem_encrypt_feature_info(void) { pr_info("Memory Encryption Features active: "); + if (is_x86_vendor_hygon()) { + print_hygon_cc_feature_info(); + return; + } + switch (cc_vendor) { case CC_VENDOR_INTEL: pr_cont("Intel TDX\n"); @@ -55,6 +62,7 @@ static void print_mem_encrypt_feature_info(void) /* Secure Memory Encryption */ if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { + /* * SME is mutually exclusive with any of the SEV * features below. 
diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index 45919ecad02a..4c8a7f24aa41 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -14,3 +14,26 @@ #define DISABLE_BRANCH_PROFILING #include +#include +#include + +void print_hygon_cc_feature_info(void) +{ + /* Secure Memory Encryption */ + if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) { + /* + * HYGON SME is mutually exclusive with any of the + * HYGON CSV features below. + */ + pr_info(" HYGON SME"); + return; + } + + /* Secure Encrypted Virtualization */ + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + pr_info(" HYGON CSV"); + + /* Encrypted Register State */ + if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) + pr_info(" HYGON CSV2"); +} From a65c3b9765c11f2345491b18b6f6feecc13b5d4b Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 17 Jul 2023 18:44:56 +0800 Subject: [PATCH 12/99] crypto: ccp: Print Hygon CSV API version when CSV support is detected hygon inclusion category: feature CVE: NA --------------------------- The Cryptographic Co-Processor module will print 'SEV API' instead of 'CSV API' on Hygon CPU if CSV is supported. Fix this confused message here. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/350 (cherry picked from commit 9d1c6b9ee496fcbcaec8419e6ca3ad07767c49de) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 20 ++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 2 ++ drivers/crypto/ccp/sev-dev.c | 15 +++++++++++++-- 3 files changed, 35 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index dc3a6dfc86b5..24f8d30b57b7 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -18,6 +18,26 @@ #include "csv-dev.h" #include "psp-dev.h" +/* + * Hygon CSV build info: + * Hygon CSV build info is 32-bit in length other than 8-bit as that + * in AMD SEV. 
+ */ +u32 hygon_csv_build; + +/* + * csv_update_api_version is used to update the api version of HYGON CSV + * firmware at driver side. + * Currently, we only need to update @hygon_csv_build. + */ +void csv_update_api_version(struct sev_user_data_status *status) +{ + if (status) { + hygon_csv_build = (status->flags >> 9) | + ((u32)status->build << 23); + } +} + int csv_cmd_buffer_len(int cmd) { switch (cmd) { diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 43ca224be610..35e47cb08c11 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -12,8 +12,10 @@ #include +extern u32 hygon_csv_build; extern const struct file_operations csv_fops; +void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); #endif /* __CCP_HYGON_CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 6d603f52162a..83b1ea8dbc52 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1619,8 +1619,12 @@ static int __sev_platform_init_locked(int *error) dev_dbg(sev->dev, "SEV firmware initialized\n"); - dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, - sev->api_minor, sev->build); + if (is_vendor_hygon()) + dev_info(sev->dev, "CSV API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, hygon_csv_build); + else + dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, sev->build); return 0; } @@ -1920,6 +1924,13 @@ static int sev_get_api_version(void) sev->api_minor = status.api_minor; sev->build = status.build; + /* + * The api version fields of HYGON CSV firmware are not consistent + * with AMD SEV firmware. 
+ */ + if (is_vendor_hygon()) + csv_update_api_version(&status); + return 0; } From 857bf107dec4cec366ad9adc43311a4a441cd59b Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 17 Jul 2023 19:02:27 +0800 Subject: [PATCH 13/99] KVM: SVM: Print Hygon CSV support info if support is detected hygon inclusion category: feature CVE: NA --------------------------- The KVM will print 'SEV supported' instead of 'CSV supported' on Hygon CPU if CSV is supported. Fix these confused messages here. Fix other 'SEV' messages in arch/x86/kvm/svm/svm.c. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/350 (cherry picked from commit 8ab045cf85b283de7ff9c03b7392bca18e5f5bc1) Signed-off-by: Wentao Guan Conflicts: arch/x86/kvm/svm/sev.c --- arch/x86/kvm/svm/sev.c | 7 +++++-- arch/x86/kvm/svm/svm.c | 4 +++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 0835c664fbfd..7f1b516b8229 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "mmu.h" #include "x86.h" @@ -3119,13 +3120,15 @@ void __init sev_hardware_setup(void) } if (boot_cpu_has(X86_FEATURE_SEV)) - pr_info("SEV %s (ASIDs %u - %u)\n", + pr_info("%s %s (ASIDs %u - %u)\n", + is_x86_vendor_hygon() ? "CSV" : "SEV", sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" : "unusable" : "disabled", min_sev_asid, max_sev_asid); if (boot_cpu_has(X86_FEATURE_SEV_ES)) - pr_info("SEV-ES %s (ASIDs %u - %u)\n", + pr_info("%s %s (ASIDs %u - %u)\n", + is_x86_vendor_hygon() ? "CSV2" : "SEV-ES", sev_es_supported ? min_sev_es_asid <= max_sev_es_asid ? 
"enabled" : "unusable" : "disabled", diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index f2fa69dd5cc7..9e46de35884f 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -44,6 +44,7 @@ #include #include #include +#include #include @@ -436,7 +437,8 @@ static bool __kvm_is_svm_supported(void) } if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { - pr_info("KVM is unsupported when running as an SEV guest\n"); + pr_info("KVM is unsupported when running as an %s guest\n", + is_x86_vendor_hygon() ? "CSV" : "SEV"); return false; } From 1c9a40c5e6eac11456c98fcf4c24501cec8d7e7f Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 31 Jul 2023 23:35:42 +0800 Subject: [PATCH 14/99] x86/cpu: Detect memory encryption features on Hygon CPUs hygon inclusion category: feature CVE: NA --------------------------- Hygon SME is identified by CPUID 0x8000001f, but requires BIOS support to enable it (set bit 23 of MSR_AMD64_SYSCFG). Hygon CSV and CSV2 are identified by CPUID 0x8000001f, but requires BIOS support to enable it (set bit 23 of MSR_AMD64_SYSCFG and set bit 0 of MSR_K7_HWCR). Only show the SME, CSV, CSV2 features as available if reported by CPUID and enabled by BIOS. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/350 (cherry picked from commit 54f080596fadf8417a01e43870b2fb083f2156d7) Signed-off-by: Wentao Guan Conflicts: arch/x86/kernel/cpu/hygon.c --- arch/x86/kernel/cpu/hygon.c | 46 +++++++++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/proc.c | 10 ++++++-- 2 files changed, 54 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 1fda6c3a2b65..3b6854813841 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -122,6 +122,50 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) resctrl_cpu_detect(c); } +static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) +{ + u64 msr; + u32 eax; + + eax = cpuid_eax(0x8000001f); + + /* Check whether SME or CSV is supported */ + if (!(eax & (BIT(0) | BIT(1)))) + return; + + /* If BIOS has not enabled SME then don't advertise the SME feature. */ + rdmsrl(MSR_AMD64_SYSCFG, msr); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + goto clear_all; + + /* + * Always adjust physical address bits. Even though this will be a + * value above 32-bits this is still done for CONFIG_X86_32 so that + * accurate values are reported. + */ + c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f; + + /* Don't advertise SME and CSV features under CONFIG_X86_32. */ + if (IS_ENABLED(CONFIG_X86_32)) + goto clear_all; + + /* + * If BIOS has not enabled CSV then don't advertise the CSV and CSV2 + * feature. + */ + rdmsrl(MSR_K7_HWCR, msr); + if (!(msr & MSR_K7_HWCR_SMMLOCK)) + goto clear_csv; + + return; + +clear_all: + setup_clear_cpu_cap(X86_FEATURE_SME); +clear_csv: + setup_clear_cpu_cap(X86_FEATURE_SEV); + setup_clear_cpu_cap(X86_FEATURE_SEV_ES); +} + static void early_init_hygon(struct cpuinfo_x86 *c) { u32 dummy; @@ -166,6 +210,8 @@ static void early_init_hygon(struct cpuinfo_x86 *c) * we can set it unconditionally. 
*/ set_cpu_cap(c, X86_FEATURE_VMMCALL); + + early_detect_mem_encrypt(c); } static void init_hygon(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 6571d432cbe3..d7468a7d8216 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -103,8 +103,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_puts(m, "flags\t\t:"); for (i = 0; i < 32*NCAPINTS; i++) - if (cpu_has(c, i) && x86_cap_flags[i] != NULL) - seq_printf(m, " %s", x86_cap_flags[i]); + if (cpu_has(c, i) && x86_cap_flags[i] != NULL) { + if (c->x86_vendor == X86_VENDOR_HYGON) + seq_printf(m, " %s", i == X86_FEATURE_SEV ? "csv" : + (i == X86_FEATURE_SEV_ES ? "csv2" : + x86_cap_flags[i])); + else + seq_printf(m, " %s", x86_cap_flags[i]); + } #ifdef CONFIG_X86_VMX_FEATURE_NAMES if (cpu_has(c, X86_FEATURE_VMX) && c->vmx_capability[0]) { From d19ce8bafdec14a20cc697016ef8e89eb3c1aefb Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 4 Aug 2023 03:20:47 +0800 Subject: [PATCH 15/99] x86/cpufeatures: Add CPUID_8C86_0000_EDX CPUID leaf hygon inclusion category: feature CVE: NA --------------------------- This is a pure feature bits leaf. Add SM3 and SM4 feature bits from this leaf on Hygon CPUs. Signed-off-by: hanliyang [disabled-features.h and required-features.h removed by the commit commit 8f97566c8a8165cd994baf6219d86fbbf250d2df Author: Xin Li (Intel) Date: Mon Mar 10 08:32:12 2025 +0100 x86/cpufeatures: Remove {disabled,required}-features.h The functionalities of {disabled,required}-features.h have been replaced with the auto-generated generated/ header. Thus they are no longer needed and can be removed. None of the macros defined in {disabled,required}-features.h is used in tools, delete them too. 
Signed-off-by: Xin Li (Intel) Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Ingo Molnar Cc: Linus Torvalds Link: https://lore.kernel.org/r/20250305184725.3341760-4-xin@zytor.com] Link: https://github.com/deepin-community/kernel/pull/350 (cherry picked from commit 4a0be8dc9e1fb739ed91f73762a35540dfca3c4c) Signed-off-by: Wentao Guan Conflicts: arch/x86/include/asm/cpufeature.h arch/x86/include/asm/cpufeatures.h arch/x86/include/asm/disabled-features.h arch/x86/include/asm/required-features.h --- arch/x86/include/asm/cpufeature.h | 2 ++ arch/x86/include/asm/cpufeatures.h | 6 +++++- arch/x86/kernel/cpu/hygon.c | 14 ++++++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 893cbca37fe9..e842da8b73e5 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -35,6 +35,8 @@ enum cpuid_leafs CPUID_8000_001F_EAX, CPUID_8000_0021_EAX, CPUID_LNX_5, + CPUID_C000_0006_EAX, + CPUID_8C86_0000_EDX, /* 23 */ NR_CPUID_WORDS, }; diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 8e7ed0e5246b..70cf6ea73593 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -5,7 +5,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 22 /* N 32-bit words worth of info */ +#define NCAPINTS 24 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -519,6 +519,10 @@ #define X86_FEATURE_ABMC (21*32+15) /* Assignable Bandwidth Monitoring Counters */ #define X86_FEATURE_MSR_IMM (21*32+16) /* MSR immediate form instructions */ +/* HYGON-defined CPU features, CPUID level 0x8c860000:0 (EDX), word 23 */ +#define X86_FEATURE_SM3 (23*32 + 1) /* SM3 instructions */ +#define X86_FEATURE_SM4 (23*32 + 2) /* SM4 instructions */ + /* * BUG word(s) */ diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 3b6854813841..4977fcf8e3ad 100644 --- 
a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -122,6 +122,18 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) resctrl_cpu_detect(c); } +static void init_hygon_cap(struct cpuinfo_x86 *c) +{ + /* Test for Extended Feature Flags presence */ + if (cpuid_eax(0x8C860000) >= 0x8C860000) { + /* + * Store Extended Feature Flags of the CPU capability + * bit array + */ + c->x86_capability[CPUID_8C86_0000_EDX] = cpuid_edx(0x8C860000); + } +} + static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) { u64 msr; @@ -278,6 +290,8 @@ static void init_hygon(struct cpuinfo_x86 *c) /* Hygon CPUs don't need fencing after x2APIC/TSC_DEADLINE MSR writes. */ clear_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE); + + init_hygon_cap(c); } static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) From 63c421512778f7ca536338342655dfd4e6e4f369 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 4 Aug 2023 03:54:15 +0800 Subject: [PATCH 16/99] x86/cpufeatures: Add CSV3 CPU feature hygon inclusion category: feature CVE: NA --------------------------- Add CPU feature detection for Hygon 3rd CSV. This feature enhances CSV2 by also isolating NPT and VMCB, making them in-accessible to the hypervisor. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/350 (cherry picked from commit 00a1c40d70d8f3aed6f81149112e5c1790d58399) Signed-off-by: Wentao Guan Conflicts: arch/x86/include/asm/cpufeatures.h --- arch/x86/include/asm/cpufeatures.h | 2 ++ arch/x86/kernel/cpu/hygon.c | 1 + 2 files changed, 3 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 70cf6ea73593..346ec8cb932a 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -472,6 +472,8 @@ #define X86_FEATURE_ALLOWED_SEV_FEATURES (19*32+27) /* Allowed SEV Features */ #define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */ #define X86_FEATURE_HV_INUSE_WR_ALLOWED (19*32+30) /* Allow Write to in-use hypervisor-owned pages */ +/* HYGON 3rd CSV */ +#define X86_FEATURE_CSV3 (19*32 + 30) /* HYGON 3rd CSV */ /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */ diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 4977fcf8e3ad..694448a61743 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -176,6 +176,7 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) clear_csv: setup_clear_cpu_cap(X86_FEATURE_SEV); setup_clear_cpu_cap(X86_FEATURE_SEV_ES); + setup_clear_cpu_cap(X86_FEATURE_CSV3); } static void early_init_hygon(struct cpuinfo_x86 *c) From 168e3f299ae84b5f4a13993367bfc583e6cb9681 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sun, 10 Mar 2024 14:58:10 +0800 Subject: [PATCH 17/99] x86/cpu/hygon: Clear SME feature flag when not in use hygon inclusion category: feature CVE: NA --------------------------- The commit 08f253ec3767 ("x86/cpu: Clear SME feature flag when not in use") will clear SME feature flag if the kernel is not using it on AMD CPUs, this will help userspace to determine if SME is available and in use from 
/proc/cpuinfo. Apply this change to Hygon CPUs as well. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/350 (cherry picked from commit 941989f53848c32c07c4c652020d862db997b1f8) Signed-off-by: Wentao Guan --- arch/x86/kernel/cpu/hygon.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index 694448a61743..ab981291088d 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -161,6 +161,10 @@ static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) if (IS_ENABLED(CONFIG_X86_32)) goto clear_all; + /* Clear the SME feature flag if the kernel is not using it. */ + if (!sme_me_mask) + setup_clear_cpu_cap(X86_FEATURE_SME); + /* * If BIOS has not enabled CSV then don't advertise the CSV and CSV2 * feature. From 18f14e24c3de4daa25c449ac4f3c1f0daa8cc782 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 25 Apr 2024 17:53:05 +0800 Subject: [PATCH 18/99] deepin_x86_desktop_defconfig: Set CONFIG_HYGON_CSV by default hygon inclusion category: feature CVE: NA --------------------------- Configure CONFIG_HYGON_CSV=y so that Hygon Confidential Computing support will be compiled. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/350 (cherry picked from commit b0567bb7bc3702ecd3e0ff8693344d0a115594b9) Signed-off-by: Wentao Guan Conflicts: arch/x86/configs/deepin_x86_desktop_defconfig --- arch/x86/configs/deepin_x86_desktop_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index f22f871e4190..2c6145a09b69 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -85,6 +85,7 @@ CONFIG_X86_USER_SHADOW_STACK=y CONFIG_EFI=y CONFIG_EFI_STUB=y CONFIG_EFI_MIXED=y +CONFIG_HYGON_CSV=y CONFIG_LIVEPATCH=y CONFIG_HIBERNATION=y CONFIG_PM_WAKELOCKS=y From e05ed3530f4c86636b18101373569206a5445849 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 27 Feb 2025 14:50:02 +0800 Subject: [PATCH 19/99] x86/cpufeatures: Rename X86_FEATURE_SM{3,4} to X86_FEATURE_HYGON_SM{3,4} hygon inclusion category: bugfix CVE: NA --------------------------- The upstream commit a0423af92cb3 ("x86: KVM: Advertise CPUIDs for new instructions in Clearwater Forest") has introduced the macros X86_FEATURE_SM3 and X86_FEATURE_SM4, which conflict with the non-upstreamed commit 4a0be8dc9e1f ("x86/cpufeatures: Add CPUID_8C86_0000_EDX CPUID leaf"). To address this issue, we rename X86_FEATURE_SM{3,4} to X86_FEATURE_HYGON_SM{3,4}. 
Fixes: 4a0be8dc9e1f ("x86/cpufeatures: Add CPUID_8C86_0000_EDX CPUID leaf") Link: https://github.com/deepin-community/kernel/pull/643 Signed-off-by: hanliyang (cherry picked from commit a552a37dc658ee737b60855ee83f09a9fd2a313e) Signed-off-by: Wentao Guan --- arch/x86/include/asm/cpufeatures.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 346ec8cb932a..58b4a45def0f 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -522,8 +522,8 @@ #define X86_FEATURE_MSR_IMM (21*32+16) /* MSR immediate form instructions */ /* HYGON-defined CPU features, CPUID level 0x8c860000:0 (EDX), word 23 */ -#define X86_FEATURE_SM3 (23*32 + 1) /* SM3 instructions */ -#define X86_FEATURE_SM4 (23*32 + 2) /* SM4 instructions */ +#define X86_FEATURE_HYGON_SM3 (23*32 + 1) /* "sm3" SM3 instructions */ +#define X86_FEATURE_HYGON_SM4 (23*32 + 2) /* "sm4" SM4 instructions */ /* * BUG word(s) From 112c15d017202bccf49ac71e4043ef8b93b765d6 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 6 Aug 2024 16:16:57 +0800 Subject: [PATCH 20/99] crypto: ccp: Fix compile error on file csv-dev.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hygon inclusion category: bugfix CVE: NA --------------------------- The error messages is shown as following: In file included from drivers/crypto/ccp/hygon/csv-dev.c:19: drivers/crypto/ccp/hygon/csv-dev.h:18:36: error: ‘struct sev_user_data_status’ declared inside parameter list will not be visible outside of this definition or declaration [-Werror] 18 | void csv_update_api_version(struct sev_user_data_status *status); | ^~~~~~~~~~~~~~~~~~~~ drivers/crypto/ccp/hygon/csv-dev.c:34:6: error: conflicting types for ‘csv_update_api_version’; have ‘void(struct sev_user_data_status *)’ 34 | void csv_update_api_version(struct sev_user_data_status *status) | ^~~~~~~~~~~~~~~~~~~~~~ 
drivers/crypto/ccp/hygon/csv-dev.h:18:6: note: previous declaration of ‘csv_update_api_version’ with type ‘void(struct sev_user_data_status *)’ 18 | void csv_update_api_version(struct sev_user_data_status *status); | ^~~~~~~~~~~~~~~~~~~~~~ cc1: all warnings being treated as errors Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/353 (cherry picked from commit 174941c5dfd6d07fe998b6ec6b111a7be8f1c5c3) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 35e47cb08c11..677669e2371f 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -11,6 +11,7 @@ #define __CCP_HYGON_CSV_DEV_H__ #include +#include extern u32 hygon_csv_build; extern const struct file_operations csv_fops; From 99b3f9a7966f1c022af03588fed438570dce014d Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 26 Apr 2021 10:47:46 +0800 Subject: [PATCH 21/99] KVM: x86: Support VM_ATTESTATION hypercall hygon inclusion category: feature CVE: NA --------------------------- When sev guest wants to collect the attestation report, it cannot directly communicate with psp. Add VM_ATTESTATION hypercall to allow sev guest to tell host to help get the attestation report. Since sev guest memory is encrypted, host cannot tamper with the report data. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 6be6dfe0336139262f56745b36dfc5dfea301057) Signed-off-by: Wentao Guan Conflicts: arch/x86/include/asm/kvm-x86-ops.h arch/x86/include/asm/kvm_host.h arch/x86/kvm/Makefile arch/x86/kvm/svm/sev.c arch/x86/kvm/x86.c --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 4 ++ arch/x86/kvm/Makefile | 3 + arch/x86/kvm/svm/csv.c | 112 +++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 44 ++++++++++++ arch/x86/kvm/svm/sev.c | 24 +++++++ arch/x86/kvm/svm/svm.c | 20 +++++- arch/x86/kvm/x86.c | 10 ++- include/uapi/linux/kvm_para.h | 1 + 9 files changed, 217 insertions(+), 2 deletions(-) create mode 100644 arch/x86/kvm/svm/csv.c create mode 100644 arch/x86/kvm/svm/csv.h diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index fdf178443f85..2884603769dd 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -147,6 +147,7 @@ KVM_X86_OP_OPTIONAL(alloc_apic_backing_page) KVM_X86_OP_OPTIONAL_RET0(gmem_prepare) KVM_X86_OP_OPTIONAL_RET0(gmem_max_mapping_level) KVM_X86_OP_OPTIONAL(gmem_invalidate) +KVM_X86_OP_OPTIONAL(vm_attestation) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 974d64bf0a4d..04c1020c23f2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1944,6 +1944,10 @@ struct kvm_x86_ops { int (*gmem_prepare)(struct kvm *kvm, kvm_pfn_t pfn, gfn_t gfn, int max_order); void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end); int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn, bool is_private); + /* + * Attestation interface for HYGON CSV guest + */ + int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); }; struct kvm_x86_nested_ops { diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index c4b8950c7abe..42828fc7d6e2 
100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -32,7 +32,10 @@ kvm-intel-y += vmx/vmx_onhyperv.o vmx/hyperv_evmcs.o kvm-amd-y += svm/svm_onhyperv.o endif +kvm-amd-$(CONFIG_HYGON_CSV) += svm/csv.o + obj-$(CONFIG_KVM_X86) += kvm.o + obj-$(CONFIG_KVM_INTEL) += kvm-intel.o obj-$(CONFIG_KVM_AMD) += kvm-amd.o diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c new file mode 100644 index 000000000000..ef69fe72e769 --- /dev/null +++ b/arch/x86/kvm/svm/csv.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include "kvm_cache_regs.h" +#include "svm.h" +#include "csv.h" +#include "x86.h" + +#undef pr_fmt +#define pr_fmt(fmt) "CSV: " fmt + +/* Function and variable pointers for hooks */ +struct hygon_kvm_hooks_table hygon_kvm_hooks; + +static struct kvm_x86_ops csv_x86_ops; +static const char csv_vm_mnonce[] = "VM_ATTESTATION"; + +int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_attestation_report *data = NULL; + struct page **pages; + unsigned long guest_uaddr, n; + int ret = 0, offset, error; + + if (!sev_guest(kvm) || !hygon_kvm_hooks.sev_hooks_installed) + return -ENOTTY; + + /* + * The physical address of guest must valid and page aligned, and + * the length of guest memory region must be page size aligned. 
+ */ + if (!gpa || (gpa & ~PAGE_MASK) || (len & ~PAGE_MASK)) { + pr_err("invalid guest address or length\n"); + return -EFAULT; + } + + guest_uaddr = gfn_to_hva(kvm, gpa_to_gfn(gpa)); + pages = hygon_kvm_hooks.sev_pin_memory(kvm, guest_uaddr, len, &n, 1); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + /* + * The attestation report must be copied into contiguous memory region, + * lets verify that userspace memory pages are contiguous before we + * issue commmand. + */ + if (hygon_kvm_hooks.get_num_contig_pages(0, pages, n) != n) { + ret = -EINVAL; + goto e_unpin_memory; + } + + ret = -ENOMEM; + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_unpin_memory; + + /* csv_vm_mnonce indicates attestation request from guest */ + if (sizeof(csv_vm_mnonce) >= sizeof(data->mnonce)) { + ret = -EINVAL; + goto e_free; + } + + memcpy(data->mnonce, csv_vm_mnonce, sizeof(csv_vm_mnonce)); + + offset = guest_uaddr & (PAGE_SIZE - 1); + data->address = __sme_page_pa(pages[0]) + offset; + data->len = len; + + data->handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, + data, &error); + + if (ret) + pr_err("vm attestation ret %#x, error %#x\n", ret, error); + +e_free: + kfree(data); +e_unpin_memory: + hygon_kvm_hooks.sev_unpin_memory(kvm, pages, n); + return ret; +} + +void csv_exit(void) +{ +} + +void __init csv_init(struct kvm_x86_ops *ops) +{ + /* + * Hygon CSV is indicated by X86_FEATURE_SEV, return directly if CSV + * is unsupported. + */ + if (!boot_cpu_has(X86_FEATURE_SEV)) + return; + + memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); + + ops->vm_attestation = csv_vm_attestation; +} diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h new file mode 100644 index 000000000000..655fe457b27f --- /dev/null +++ b/arch/x86/kvm/svm/csv.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#ifndef __SVM_CSV_H +#define __SVM_CSV_H + +#include + +#ifdef CONFIG_HYGON_CSV + +/* + * Hooks table: a table of function and variable pointers filled in + * when module init. + */ +extern struct hygon_kvm_hooks_table { + bool sev_hooks_installed; + int (*sev_issue_cmd)(struct kvm *kvm, int id, void *data, int *error); + unsigned long (*get_num_contig_pages)(unsigned long idx, + struct page **inpages, + unsigned long npages); + struct page **(*sev_pin_memory)(struct kvm *kvm, unsigned long uaddr, + unsigned long ulen, unsigned long *n, + int write); + void (*sev_unpin_memory)(struct kvm *kvm, struct page **pages, + unsigned long npages); +} hygon_kvm_hooks; + +void __init csv_init(struct kvm_x86_ops *ops); +void csv_exit(void); + +#else /* !CONFIG_HYGON_CSV */ + +static inline void __init csv_init(struct kvm_x86_ops *ops) { } +static inline void csv_exit(void) { } + +#endif /* CONFIG_HYGON_CSV */ + +#endif /* __SVM_CSV_H */ diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 7f1b516b8229..3ff2f22c3420 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -37,6 +37,8 @@ #include "cpuid.h" #include "trace.h" +#include "csv.h" + #define GHCB_VERSION_MAX 2ULL #define GHCB_VERSION_MIN 1ULL @@ -2982,6 +2984,19 @@ static bool is_sev_snp_initialized(void) return initialized; } +#ifdef CONFIG_HYGON_CSV +/* Code to set all of the function and vaiable pointers */ +void sev_install_hooks(void) +{ + hygon_kvm_hooks.sev_issue_cmd = sev_issue_cmd; + hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages; + hygon_kvm_hooks.sev_pin_memory = sev_pin_memory; + hygon_kvm_hooks.sev_unpin_memory = sev_unpin_memory; + + hygon_kvm_hooks.sev_hooks_installed = true; +} +#endif + void __init sev_hardware_setup(void) { unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count; @@ -3152,6 +3167,15 @@ void __init sev_hardware_setup(void) if (sev_snp_enabled && tsc_khz && cpu_feature_enabled(X86_FEATURE_SNP_SECURE_TSC)) 
sev_supported_vmsa_features |= SVM_SEV_FEAT_SECURE_TSC; + +#ifdef CONFIG_HYGON_CSV + /* + * Install sev related function and variable pointers hooks only for + * Hygon CPUs. + */ + if (is_x86_vendor_hygon()) + sev_install_hooks(); +#endif } void sev_hardware_unsetup(void) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 9e46de35884f..f617d10ffc75 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -56,6 +56,8 @@ #include "kvm_onhyperv.h" #include "svm_onhyperv.h" +#include "csv.h" + MODULE_AUTHOR("Qumranet"); MODULE_DESCRIPTION("KVM support for SVM (AMD-V) extensions"); MODULE_LICENSE("GPL"); @@ -5453,6 +5455,10 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = { static void __svm_exit(void) { + /* Unregister CSV specific interface for Hygon CPUs */ + if (is_x86_vendor_hygon()) + csv_exit(); + kvm_x86_vendor_exit(); } @@ -5467,9 +5473,21 @@ static int __init svm_init(void) if (!kvm_is_svm_supported()) return -EOPNOTSUPP; + /* Register CSV specific interface for Hygon CPUs */ + if (is_x86_vendor_hygon()) + csv_init(&svm_x86_ops); + r = kvm_x86_vendor_init(&svm_init_ops); - if (r) + if (r) { + /* + * Unregister CSV specific interface for Hygon CPUs + * if error occurs. 
+ */ + if (is_x86_vendor_hygon()) + csv_exit(); + return r; + } /* * Common KVM initialization _must_ come last, after this, /dev/kvm is diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e6f2e34ec97d..58ae529dab35 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -85,6 +85,8 @@ #include #include +#include + #define CREATE_TRACE_POINTS #include "trace.h" @@ -10371,7 +10373,8 @@ int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl, a3 &= 0xFFFFFFFF; } - if (cpl) { + if (cpl && + !(is_x86_vendor_hygon() && nr == KVM_HC_VM_ATTESTATION)) { ret = -KVM_EPERM; goto out; } @@ -10441,6 +10444,11 @@ int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl, vcpu->arch.complete_userspace_io = complete_hypercall; return 0; } + case KVM_HC_VM_ATTESTATION: + ret = -KVM_ENOSYS; + if (is_x86_vendor_hygon() && kvm_x86_ops.vm_attestation) + ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); + break; default: ret = -KVM_ENOSYS; break; diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 960c7e93d1a9..67192835455e 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -30,6 +30,7 @@ #define KVM_HC_SEND_IPI 10 #define KVM_HC_SCHED_YIELD 11 #define KVM_HC_MAP_GPA_RANGE 12 +#define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ /* * hypercalls use architecture specific From 9cb2e15512e5a13d4b69153ac0d3882739d74f5b Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Tue, 30 May 2023 17:34:30 +0800 Subject: [PATCH 22/99] driver/virt/coco: Add HYGON CSV Guest dirver. hygon inclusion category: feature CVE: NA --------------------------- CSV firmware provides the guest a mechanism to communicate with the PSP without risk from a malicious hypervisor who wishes to read, alter, drop or replay the messages sent. The driver provides userspace interface to communicate with the PSP to request the attestation report and more. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit a97e35211cd3c0adb48e632e1ecf5d9798bced42) Signed-off-by: Wentao Guan Conflicts: drivers/virt/Makefile --- Documentation/virt/coco/csv-guest.rst | 33 +++++++++ drivers/virt/Kconfig | 2 + drivers/virt/coco/Makefile | 1 + drivers/virt/coco/csv-guest/Kconfig | 12 +++ drivers/virt/coco/csv-guest/Makefile | 2 + drivers/virt/coco/csv-guest/csv-guest.c | 98 +++++++++++++++++++++++++ drivers/virt/coco/csv-guest/csv-guest.h | 42 +++++++++++ 7 files changed, 190 insertions(+) create mode 100644 Documentation/virt/coco/csv-guest.rst create mode 100644 drivers/virt/coco/csv-guest/Kconfig create mode 100644 drivers/virt/coco/csv-guest/Makefile create mode 100644 drivers/virt/coco/csv-guest/csv-guest.c create mode 100644 drivers/virt/coco/csv-guest/csv-guest.h diff --git a/Documentation/virt/coco/csv-guest.rst b/Documentation/virt/coco/csv-guest.rst new file mode 100644 index 000000000000..23cba2a5fd7c --- /dev/null +++ b/Documentation/virt/coco/csv-guest.rst @@ -0,0 +1,33 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================================================================== +CSV Guest API Documentation +=================================================================== + +1. General description +====================== + +The CSV guest driver exposes IOCTL interfaces via the /dev/csv-guest misc +device to allow userspace to get certain CSV guest-specific details. + +2. API description +================== + +In this section, for each supported IOCTL, the following information is +provided along with a generic description. + +:Input parameters: Parameters passed to the IOCTL and related details. +:Output: Details about output data and return value (with details about + the non common error values). 
+ +2.1 CSV_CMD_GET_REPORT +----------------------- + +:Input parameters: struct csv_report_req +:Output: Upon successful execution, CSV_REPORT data is copied to + csv_report_req.report_data and return 0. Return -EINVAL for invalid + operands, -EIO on VMMCALL failure or standard error number on other + common failures. + +The CSV_CMD_GET_REPORT IOCTL can be used by the attestation software to get +the CSV_REPORT from the CSV module using VMMCALL[KVM_HC_VM_ATTESTATION]. diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig index d8c848cf09a6..941aaae288b2 100644 --- a/drivers/virt/Kconfig +++ b/drivers/virt/Kconfig @@ -49,4 +49,6 @@ source "drivers/virt/acrn/Kconfig" source "drivers/virt/coco/Kconfig" +source "drivers/virt/coco/csv-guest/Kconfig" + endif diff --git a/drivers/virt/coco/Makefile b/drivers/virt/coco/Makefile index f918bbb61737..d829b7a9f5d8 100644 --- a/drivers/virt/coco/Makefile +++ b/drivers/virt/coco/Makefile @@ -6,5 +6,6 @@ obj-$(CONFIG_EFI_SECRET) += efi_secret/ obj-$(CONFIG_ARM_PKVM_GUEST) += pkvm-guest/ obj-$(CONFIG_SEV_GUEST) += sev-guest/ obj-$(CONFIG_INTEL_TDX_GUEST) += tdx-guest/ +obj-$(CONFIG_CSV_GUEST) += csv-guest/ obj-$(CONFIG_ARM_CCA_GUEST) += arm-cca-guest/ obj-$(CONFIG_TSM_GUEST) += guest/ diff --git a/drivers/virt/coco/csv-guest/Kconfig b/drivers/virt/coco/csv-guest/Kconfig new file mode 100644 index 000000000000..f14f6766e5ae --- /dev/null +++ b/drivers/virt/coco/csv-guest/Kconfig @@ -0,0 +1,12 @@ +config CSV_GUEST + tristate "HYGON CSV Guest driver" + default m + depends on HYGON_CSV + help + CSV firmware provides the guest a mechanism to communicate with + the PSP without risk from a malicious hypervisor who wishes to read, + alter, drop or replay the messages sent. The driver provides + userspace interface to communicate with the PSP to request the + attestation report and more. + + If you choose 'M' here, this module will be called csv-guest. 
diff --git a/drivers/virt/coco/csv-guest/Makefile b/drivers/virt/coco/csv-guest/Makefile new file mode 100644 index 000000000000..a1c3a1499fc6 --- /dev/null +++ b/drivers/virt/coco/csv-guest/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CSV_GUEST) += csv-guest.o diff --git a/drivers/virt/coco/csv-guest/csv-guest.c b/drivers/virt/coco/csv-guest/csv-guest.c new file mode 100644 index 000000000000..7db8177637ce --- /dev/null +++ b/drivers/virt/coco/csv-guest/csv-guest.c @@ -0,0 +1,98 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Userspace interface for CSV guest driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: fangbaoshun + */ +#include +#include +#include +#include +#include +#include + +#include + +#include "csv-guest.h" + +static long csv_get_report(void __user *argp) +{ + u8 *csv_report; + long ret; + struct csv_report_req req; + + if (copy_from_user(&req, argp, sizeof(struct csv_report_req))) + return -EFAULT; + + if (req.len < CSV_REPORT_INPUT_DATA_LEN) + return -EINVAL; + + csv_report = kzalloc(req.len, GFP_KERNEL); + if (!csv_report) { + ret = -ENOMEM; + goto out; + } + + /* Save user input data */ + if (copy_from_user(csv_report, req.report_data, CSV_REPORT_INPUT_DATA_LEN)) { + ret = -EFAULT; + goto out; + } + + /* Generate CSV_REPORT using "KVM_HC_VM_ATTESTATION" VMMCALL */ + ret = kvm_hypercall2(KVM_HC_VM_ATTESTATION, __pa(csv_report), req.len); + if (ret) + goto out; + + if (copy_to_user(req.report_data, csv_report, req.len)) + ret = -EFAULT; + +out: + kfree(csv_report); + return ret; +} + +static long csv_guest_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case CSV_CMD_GET_REPORT: + return csv_get_report((void __user *)arg); + default: + return -ENOTTY; + } +} + +static const struct file_operations csv_guest_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = csv_guest_ioctl, + .compat_ioctl = csv_guest_ioctl, +}; + +static struct miscdevice 
csv_guest_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "csv-guest", + .fops = &csv_guest_fops, + .mode = 0777, +}; + +static int __init csv_guest_init(void) +{ + // This module only working on CSV guest vm. + if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + return -ENODEV; + + return misc_register(&csv_guest_dev); +} + +static void __exit csv_guest_exit(void) +{ + misc_deregister(&csv_guest_dev); +} + +MODULE_LICENSE("GPL"); +MODULE_VERSION("1.0.0"); +MODULE_DESCRIPTION("HYGON CSV Guest Driver"); +module_init(csv_guest_init); +module_exit(csv_guest_exit); diff --git a/drivers/virt/coco/csv-guest/csv-guest.h b/drivers/virt/coco/csv-guest/csv-guest.h new file mode 100644 index 000000000000..337211b928db --- /dev/null +++ b/drivers/virt/coco/csv-guest/csv-guest.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * + * Userspace interface for CSV guest driver + * + * Copyright (C) Hygon Info Technologies Ltd. + */ + +#ifndef __VIRT_CSVGUEST_H__ +#define __VIRT_CSVGUEST_H__ + +#include +#include + +/* Length of the user input datas used in VMMCALL */ +#define CSV_REPORT_USER_DATA_LEN 64 +#define CSV_REPORT_MNONCE_LEN 16 +#define CSV_REPORT_HASH_LEN 32 +#define CSV_REPORT_INPUT_DATA_LEN (CSV_REPORT_USER_DATA_LEN + CSV_REPORT_MNONCE_LEN \ + + CSV_REPORT_HASH_LEN) + +/** + * struct csv_report_req - Request struct for CSV_CMD_GET_REPORT IOCTL. + * + * @report_data:User buffer with REPORT_DATA to be included into CSV_REPORT, and it's also + * user buffer to store CSV_REPORT output from VMMCALL[KVM_HC_VM_ATTESTATION]. + * @len: Length of the user buffer. + */ +struct csv_report_req { + u8 *report_data; + int len; +}; + +/* + * CSV_CMD_GET_REPORT - Get CSV_REPORT using VMMCALL[KVM_HC_VM_ATTESTATION] + * + * Return 0 on success, -EIO on VMMCALL execution failure, and + * standard errno on other general error cases. 
+ */ +#define CSV_CMD_GET_REPORT _IOWR('D', 1, struct csv_report_req) + +#endif /* __VIRT_CSVGUEST_H__ */ From fdd081de69b3fbd37e590cc615a793de24e12b06 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 3 Dec 2021 06:08:07 -0500 Subject: [PATCH 23/99] crypto: ccp: Support DOWNLOAD_FIRMWARE when detect CSV hygon inclusion category: feature CVE: NA --------------------------- When ccp driver detect CSV support on Hygon CPU, it should try to update the latest CSV firmware on the system paths. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit fba194e7a4d8a7ffaace89aa22850154144b4294) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/sev-dev.c --- drivers/crypto/ccp/hygon/csv-dev.h | 7 +++++++ drivers/crypto/ccp/sev-dev.c | 20 +++++++++++++++++--- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 677669e2371f..10c57987f050 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -13,10 +13,17 @@ #include #include +#define CSV_FW_FILE "hygon/csv.fw" + extern u32 hygon_csv_build; extern const struct file_operations csv_fops; void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); +static inline bool csv_version_greater_or_equal(u32 build) +{ + return hygon_csv_build >= build; +} + #endif /* __CCP_HYGON_CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 83b1ea8dbc52..c67782105e68 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1940,6 +1940,14 @@ static int sev_get_firmware(struct device *dev, char fw_name_specific[SEV_FW_NAME_SIZE]; char fw_name_subset[SEV_FW_NAME_SIZE]; + if (is_vendor_hygon()) { + /* Check for CSV FW to using generic name: csv.fw */ + if (firmware_request_nowarn(firmware, CSV_FW_FILE, dev) >= 0) + return 0; + else + return -ENOENT; + } + 
snprintf(fw_name_specific, sizeof(fw_name_specific), "amd/amd_sev_fam%.2xh_model%.2xh.sbin", boot_cpu_data.x86, boot_cpu_data.x86_model); @@ -1978,13 +1986,15 @@ static int sev_update_firmware(struct device *dev) struct page *p; u64 data_size; - if (!sev_version_greater_or_equal(0, 15)) { + if (!sev_version_greater_or_equal(0, 15) && + !(is_vendor_hygon() && csv_version_greater_or_equal(1667))) { dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); return -1; } if (sev_get_firmware(dev, &firmware) == -ENOENT) { - dev_dbg(dev, "No SEV firmware file present\n"); + dev_dbg(dev, "No %s firmware file present\n", + is_vendor_hygon() ? "CSV" : "SEV"); return -1; } @@ -2024,7 +2034,11 @@ static int sev_update_firmware(struct device *dev) ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); if (ret) - dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); + dev_dbg(dev, "Failed to update %s firmware: %#x\n", + is_vendor_hygon() ? "CSV" : "SEV", error); + else + dev_info(dev, "%s firmware update successful\n", + is_vendor_hygon() ? "CSV" : "SEV"); __free_pages(p, order); From 1848f09d92c97be129cacc6f0e303246f76ffc2e Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 3 Dec 2021 05:31:27 -0500 Subject: [PATCH 24/99] crypto: ccp: Implement CSV_PLATFORM_INIT ioctl command hygon inclusion category: feature CVE: NA --------------------------- The CSV_PLATFORM_INIT command can be used by the platform owner to switch platform from PSTATE.UNINIT to PSTATE.INIT. In the upcoming patches, we'll support DOWNLOAD_FIRMWARE at userspace. Due to DOWNLOAD_FIRMWARE can only performed when platform is in the PSTATE.UNINIT, we need invoke PLATFORM_INIT following DOWNLOAD_FIRMWARE to switch platform back to PSTATE.INIT. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 17ed0b76275dc75683eb12d55f1e3ccfc94cd864) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 3 +++ drivers/crypto/ccp/hygon/psp-dev.h | 1 + drivers/crypto/ccp/sev-dev.c | 1 + include/uapi/linux/psp-hygon.h | 1 + 4 files changed, 6 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 24f8d30b57b7..164231c2fdf0 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -118,6 +118,9 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) case CSV_HGSC_CERT_IMPORT: ret = csv_ioctl_do_hgsc_import(&input); break; + case CSV_PLATFORM_INIT: + ret = hygon_psp_hooks.__sev_platform_init_locked(&input.error); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index b984237b4795..40233669e197 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -25,6 +25,7 @@ extern struct hygon_psp_hooks_table { bool sev_dev_hooks_installed; struct mutex *sev_cmd_mutex; int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); + int (*__sev_platform_init_locked)(int *error); long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index c67782105e68..0b73a8d65f07 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -2711,6 +2711,7 @@ static void sev_dev_install_hooks(void) { hygon_psp_hooks.sev_cmd_mutex = &sev_cmd_mutex; hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; + hygon_psp_hooks.__sev_platform_init_locked = __sev_platform_init_locked; hygon_psp_hooks.sev_ioctl = sev_ioctl; hygon_psp_hooks.sev_dev_hooks_installed = true; diff --git 
a/include/uapi/linux/psp-hygon.h b/include/uapi/linux/psp-hygon.h index 004ad4c50b70..966e91d96e47 100644 --- a/include/uapi/linux/psp-hygon.h +++ b/include/uapi/linux/psp-hygon.h @@ -21,6 +21,7 @@ * CSV guest/platform commands */ enum { + CSV_PLATFORM_INIT = 101, CSV_HGSC_CERT_IMPORT = 201, CSV_MAX, From b7a833252efcefc3131338a72bcd840061221b5b Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 3 Dec 2021 05:33:25 -0500 Subject: [PATCH 25/99] crypto: ccp: Implement CSV_PLATFORM_SHUTDOWN ioctl command hygon inclusion category: feature CVE: NA --------------------------- The CSV_PLATFORM_SHUTDOWN command can be used by the platform owner to switch platform to PSTATE.UNINIT. The DOWNLOAD_FIRMWARE API can only performed when platform is in the PSTATE.UNINIT. In order to support DOWNLOAD_FIRMWARE at userspace, we need invoke PLATFORM_SHUTDOWN before that. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 76b83f82dc35ad9306a165b1c60b9ea05d3446f0) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 3 +++ drivers/crypto/ccp/hygon/psp-dev.h | 1 + drivers/crypto/ccp/sev-dev.c | 1 + include/uapi/linux/psp-hygon.h | 1 + 4 files changed, 6 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 164231c2fdf0..6a269a77c882 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -121,6 +121,9 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) case CSV_PLATFORM_INIT: ret = hygon_psp_hooks.__sev_platform_init_locked(&input.error); break; + case CSV_PLATFORM_SHUTDOWN: + ret = hygon_psp_hooks.__sev_platform_shutdown_locked(&input.error); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index 40233669e197..062884218945 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ 
b/drivers/crypto/ccp/hygon/psp-dev.h @@ -26,6 +26,7 @@ extern struct hygon_psp_hooks_table { struct mutex *sev_cmd_mutex; int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); int (*__sev_platform_init_locked)(int *error); + int (*__sev_platform_shutdown_locked)(int *error); long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 0b73a8d65f07..89fb07f42391 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -2712,6 +2712,7 @@ static void sev_dev_install_hooks(void) hygon_psp_hooks.sev_cmd_mutex = &sev_cmd_mutex; hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; hygon_psp_hooks.__sev_platform_init_locked = __sev_platform_init_locked; + hygon_psp_hooks.__sev_platform_shutdown_locked = __sev_platform_shutdown_locked; hygon_psp_hooks.sev_ioctl = sev_ioctl; hygon_psp_hooks.sev_dev_hooks_installed = true; diff --git a/include/uapi/linux/psp-hygon.h b/include/uapi/linux/psp-hygon.h index 966e91d96e47..9ec57ad5437b 100644 --- a/include/uapi/linux/psp-hygon.h +++ b/include/uapi/linux/psp-hygon.h @@ -22,6 +22,7 @@ */ enum { CSV_PLATFORM_INIT = 101, + CSV_PLATFORM_SHUTDOWN = 102, CSV_HGSC_CERT_IMPORT = 201, CSV_MAX, From e34323db6e703a60a9aa4517e452e5392facbbe1 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 3 Dec 2021 05:58:23 -0500 Subject: [PATCH 26/99] crypto: ccp: Implement CSV_DOWNLOAD_FIRMWARE ioctl command hygon inclusion category: feature CVE: NA --------------------------- The CSV_DOWNLOAD_FIRMWARE command can be used by the platform owner to updating CSV firmware. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 20a17642b1cc3bd17f7be9125792e7083e5fb9e3) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 71 ++++++++++++++++++++++++++++++ include/linux/psp-hygon.h | 2 + include/uapi/linux/psp-hygon.h | 12 +++++ 3 files changed, 85 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 6a269a77c882..d38d44ee3216 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -91,6 +91,74 @@ static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) return ret; } +static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) +{ + struct sev_data_download_firmware *data = NULL; + struct csv_user_data_download_firmware input; + int ret, order; + struct page *p; + u64 data_size; + + /* Only support DOWNLOAD_FIRMWARE if build greater or equal 1667 */ + if (!csv_version_greater_or_equal(1667)) { + pr_err("DOWNLOAD_FIRMWARE not supported\n"); + return -EIO; + } + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + if (!input.address) { + argp->error = SEV_RET_INVALID_ADDRESS; + return -EINVAL; + } + + if (!input.length || input.length > CSV_FW_MAX_SIZE) { + argp->error = SEV_RET_INVALID_LEN; + return -EINVAL; + } + + /* + * CSV FW expects the physical address given to it to be 32 + * byte aligned. Memory allocated has structure placed at the + * beginning followed by the firmware being passed to the CSV + * FW. Allocate enough memory for data structure + alignment + * padding + CSV FW. + */ + data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); + + order = get_order(input.length + data_size); + p = alloc_pages(GFP_KERNEL, order); + if (!p) + return -ENOMEM; + + /* + * Copy firmware data to a kernel allocated contiguous + * memory region. 
+ */ + data = page_address(p); + if (copy_from_user((void *)(page_address(p) + data_size), + (void *)input.address, input.length)) { + ret = -EFAULT; + goto err_free_page; + } + + data->address = __psp_pa(page_address(p) + data_size); + data->len = input.length; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_DOWNLOAD_FIRMWARE, + data, &argp->error); + if (ret) + pr_err("Failed to update CSV firmware: %#x\n", argp->error); + else + pr_info("CSV firmware update successful\n"); + +err_free_page: + __free_pages(p, order); + + return ret; +} + static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; @@ -124,6 +192,9 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) case CSV_PLATFORM_SHUTDOWN: ret = hygon_psp_hooks.__sev_platform_shutdown_locked(&input.error); break; + case CSV_DOWNLOAD_FIRMWARE: + ret = csv_ioctl_do_download_firmware(&input); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 5c7abb06740a..e9f006e86496 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -16,6 +16,8 @@ /***************************** CSV interface *********************************/ /*****************************************************************************/ +#define CSV_FW_MAX_SIZE 0x80000 /* 512KB */ + /** * Guest/platform management commands for CSV */ diff --git a/include/uapi/linux/psp-hygon.h b/include/uapi/linux/psp-hygon.h index 9ec57ad5437b..0e65afbeea3c 100644 --- a/include/uapi/linux/psp-hygon.h +++ b/include/uapi/linux/psp-hygon.h @@ -23,6 +23,7 @@ enum { CSV_PLATFORM_INIT = 101, CSV_PLATFORM_SHUTDOWN = 102, + CSV_DOWNLOAD_FIRMWARE = 128, CSV_HGSC_CERT_IMPORT = 201, CSV_MAX, @@ -43,4 +44,15 @@ struct csv_user_data_hgsc_cert_import { __u32 hgsc_cert_len; /* In */ } __packed; +/** + * struct csv_user_data_download_firmware - DOWNLOAD_FIRMWARE command 
parameters + * + * @address: physical address of CSV firmware image + * @length: length of the CSV firmware image + */ +struct csv_user_data_download_firmware { + __u64 address; /* In */ + __u32 length; /* In */ +} __packed; + #endif /* __PSP_HYGON_USER_H__ */ From 21405aa8c0c8c5c58f731e25668c5a00e9451fbb Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 14:42:49 +0800 Subject: [PATCH 27/99] crypto: ccp: Introduce init and free helpers to manage CSV RING_BUFFER queues hygon inclusion category: feature CVE: NA --------------------------- There are up to two queues created in RING_BUFFER mode, each with two sub-queues. The sub-queues store the command pointer entries (written only by the x86) and status entries (written only by the CSV Firmware) respectively. The two queues are low priority queue (required) and high priority queue (optional) respectively. In this change, we introduce csv_ring_buffer_queue_init() to initialize CSV RING_BUFFER queues, and csv_ring_buffer_queue_free() to cleanup CSV RING_BUFFER queues. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 2b2510679cbe4ff0ec91fcc09fe1c46d92ec921e) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/sev-dev.h --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/hygon/csv-dev.c | 103 ++++++++++++++++++++++++- drivers/crypto/ccp/hygon/ring-buffer.c | 37 +++++++++ drivers/crypto/ccp/hygon/ring-buffer.h | 19 +++++ drivers/crypto/ccp/sev-dev.h | 5 ++ include/linux/psp-hygon.h | 38 +++++++++ 6 files changed, 203 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/ccp/hygon/ring-buffer.c create mode 100644 drivers/crypto/ccp/hygon/ring-buffer.h diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index c6184de3f46b..1d05191e811f 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -17,7 +17,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ hsti.o \ sfs.o \ hygon/psp-dev.o \ - hygon/csv-dev.o + hygon/csv-dev.o \ + hygon/ring-buffer.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index d38d44ee3216..6f6b6665e381 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -15,8 +15,9 @@ #include #include -#include "csv-dev.h" #include "psp-dev.h" +#include "csv-dev.h" +#include "ring-buffer.h" /* * Hygon CSV build info: @@ -218,3 +219,103 @@ const struct file_operations csv_fops = { .owner = THIS_MODULE, .unlocked_ioctl = csv_ioctl, }; + +/* + * __csv_ring_buffer_queue_init will allocate memory for command queue + * and status queue. If error occurs, this function will return directly, + * the caller must free the memories allocated for queues. + * + * Function csv_ring_buffer_queue_free() can be used to handling error + * return by this function and cleanup ring buffer queues when exiting + * from RING BUFFER mode. 
+ * + * Return -ENOMEM if fail to allocate memory for queues, otherwise 0 + */ +static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) +{ + void *cmd_ptr_buffer = NULL; + void *stat_val_buffer = NULL; + + /* If reach here, the command and status queues must be NULL */ + WARN_ON(ring_buffer->cmd_ptr.data || + ring_buffer->stat_val.data); + + cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!cmd_ptr_buffer) + return -ENOMEM; + + /* the command queue will points to @cmd_ptr_buffer */ + csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, + CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + + stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!stat_val_buffer) + return -ENOMEM; + + /* the status queue will points to @stat_val_buffer */ + csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, + CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + return 0; +} + +int csv_ring_buffer_queue_init(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + int i, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&sev->ring_buffer[i]); + if (ret) + goto e_free; + } + + return 0; + +e_free: + csv_ring_buffer_queue_free(); + return ret; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_init); + +int csv_ring_buffer_queue_free(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_ringbuffer_queue *ring_buffer; + int i; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = 0; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ring_buffer = &sev->ring_buffer[i]; + + /* + * If command queue is not NULL, it must points to memory + * that allocated in __csv_ring_buffer_queue_init(). 
+ */ + if (ring_buffer->cmd_ptr.data) { + kfree((void *)ring_buffer->cmd_ptr.data); + csv_queue_cleanup(&ring_buffer->cmd_ptr); + } + + /* + * If status queue is not NULL, it must points to memory + * that allocated in __csv_ring_buffer_queue_init(). + */ + if (ring_buffer->stat_val.data) { + kfree((void *)ring_buffer->stat_val.data); + csv_queue_cleanup(&ring_buffer->stat_val); + } + } + return 0; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c new file mode 100644 index 000000000000..beeb325136c6 --- /dev/null +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +#include "ring-buffer.h" + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize) +{ + size /= esize; + + queue->head = 0; + queue->tail = 0; + queue->esize = esize; + queue->data = (u64)buffer; + queue->mask = size - 1; + queue->data_align = ALIGN(queue->data, CSV_RING_BUFFER_ALIGN); + + return 0; +} + +void csv_queue_cleanup(struct csv_queue *queue) +{ + memset((void *)queue, 0, sizeof(struct csv_queue)); +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h new file mode 100644 index 000000000000..5ea4e2f54b9a --- /dev/null +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. 
+ * + * Author: Baoshun Fang + */ + +#ifndef __CCP_HYGON_RINGBUF_H__ +#define __CCP_HYGON_RINGBUF_H__ + +#include + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize); +void csv_queue_cleanup(struct csv_queue *queue); + +#endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index ac03bd0848f7..2cafc6db8008 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -25,6 +25,8 @@ #include #include +#include "hygon/ring-buffer.h" + #define SEV_CMDRESP_CMD GENMASK(26, 16) #define SEV_CMD_COMPLETE BIT(1) #define SEV_CMDRESP_IOC BIT(0) @@ -61,6 +63,9 @@ struct sev_device { struct sev_user_data_snp_status snp_plat_status; struct snp_feature_info snp_feat_info_0; + + /* Management of the Hygon RING BUFFER mode */ + struct csv_ringbuffer_queue ring_buffer[CSV_COMMAND_PRIORITY_NUM]; }; int sev_dev_init(struct psp_device *psp); diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index e9f006e86496..cf1435f7cb96 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -26,6 +26,18 @@ enum csv_cmd { CSV_CMD_MAX, }; +/** + * Ring Buffer Mode regions: + * There are 4 regions and every region is a 4K area that must be 4K aligned. + * To accomplish this allocate an amount that is the size of area and the + * required alignment. + * The aligned address will be calculated from the returned address. 
+ */ +#define CSV_RING_BUFFER_SIZE (32 * 1024) +#define CSV_RING_BUFFER_ALIGN (4 * 1024) +#define CSV_RING_BUFFER_LEN (CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN) +#define CSV_RING_BUFFER_ESIZE 16 + /** * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters * @@ -42,8 +54,34 @@ struct csv_data_hgsc_cert_import { u32 hgsc_cert_len; /* In */ } __packed; +#define CSV_COMMAND_PRIORITY_HIGH 0 +#define CSV_COMMAND_PRIORITY_LOW 1 +#define CSV_COMMAND_PRIORITY_NUM 2 + +struct csv_queue { + u32 head; + u32 tail; + u32 mask; /* mask = (size - 1), inicates the elements max count */ + u32 esize; /* size of an element */ + u64 data; + u64 data_align; +} __packed; + +struct csv_ringbuffer_queue { + struct csv_queue cmd_ptr; + struct csv_queue stat_val; +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP + +int csv_ring_buffer_queue_init(void); +int csv_ring_buffer_queue_free(void); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ + +static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } +static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ #endif /* __PSP_HYGON_H__ */ From 001ea461600747f2c60e0dd986590608976aed6a Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 15:21:33 +0800 Subject: [PATCH 28/99] crypto: ccp: Add support for enqueue command pointers in CSV RING_BUFFER mode hygon inclusion category: feature CVE: NA --------------------------- In CSV RING_BUFFER mode, X86 will enqueue command pointers to the sub-queue which stores the command pointers. The priority will be given through parameter. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 8a6b639a72ae43b84c2b4a6a41ebfd1b6ae9a296) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 22 +++++++++++ drivers/crypto/ccp/hygon/ring-buffer.c | 54 ++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/ring-buffer.h | 2 + include/linux/psp-hygon.h | 10 +++++ 4 files changed, 88 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 6f6b6665e381..d216fa39af4e 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -319,3 +319,25 @@ int csv_ring_buffer_queue_free(void) return 0; } EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); + +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_cmdptr_entry cmdptr = { }; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c index beeb325136c6..8058d7a4fb66 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.c +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -13,9 +13,49 @@ #include #include +#include + +#include #include "ring-buffer.h" +static void enqueue_data(struct csv_queue *queue, + const void *src, + unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + void *data; + + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + data = (void *)queue->data_align; + memcpy(data + off, src, l); + memcpy(data, src + l, len - 
l); + + /* + * Make sure that the data in the ring buffer is up to date before + * incrementing the queue->tail index counter. + */ + smp_wmb(); +} + +static unsigned int queue_avail_size(struct csv_queue *queue) +{ + /* + * According to the nature of unsigned Numbers, it always work + * well even though tail < head. Reserved 1 element to distinguish + * full and empty. + */ + return queue->mask - (queue->tail - queue->head); +} + int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize) { @@ -35,3 +75,17 @@ void csv_queue_cleanup(struct csv_queue *queue) { memset((void *)queue, 0, sizeof(struct csv_queue)); } + +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len) +{ + unsigned int size; + + size = queue_avail_size(queue); + if (len > size) + len = size; + + enqueue_data(queue, buf, len, queue->tail); + queue->tail += len; + return len; +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h index 5ea4e2f54b9a..6e3c799c09e1 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.h +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -15,5 +15,7 @@ int csv_queue_init(struct csv_queue *queue, void *buffer, unsigned int size, size_t esize); void csv_queue_cleanup(struct csv_queue *queue); +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len); #endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index cf1435f7cb96..6783cf424d56 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -58,6 +58,13 @@ struct csv_data_hgsc_cert_import { #define CSV_COMMAND_PRIORITY_LOW 1 #define CSV_COMMAND_PRIORITY_NUM 2 +struct csv_cmdptr_entry { + u16 cmd_id; + u16 cmd_flags; + u32 sw_data; + u64 cmd_buf_ptr; +} __packed; + struct csv_queue { u32 head; u32 tail; @@ -76,11 +83,14 @@ struct csv_ringbuffer_queue { int csv_ring_buffer_queue_init(void); int 
csv_ring_buffer_queue_free(void); +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } +static inline +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ From d26eca42393e3c14a859fbf8641c4ad47d67eaf8 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 15:35:47 +0800 Subject: [PATCH 29/99] crypto: ccp: Add support for dequeue status in CSV RING_BUFFER mode hygon inclusion category: feature CVE: NA --------------------------- In CSV RING_BUFFER mode, X86 will dequeue status entries written by PSP after the corresponding command has been handled. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit c9b421a18f5405f07105e42192cf5b5f0e2652af) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 32 +++++++++++++++++++++ drivers/crypto/ccp/hygon/ring-buffer.c | 39 ++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/ring-buffer.h | 2 ++ include/linux/psp-hygon.h | 9 ++++++ 4 files changed, 82 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index d216fa39af4e..23fe783cc208 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -341,3 +341,35 @@ int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) return 0; } EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); + +int csv_check_stat_queue_status(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int len; + int prio; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (prio = CSV_COMMAND_PRIORITY_HIGH; + prio < CSV_COMMAND_PRIORITY_NUM; prio++) { + do { + struct csv_statval_entry 
statval; + + len = csv_dequeue_stat(&sev->ring_buffer[prio].stat_val, + &statval, 1); + if (len) { + if (statval.status != 0) { + *psp_ret = statval.status; + return -EFAULT; + } + } + } while (len); + } + + return 0; +} +EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c index 8058d7a4fb66..93402b13b93a 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.c +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -46,6 +46,31 @@ static void enqueue_data(struct csv_queue *queue, smp_wmb(); } +static void dequeue_data(struct csv_queue *queue, + void *dst, unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + + off &= queue->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(dst, (void *)(queue->data + off), l); + memcpy((void *)((uintptr_t)dst + l), (void *)queue->data, len - l); + + /* + * Make sure that the data is copied before incrementing the + * queue->tail index counter. 
+ */ + smp_wmb(); +} + static unsigned int queue_avail_size(struct csv_queue *queue) { /* @@ -89,3 +114,17 @@ unsigned int csv_enqueue_cmd(struct csv_queue *queue, queue->tail += len; return len; } + +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len) +{ + unsigned int size; + + size = queue->tail - queue->head; + if (len > size) + len = size; + + dequeue_data(queue, buf, len, queue->head); + queue->head += len; + return len; +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h index 6e3c799c09e1..2c99ade02512 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.h +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -17,5 +17,7 @@ int csv_queue_init(struct csv_queue *queue, void csv_queue_cleanup(struct csv_queue *queue); unsigned int csv_enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len); +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len); #endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 6783cf424d56..71f1bfc9a5e5 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -65,6 +65,13 @@ struct csv_cmdptr_entry { u64 cmd_buf_ptr; } __packed; +struct csv_statval_entry { + u16 status; + u16 reserved0; + u32 reserved1; + u64 reserved2; +} __packed; + struct csv_queue { u32 head; u32 tail; @@ -84,6 +91,7 @@ struct csv_ringbuffer_queue { int csv_ring_buffer_queue_init(void); int csv_ring_buffer_queue_free(void); int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); +int csv_check_stat_queue_status(int *psp_ret); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ @@ -91,6 +99,7 @@ static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } static inline int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } +static inline int 
csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ From 9f2e1f5b65683f78a49f0cde19ed978cc7d0bcc3 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:03:54 +0800 Subject: [PATCH 30/99] crypto: ccp: Add support to switch to CSV RING_BUFFER mode hygon inclusion category: feature CVE: NA --------------------------- Invoke RING_BUFFER command will switch CSV firmware to RING_BUFFER mode. When CSV firmware stays in RING_BUFFER mode, it will fetch commands from CSV RING_BUFFER queues which are filled by X86. The CSV firmware will exit RING_BUFFER mode after SHUTDOWN command is completed. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 7395d65eecde88f7da76cd8b7aa0170ed770c98a) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/sev-dev.c --- drivers/crypto/ccp/hygon/csv-dev.c | 55 ++++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 7 ++++ drivers/crypto/ccp/sev-dev.c | 4 +++ include/linux/psp-hygon.h | 40 ++++++++++++++++++++++ 4 files changed, 106 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 23fe783cc208..4280622f9171 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -26,6 +26,8 @@ */ u32 hygon_csv_build; +int csv_comm_mode = CSV_COMM_MAILBOX_ON; + /* * csv_update_api_version used to update the api version of HYGON CSV * firmwareat driver side. 
@@ -43,6 +45,7 @@ int csv_cmd_buffer_len(int cmd) { switch (cmd) { case CSV_CMD_HGSC_CERT_IMPORT: return sizeof(struct csv_data_hgsc_cert_import); + case CSV_CMD_RING_BUFFER: return sizeof(struct csv_data_ring_buffer); default: return 0; } } @@ -220,6 +223,58 @@ const struct file_operations csv_fops = { .unlocked_ioctl = csv_ioctl, }; +/* + * __csv_ring_buffer_enter_locked issues command to switch to RING BUFFER + * mode, the caller must acquire the mutex lock. + */ +static int __maybe_unused __csv_ring_buffer_enter_locked(int *error) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + int ret = 0; + + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + sev = psp->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +void csv_restore_mailbox_mode_postprocess(void) +{ + csv_comm_mode = CSV_COMM_MAILBOX_ON; + csv_ring_buffer_queue_free(); +} + /* * __csv_ring_buffer_queue_init will allocate memory for command queue * and status queue. 
If error occurs, this function will return directly, diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 10c57987f050..187aedef084a 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -16,14 +16,21 @@ #define CSV_FW_FILE "hygon/csv.fw" extern u32 hygon_csv_build; +extern int csv_comm_mode; extern const struct file_operations csv_fops; void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); +void csv_restore_mailbox_mode_postprocess(void); static inline bool csv_version_greater_or_equal(u32 build) { return hygon_csv_build >= build; } +static inline bool csv_in_ring_buffer_mode(void) +{ + return csv_comm_mode == CSV_COMM_RINGBUFFER_ON; +} + #endif /* __CCP_HYGON_CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 89fb07f42391..1ede7b271576 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1695,6 +1695,10 @@ static int __sev_platform_shutdown_locked(int *error) return ret; } + /* RING BUFFER mode exits if a SHUTDOWN command is executed */ + if (is_vendor_hygon() && csv_in_ring_buffer_mode()) + csv_restore_mailbox_mode_postprocess(); + sev->sev_plat_status.state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware shutdown\n"); diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 71f1bfc9a5e5..1ef2d83cf0e0 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -22,10 +22,21 @@ * Guest/platform management commands for CSV */ enum csv_cmd { + CSV_CMD_RING_BUFFER = 0x00F, CSV_CMD_HGSC_CERT_IMPORT = 0x300, CSV_CMD_MAX, }; +/** + * CSV communication state + */ +enum csv_comm_state { + CSV_COMM_MAILBOX_ON = 0x0, + CSV_COMM_RINGBUFFER_ON = 0x1, + + CSV_COMM_MAX +}; + /** * Ring Buffer Mode regions: * There are 4 regions and every region is a 4K area that must be 4K aligned. 
@@ -86,6 +97,35 @@ struct csv_ringbuffer_queue { struct csv_queue stat_val; } __packed; +/** + * struct csv_data_ring_buffer - RING_BUFFER command parameters + * + * @queue_lo_cmdptr_address: physical address of the region to be used for + * low priority queue's CmdPtr ring buffer + * @queue_lo_statval_address: physical address of the region to be used for + * low priority queue's StatVal ring buffer + * @queue_hi_cmdptr_address: physical address of the region to be used for + * high priority queue's CmdPtr ring buffer + * @queue_hi_statval_address: physical address of the region to be used for + * high priority queue's StatVal ring buffer + * @queue_lo_size: size of the low priority queue in 4K pages. Must be 1 + * @queue_hi_size: size of the high priority queue in 4K pages. Must be 1 + * @queue_lo_threshold: queue(low) size, below which an interrupt may be generated + * @queue_hi_threshold: queue(high) size, below which an interrupt may be generated + * @int_on_empty: unconditionally interrupt when both queues are found empty + */ +struct csv_data_ring_buffer { + u64 queue_lo_cmdptr_address; /* In */ + u64 queue_lo_statval_address; /* In */ + u64 queue_hi_cmdptr_address; /* In */ + u64 queue_hi_statval_address; /* In */ + u8 queue_lo_size; /* In */ + u8 queue_hi_size; /* In */ + u16 queue_lo_threshold; /* In */ + u16 queue_hi_threshold; /* In */ + u16 int_on_empty; /* In */ +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int csv_ring_buffer_queue_init(void); From 81cc0b2f9991b8ae4f4db01ff61d0f05ba154f66 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:36:19 +0800 Subject: [PATCH 31/99] crypto: ccp: Add support for issue commands in CSV RING_BUFFER mode hygon inclusion category: feature CVE: NA --------------------------- The CSV firmware stays in Mailbox mode by default. 
Upon successfully switching to CSV RING_BUFFER mode, the semantics of the 3 registers used for communication between X86 and CSV firmware will be changed: - The CmdResp register becomes the RBCtl register. It is only ever written by X86. - The CmdBufAddr_Hi register becomes the RBTail register. It is only ever written by X86. - The CmdBufAddr_Lo register becomes the RBHead register. It should never be written by X86; the PSP will update it. The CSV firmware will exit CSV RING_BUFFER mode when it reads an invalid value from the RBCtl register. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 113f456c6fce07be91fc671de83843c1950d6205) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 132 ++++++++++++++++++++++++++++- drivers/crypto/ccp/hygon/csv-dev.h | 14 +++ drivers/crypto/ccp/hygon/psp-dev.h | 2 + drivers/crypto/ccp/sev-dev.c | 5 +- include/linux/psp-hygon.h | 9 ++ 5 files changed, 160 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 4280622f9171..c0a9a17fe951 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -227,7 +227,7 @@ const struct file_operations csv_fops = { * __csv_ring_buffer_enter_locked issues command to switch to RING BUFFER mode, the caller must acquire the mutex lock. 
*/ -static int __maybe_unused __csv_ring_buffer_enter_locked(int *error) +static int __csv_ring_buffer_enter_locked(int *error) { struct psp_device *psp = psp_master; struct sev_device *sev; @@ -269,6 +269,136 @@ static int __maybe_unused __csv_ring_buffer_enter_locked(int *error) return ret; } +static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev, + unsigned int *reg, + unsigned int timeout) +{ + int ret; + + ret = wait_event_timeout(sev->int_queue, + sev->int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + + *reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + return 0; +} + +static int csv_get_cmd_status(struct sev_device *sev, int prio, int index) +{ + struct csv_queue *queue = &sev->ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)queue->data; + + return statval[index].status; +} + +static int __csv_do_ringbuf_cmds_locked(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int rb_tail; + unsigned int rb_ctl; + int last_cmd_index; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr.tail + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW].cmd_ptr.tail; + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = PSP_RBCTL_X86_WRITES | + PSP_RBCTL_RBMODE_ACT | + PSP_RBCTL_CLR_INTSTAT; + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = 
csv_wait_cmd_ioc_ring_buffer(sev, ®, + (*hygon_psp_hooks.psp_timeout) * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + dev_err(sev->dev, "csv ringbuffer mode command timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + if (psp_ret) { + last_cmd_index = (reg & PSP_RBHEAD_QHI_HEAD_MASK) + >> PSP_RBHEAD_QHI_HEAD_SHIFT; + *psp_ret = csv_get_cmd_status(sev, CSV_COMMAND_PRIORITY_HIGH, + last_cmd_index); + if (*psp_ret == 0) { + last_cmd_index = reg & PSP_RBHEAD_QLO_HEAD_MASK; + *psp_ret = csv_get_cmd_status(sev, + CSV_COMMAND_PRIORITY_LOW, last_cmd_index); + } + } + + return ret; +} + +/* + * csv_do_ringbuf_cmds will enter RING BUFFER mode and handling commands + * queued in RING BUFFER queues, the user is obligate to manage RING + * BUFFER queues including allocate, enqueue and free, etc. + */ +static int csv_do_ringbuf_cmds(int *psp_ret) +{ + struct sev_user_data_status data; + int rc; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + + rc = __csv_ring_buffer_enter_locked(psp_ret); + if (rc) + goto cmd_unlock; + + rc = __csv_do_ringbuf_cmds_locked(psp_ret); + + /* exit ringbuf mode by send CMD in mailbox mode */ + hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +cmd_unlock: + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) +{ + if (!filep || filep->f_op != &csv_fops) + return -EBADF; + + return csv_do_ringbuf_cmds(psp_ret); +} +EXPORT_SYMBOL_GPL(csv_issue_ringbuf_cmds_external_user); + void csv_restore_mailbox_mode_postprocess(void) { csv_comm_mode = CSV_COMM_MAILBOX_ON; diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 187aedef084a..92df6b723b59 100644 --- 
a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -12,9 +12,23 @@ #include #include +#include #define CSV_FW_FILE "hygon/csv.fw" +#define PSP_RBCTL_X86_WRITES BIT(31) +#define PSP_RBCTL_RBMODE_ACT BIT(30) +#define PSP_RBCTL_CLR_INTSTAT BIT(29) +#define PSP_RBTAIL_QHI_TAIL_SHIFT 16 +#define PSP_RBTAIL_QHI_TAIL_MASK 0x7FF0000 +#define PSP_RBTAIL_QLO_TAIL_MASK 0x7FF + +#define PSP_RBHEAD_QHI_HEAD_SHIFT 16 +#define PSP_RBHEAD_QHI_HEAD_MASK 0x7FF0000 +#define PSP_RBHEAD_QLO_HEAD_MASK 0x7FF + +#define PSP_RBHEAD_QPAUSE_INT_STAT BIT(30) + extern u32 hygon_csv_build; extern int csv_comm_mode; extern const struct file_operations csv_fops; diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index 062884218945..072a699fbab8 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -24,6 +24,8 @@ extern struct hygon_psp_hooks_table { bool sev_dev_hooks_installed; struct mutex *sev_cmd_mutex; + bool *psp_dead; + int *psp_timeout; int (*__sev_do_cmd_locked)(int cmd, void *data, int *psp_ret); int (*__sev_platform_init_locked)(int *error); int (*__sev_platform_shutdown_locked)(int *error); diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 1ede7b271576..781ac963e42b 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -162,7 +162,8 @@ static void sev_irq_handler(int irq, void *data, unsigned int status) /* Check if it is SEV command completion: */ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); - if (FIELD_GET(PSP_CMDRESP_RESP, reg)) { + if (FIELD_GET(PSP_CMDRESP_RESP, reg) || + (is_vendor_hygon() && csv_in_ring_buffer_mode())) { sev->int_rcvd = 1; wake_up(&sev->int_queue); } @@ -2714,6 +2715,8 @@ static int sev_misc_init(struct sev_device *sev) static void sev_dev_install_hooks(void) { hygon_psp_hooks.sev_cmd_mutex = &sev_cmd_mutex; + hygon_psp_hooks.psp_dead = &psp_dead; + hygon_psp_hooks.psp_timeout = &psp_timeout; 
hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; hygon_psp_hooks.__sev_platform_init_locked = __sev_platform_init_locked; hygon_psp_hooks.__sev_platform_shutdown_locked = __sev_platform_shutdown_locked; diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 1ef2d83cf0e0..888b7d8677bc 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -11,6 +11,7 @@ #define __PSP_HYGON_H__ #include +#include /*****************************************************************************/ /***************************** CSV interface *********************************/ @@ -133,6 +134,12 @@ int csv_ring_buffer_queue_free(void); int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); int csv_check_stat_queue_status(int *psp_ret); +/** + * csv_issue_ringbuf_cmds_external_user - issue CSV commands into a ring + * buffer. + */ +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); + #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } @@ -140,6 +147,8 @@ static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } static inline int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; } static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ From 6e019f0c5cb42de4895225f4b491d9a45eb26b2d Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 17:51:55 +0800 Subject: [PATCH 32/99] KVM: SVM: Add KVM_CSV_COMMAND_BATCH command for applying CSV RING_BUFFER mode hygon inclusion category: feature CVE: NA --------------------------- The API KVM_CSV_COMMAND_BATCH receives data of structure kvm_csv_command_batch which embeds a linked list of CSV command requests from userspace. 
It will do some preparation works to ensure data available for CSV RING_BUFFER mode, and then issues RING_BUFFER command. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 45269c6cd0ba52984c156facd8173d435f1372de) Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 209 +++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 21 ++++ arch/x86/kvm/svm/sev.c | 1 + include/uapi/linux/kvm.h | 18 ++++ 4 files changed, 249 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index ef69fe72e769..8f1fe1e99100 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -26,6 +26,29 @@ struct hygon_kvm_hooks_table hygon_kvm_hooks; static struct kvm_x86_ops csv_x86_ops; static const char csv_vm_mnonce[] = "VM_ATTESTATION"; +static DEFINE_MUTEX(csv_cmd_batch_mutex); + +static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); + + fdput(f); + return ret; +} + +static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); +} int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) { @@ -93,6 +116,191 @@ int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) return ret; } +static int csv_ringbuf_infos_free(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + if (item) { + if (item->data_vaddr) + kfree((void *)item->data_vaddr); + + if (item->hdr_vaddr) + kfree((void *)item->hdr_vaddr); + + if (item->pages) + hygon_kvm_hooks.sev_unpin_memory(kvm, item->pages, + item->n); + + kfree(item); + + ringbuf_infos->item[i] = NULL; + 
} + } + + return 0; +} + +typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos); +typedef int (*csv_ringbuf_output_fn)(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos); + +static int get_cmd_helpers(__u32 cmd, + csv_ringbuf_input_fn *to_ringbuf_fn, + csv_ringbuf_output_fn *to_user_fn) +{ + int ret = 0; + + /* copy commands to ring buffer*/ + switch (cmd) { + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + int ret; + struct kvm_csv_command_batch params; + uintptr_t node_addr; + struct csv_ringbuf_infos *ringbuf_infos; + csv_ringbuf_input_fn csv_cmd_to_ringbuf_fn = NULL; + csv_ringbuf_output_fn csv_copy_to_user_fn = NULL; + int prio = CSV_COMMAND_PRIORITY_HIGH; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_command_batch))) + return -EFAULT; + + /* return directly if node list is NULL */ + if (!params.csv_batch_list_uaddr) + return 0; + + /* ring buffer init */ + if (csv_ring_buffer_queue_init()) + return -EINVAL; + + if (get_cmd_helpers(params.command_id, + &csv_cmd_to_ringbuf_fn, &csv_copy_to_user_fn)) { + ret = -EINVAL; + goto err_free_ring_buffer; + } + + ringbuf_infos = kzalloc(sizeof(*ringbuf_infos), GFP_KERNEL); + if (!ringbuf_infos) { + ret = -ENOMEM; + goto err_free_ring_buffer; + } + + node_addr = (uintptr_t)params.csv_batch_list_uaddr; + while (node_addr) { + struct kvm_csv_batch_list_node node; + + if (copy_from_user(&node, (void __user *)node_addr, + sizeof(struct kvm_csv_batch_list_node))) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + if (ringbuf_infos->num > SVM_RING_BUFFER_MAX) { + pr_err("%s: ring num is too large:%d, cmd:0x%x\n", + __func__, ringbuf_infos->num, params.command_id); + + ret = -EINVAL; + goto err_free_ring_buffer_infos_items; + } + + if 
(csv_cmd_to_ringbuf_fn(kvm, prio, + (uintptr_t)node.cmd_data_addr, + ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + /* 1st half set to HIGH queue, 2nd half set to LOW queue */ + if (ringbuf_infos->num == SVM_RING_BUFFER_MAX / 2) + prio = CSV_COMMAND_PRIORITY_LOW; + + node_addr = node.next_cmd_addr; + } + + /* ring buffer process */ + ret = csv_issue_ringbuf_cmds(kvm, &argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + ret = csv_check_stat_queue_status(&argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + if (csv_copy_to_user_fn && csv_copy_to_user_fn(kvm, ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + +err_free_ring_buffer_infos_items: + csv_ringbuf_infos_free(kvm, ringbuf_infos); + kfree(ringbuf_infos); + +err_free_ring_buffer: + csv_ring_buffer_queue_free(); + + return ret; +} + +static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) +{ + struct kvm_sev_cmd sev_cmd; + int r; + + if (!hygon_kvm_hooks.sev_hooks_installed || + !(*hygon_kvm_hooks.sev_enabled)) + return -ENOTTY; + + if (!argp) + return 0; + + if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) + return -EFAULT; + + mutex_lock(&kvm->lock); + + switch (sev_cmd.id) { + case KVM_CSV_COMMAND_BATCH: + mutex_lock(&csv_cmd_batch_mutex); + r = csv_command_batch(kvm, &sev_cmd); + mutex_unlock(&csv_cmd_batch_mutex); + break; + default: + /* + * If the command is compatible between CSV and SEV, the + * native implementation of the driver is invoked. + * Release the mutex before calling the native ioctl function + * because it will acquires the mutex. 
+ */ + mutex_unlock(&kvm->lock); + if (likely(csv_x86_ops.mem_enc_ioctl)) + return csv_x86_ops.mem_enc_ioctl(kvm, argp); + } + + if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) + r = -EFAULT; + + mutex_unlock(&kvm->lock); + return r; +} + void csv_exit(void) { } @@ -108,5 +316,6 @@ void __init csv_init(struct kvm_x86_ops *ops) memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); + ops->mem_enc_ioctl = csv_mem_enc_ioctl; ops->vm_attestation = csv_vm_attestation; } diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 655fe457b27f..9b9929277e0f 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -12,6 +12,26 @@ #include +/* same to the ring buffer max num */ +#define SVM_RING_BUFFER_MAX 4094 + +struct csv_ringbuf_info_item { + struct page **pages; + uintptr_t hdr_vaddr; + uintptr_t trans_vaddr; + uintptr_t data_vaddr; + uintptr_t trans_uaddr; + uintptr_t hdr_uaddr; + unsigned long trans_len; + unsigned long hdr_len; + unsigned long n; +}; + +struct csv_ringbuf_infos { + struct csv_ringbuf_info_item *item[SVM_RING_BUFFER_MAX]; + int num; +}; + #ifdef CONFIG_HYGON_CSV /* @@ -20,6 +40,7 @@ */ extern struct hygon_kvm_hooks_table { bool sev_hooks_installed; + bool *sev_enabled; int (*sev_issue_cmd)(struct kvm *kvm, int id, void *data, int *error); unsigned long (*get_num_contig_pages)(unsigned long idx, struct page **inpages, diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 3ff2f22c3420..18fe957aa915 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2988,6 +2988,7 @@ static bool is_sev_snp_initialized(void) /* Code to set all of the function and vaiable pointers */ void sev_install_hooks(void) { + hygon_kvm_hooks.sev_enabled = &sev_enabled; hygon_kvm_hooks.sev_issue_cmd = sev_issue_cmd; hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages; hygon_kvm_hooks.sev_pin_memory = sev_pin_memory; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 52f6000ab020..0a395aadace7 
100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1617,4 +1617,22 @@ struct kvm_pre_fault_memory { __u64 padding[5]; }; +enum csv_cmd_id { + /* HYGON CSV batch command */ + KVM_CSV_COMMAND_BATCH = 0x18, + + KVM_CSV_NR_MAX, +}; + +struct kvm_csv_batch_list_node { + __u64 cmd_data_addr; + __u64 addr; + __u64 next_cmd_addr; +}; + +struct kvm_csv_command_batch { + __u32 command_id; + __u64 csv_batch_list_uaddr; +}; + #endif /* __LINUX_KVM_H */ From 252d84821804c6b4f3394cb5158d8525b7128dbe Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Fri, 30 Jul 2021 18:22:04 +0800 Subject: [PATCH 33/99] KVM: SVM: Prepare memory pool to allocate buffers for KVM_CSV_COMMAND_BATCH hygon inclusion category: feature CVE: NA --------------------------- In the upcoming patches, many buffers need to be allocated in KVM_CSV_COMMAND_BATCH code paths. To avoid memory allocation failures, directly allocate a memory pool in sev_hardware_setup() and free the memory pool in sev_hardware_teardown(). When KVM_CSV_COMMAND_BATCH handling a batch of SEND_UPDATE_DATA/RECEIVE_UPDATE_DATA commands, it will allocate trans buffers from the memory pool. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 89b2f24be82f774c67d1d2da6211fe5ba5e7ddc8) Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 87 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 6 +++ arch/x86/kvm/svm/sev.c | 24 +++++++++--- 3 files changed, 112 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 8f1fe1e99100..63b055fbf868 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -116,6 +116,92 @@ int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) return ret; } +/*--1024--1023--1024--1023--*/ +#define TRANS_MEMPOOL_1ST_BLOCK_OFFSET 0 +#define TRANS_MEMPOOL_2ND_BLOCK_OFFSET (1024 << PAGE_SHIFT) +#define TRANS_MEMPOOL_3RD_BLOCK_OFFSET (2047 << PAGE_SHIFT) +#define TRANS_MEMPOOL_4TH_BLOCK_OFFSET (3071 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCKS_MAX_OFFSET (4094 << PAGE_SHIFT) +#define TRANS_MEMPOOL_BLOCK_NUM 4 +#define TRANS_MEMPOOL_BLOCK_SIZE (1024 * PAGE_SIZE) + +static size_t g_mempool_offset; +void *g_trans_mempool[TRANS_MEMPOOL_BLOCK_NUM] = { 0, }; + +static void csv_reset_mempool_offset(void) +{ + g_mempool_offset = 0; +} + +int csv_alloc_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + WARN_ONCE(g_trans_mempool[i], + "g_trans_mempool[%d] was tainted\n", i); + + g_trans_mempool[i] = kzalloc(TRANS_MEMPOOL_BLOCK_SIZE, GFP_KERNEL); + if (!g_trans_mempool[i]) + goto free_trans_mempool; + } + + csv_reset_mempool_offset(); + return 0; + +free_trans_mempool: + csv_free_trans_mempool(); + pr_warn("Fail to allocate mem pool, CSV(2) live migration will very slow\n"); + + return -ENOMEM; +} + +void csv_free_trans_mempool(void) +{ + int i; + + for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) { + kfree(g_trans_mempool[i]); + g_trans_mempool[i] = NULL; + } + + csv_reset_mempool_offset(); +} + +static void __maybe_unused 
*get_trans_data_from_mempool(size_t size) +{ + void *trans = NULL; + char *trans_data = NULL; + int i; + size_t offset; + + if (g_mempool_offset < TRANS_MEMPOOL_2ND_BLOCK_OFFSET) { + i = 0; + offset = g_mempool_offset - TRANS_MEMPOOL_1ST_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_3RD_BLOCK_OFFSET) { + i = 1; + offset = g_mempool_offset - TRANS_MEMPOOL_2ND_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_4TH_BLOCK_OFFSET) { + i = 2; + offset = g_mempool_offset - TRANS_MEMPOOL_3RD_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_BLOCKS_MAX_OFFSET) { + i = 3; + offset = g_mempool_offset - TRANS_MEMPOOL_4TH_BLOCK_OFFSET; + } else { + pr_err("mempool is full (offset: %lu)\n", g_mempool_offset); + return NULL; + } + + trans_data = (char *)g_trans_mempool[i]; + if (!trans_data) + return NULL; + + trans = &trans_data[offset]; + g_mempool_offset += size; + + return trans; +} + static int csv_ringbuf_infos_free(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos) { @@ -252,6 +338,7 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) err_free_ring_buffer_infos_items: csv_ringbuf_infos_free(kvm, ringbuf_infos); kfree(ringbuf_infos); + csv_reset_mempool_offset(); err_free_ring_buffer: csv_ring_buffer_queue_free(); diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 9b9929277e0f..4747cfeeb379 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -55,11 +55,17 @@ extern struct hygon_kvm_hooks_table { void __init csv_init(struct kvm_x86_ops *ops); void csv_exit(void); +int csv_alloc_trans_mempool(void); +void csv_free_trans_mempool(void); + #else /* !CONFIG_HYGON_CSV */ static inline void __init csv_init(struct kvm_x86_ops *ops) { } static inline void csv_exit(void) { } +static inline int csv_alloc_trans_mempool(void) { return 0; } +static inline void csv_free_trans_mempool(void) { } + #endif /* CONFIG_HYGON_CSV */ #endif /* __SVM_CSV_H */ diff --git a/arch/x86/kvm/svm/sev.c 
b/arch/x86/kvm/svm/sev.c index 18fe957aa915..c5bd29782ab7 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3170,12 +3170,22 @@ void __init sev_hardware_setup(void) sev_supported_vmsa_features |= SVM_SEV_FEAT_SECURE_TSC; #ifdef CONFIG_HYGON_CSV - /* - * Install sev related function and variable pointers hooks only for - * Hygon CPUs. - */ - if (is_x86_vendor_hygon()) + /* Setup resources which are necessary for HYGON CSV */ + if (is_x86_vendor_hygon()) { + /* + * Install sev related function and variable pointers hooks + * no matter @sev_enabled is false. + */ sev_install_hooks(); + + /* + * Allocate a memory pool to speed up live migration of + * the CSV/CSV2 guests. If the allocation fails, no + * acceleration is performed at live migration. + */ + if (sev_enabled) + csv_alloc_trans_mempool(); + } #endif } @@ -3184,6 +3194,10 @@ void sev_hardware_unsetup(void) if (!sev_enabled) return; + /* Free the memory pool that allocated in sev_hardware_setup(). */ + if (is_x86_vendor_hygon()) + csv_free_trans_mempool(); + /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ sev_flush_asids(1, max_sev_asid); From cab194e9e9e0b4da1a20cab1528205c07580619f Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:38:41 +0800 Subject: [PATCH 34/99] KVM: SVM: Add SEND_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH hygon inclusion category: feature CVE: NA --------------------------- When KVM_CSV_COMMAND_BATCH handling a batch of SEND_UPDATE_DATA commands, it need execute 3 steps: 1. Enqueue each SEND_UPDATE_DATA command data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command 3. Copy the output of RING_BUFFER command to userspace In this change, we add sev_send_update_data_to_ringbuf() to prepare input required by RING_BUFFER command as dictated in step 1, and add sev_send_update_data_copy_to_user() to copy output userspace as dictated in step 3. 
Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit cf69e4096ff353e5082051d787562b883def26f4) Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 141 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 1 + arch/x86/kvm/svm/sev.c | 1 + 3 files changed, 143 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 63b055fbf868..e73c681e8213 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -202,6 +203,142 @@ static void __maybe_unused *get_trans_data_from_mempool(size_t size) return trans; } +static int +csv_send_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + struct kvm_sev_send_update_data params; + struct csv_ringbuf_info_item *item; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* + * userspace shouldn't query either header or trans length in ringbuf + * mode. 
+ */ + if (!params.trans_len || !params.hdr_len) + return -EINVAL; + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + /* Pin guest memory */ + guest_page = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); + + /* Allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL); + if (!hdr) + goto e_unpin; + + trans_data = get_trans_data_from_mempool(params.trans_len); + if (!trans_data) + goto e_free_hdr; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans_data); + data->trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= *hygon_kvm_hooks.sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_SEND_UPDATE_DATA, data, 0); + if (ret) + goto e_free; + + /* + * Create item to save page info and pointer, which will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. 
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_free; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->hdr_uaddr = params.hdr_uaddr; + item->hdr_len = params.hdr_len; + item->trans_vaddr = (uintptr_t)trans_data; + item->trans_uaddr = params.trans_uaddr; + item->trans_len = params.trans_len; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num++] = item; + + /* copy to ring buffer success, data freed after commands completed */ + return 0; + +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); +e_unpin: + hygon_kvm_hooks.sev_unpin_memory(kvm, guest_page, n); + return ret; +} + +static int +csv_send_update_data_copy_to_user(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i, ret = 0; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)item->trans_uaddr, + (void *)item->trans_vaddr, item->trans_len)) { + ret = -EFAULT; + break; + } + + /* Copy packet header to userspace. 
*/ + if (copy_to_user((void __user *)item->hdr_uaddr, + (void *)item->hdr_vaddr, item->hdr_len)) { + ret = -EFAULT; + break; + } + } + + return ret; +} + static int csv_ringbuf_infos_free(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos) { @@ -244,6 +381,10 @@ static int get_cmd_helpers(__u32 cmd, /* copy commands to ring buffer*/ switch (cmd) { + case KVM_SEV_SEND_UPDATE_DATA: + *to_ringbuf_fn = csv_send_update_data_to_ringbuf; + *to_user_fn = csv_send_update_data_copy_to_user; + break; default: ret = -EINVAL; break; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 4747cfeeb379..3b543bec6841 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -41,6 +41,7 @@ struct csv_ringbuf_infos { extern struct hygon_kvm_hooks_table { bool sev_hooks_installed; bool *sev_enabled; + unsigned long *sev_me_mask; int (*sev_issue_cmd)(struct kvm *kvm, int id, void *data, int *error); unsigned long (*get_num_contig_pages)(unsigned long idx, struct page **inpages, diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index c5bd29782ab7..00d36ca8a4f0 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2989,6 +2989,7 @@ static bool is_sev_snp_initialized(void) void sev_install_hooks(void) { hygon_kvm_hooks.sev_enabled = &sev_enabled; + hygon_kvm_hooks.sev_me_mask = &sev_me_mask; hygon_kvm_hooks.sev_issue_cmd = sev_issue_cmd; hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages; hygon_kvm_hooks.sev_pin_memory = sev_pin_memory; From 58e5dfe224f9016ad722e69f24413065fda68ee7 Mon Sep 17 00:00:00 2001 From: fangbaoshun Date: Sun, 1 Aug 2021 13:50:54 +0800 Subject: [PATCH 35/99] KVM: SVM: Add RECEIVE_UPDATE_DATA command helper to support KVM_CSV_COMMAND_BATCH hygon inclusion category: feature CVE: NA --------------------------- When KVM_CSV_COMMAND_BATCH handling a batch of RECEIVE_UPDATE_DATA commands, it need execute 2 steps: 1. 
Enqueue each SEND_UPDATE_DATA command data to CSV RING_BUFFER queues (as input of RING_BUFFER command) 2. Issue RING_BUFFER command In this change, we add sev_receive_update_data_to_ringbuf() to prepare input required by RING_BUFFER command as dictated in step 1. Signed-off-by: fangbaoshun Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 92abe9fd515a139c8360e365c63508d9c29449e8) Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 119 +++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 1 + arch/x86/kvm/svm/sev.c | 1 + 3 files changed, 121 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index e73c681e8213..23991f24771f 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -339,6 +339,121 @@ csv_send_update_data_copy_to_user(struct kvm *kvm, return ret; } +static int +csv_receive_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_sev_receive_update_data params; + struct sev_data_receive_update_data *data; + struct csv_ringbuf_info_item *item; + void *hdr = NULL, *trans = NULL; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -EINVAL; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_receive_update_data))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_uaddr || !params.guest_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + ret = -ENOMEM; + trans = 
get_trans_data_from_mempool(params.trans_len); + if (!trans) + goto e_free_hdr; + + if (copy_from_user(trans, (void __user *)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans); + data->trans_len = params.trans_len; + + /* Pin guest memory */ + guest_page = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 1); + if (IS_ERR(guest_page)) { + ret = PTR_ERR(guest_page); + goto e_free; + } + + /* + * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP + * encrypts the written data with the guest's key, and the cache may + * contain dirty, unencrypted data. + */ + hygon_kvm_hooks.sev_clflush_pages(guest_page, n); + + /* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */ + data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + + offset; + data->guest_address |= *hygon_kvm_hooks.sev_me_mask; + data->guest_len = params.guest_len; + data->handle = sev->handle; + + ret = csv_fill_cmd_queue(prio, SEV_CMD_RECEIVE_UPDATE_DATA, data, 0); + + if (ret) + goto e_unpin; + + /* + * Create item to save page info and pointer, whitch will be freed + * in function csv_command_batch because it will be used after PSP + * return for copy_to_user. 
+ */ + item = kzalloc(sizeof(*item), GFP_KERNEL); + if (!item) { + ret = -ENOMEM; + goto e_unpin; + } + + item->pages = guest_page; + item->n = n; + item->hdr_vaddr = (uintptr_t)hdr; + item->trans_vaddr = (uintptr_t)trans; + item->data_vaddr = (uintptr_t)data; + + ringbuf_infos->item[ringbuf_infos->num++] = item; + + /* copy to ring buffer success, data freed after commands completed */ + return 0; + +e_unpin: + hygon_kvm_hooks.sev_unpin_memory(kvm, guest_page, n); +e_free: + kfree(data); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int csv_ringbuf_infos_free(struct kvm *kvm, struct csv_ringbuf_infos *ringbuf_infos) { @@ -385,6 +500,10 @@ static int get_cmd_helpers(__u32 cmd, *to_ringbuf_fn = csv_send_update_data_to_ringbuf; *to_user_fn = csv_send_update_data_copy_to_user; break; + case KVM_SEV_RECEIVE_UPDATE_DATA: + *to_ringbuf_fn = csv_receive_update_data_to_ringbuf; + *to_user_fn = NULL; + break; default: ret = -EINVAL; break; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 3b543bec6841..4494a6941da6 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -51,6 +51,7 @@ extern struct hygon_kvm_hooks_table { int write); void (*sev_unpin_memory)(struct kvm *kvm, struct page **pages, unsigned long npages); + void (*sev_clflush_pages)(struct page *pages[], unsigned long npages); } hygon_kvm_hooks; void __init csv_init(struct kvm_x86_ops *ops); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 00d36ca8a4f0..d7f2dde2dd2f 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2994,6 +2994,7 @@ void sev_install_hooks(void) hygon_kvm_hooks.get_num_contig_pages = get_num_contig_pages; hygon_kvm_hooks.sev_pin_memory = sev_pin_memory; hygon_kvm_hooks.sev_unpin_memory = sev_unpin_memory; + hygon_kvm_hooks.sev_clflush_pages = sev_clflush_pages; hygon_kvm_hooks.sev_hooks_installed = true; } From bc7a43dcd1570d8ed8bde4d1de319dc9ef629e27 Mon Sep 17 00:00:00 2001 From: chench Date: Fri, 15 Mar 2024 17:07:40 
+0800 Subject: [PATCH 36/99] crypto: ccp: Add a new interface for X86 sending command to PSP hygon inclusion category: feature CVE: NA --------------------------- HYGON's fTPM and TDM need to send commands to PSP to complete the firmware function. In order to be compatible with the original kernel code, The command of psp_do_cmd is added to send the command from x86 to PSP. The interface is currently used for fTPM/TDM/TPCM of HYGON. Signed-off-by: chench Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit dc9efc599253d393a007ac39d1d1ad82a8fde583) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/hygon/psp-dev.h drivers/crypto/ccp/sev-dev.c --- drivers/crypto/ccp/hygon/psp-dev.c | 76 ++++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/psp-dev.h | 4 ++ drivers/crypto/ccp/sev-dev.c | 3 ++ include/linux/psp-hygon.h | 4 ++ 4 files changed, 87 insertions(+) diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 8f83e68257bf..15384a227b44 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -11,7 +11,9 @@ * published by the Free Software Foundation. */ +#include #include +#include #include "psp-dev.h" @@ -28,3 +30,77 @@ int fixup_hygon_psp_caps(struct psp_device *psp) psp->capability.security_reporting = 0; return 0; } + +static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; + phys_msb = data ? 
upper_32_bits(__psp_pa(data)) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, &reg, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + return ret; +} + +int psp_do_cmd(int cmd, void *data, int *psp_ret) +{ + int rc; + + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + rc = __psp_do_cmd_locked(cmd, data, psp_ret); + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} +EXPORT_SYMBOL_GPL(psp_do_cmd); diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index 072a699fbab8..c99ca0599c2e 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -26,9 +26,13 @@ extern struct hygon_psp_hooks_table { struct mutex *sev_cmd_mutex; bool *psp_dead; int *psp_timeout; + int *psp_cmd_timeout; + int (*sev_cmd_buffer_len)(int cmd); int (*__sev_do_cmd_locked)(int cmd, void *data, int
*psp_ret); int (*__sev_platform_init_locked)(int *error); int (*__sev_platform_shutdown_locked)(int *error); + int (*sev_wait_cmd_ioc)(struct sev_device *sev, + unsigned int *reg, unsigned int timeout); long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 781ac963e42b..7ef7cbc23ef9 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -2717,9 +2717,12 @@ static void sev_dev_install_hooks(void) hygon_psp_hooks.sev_cmd_mutex = &sev_cmd_mutex; hygon_psp_hooks.psp_dead = &psp_dead; hygon_psp_hooks.psp_timeout = &psp_timeout; + hygon_psp_hooks.psp_cmd_timeout = &psp_cmd_timeout; + hygon_psp_hooks.sev_cmd_buffer_len = sev_cmd_buffer_len; hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; hygon_psp_hooks.__sev_platform_init_locked = __sev_platform_init_locked; hygon_psp_hooks.__sev_platform_shutdown_locked = __sev_platform_shutdown_locked; + hygon_psp_hooks.sev_wait_cmd_ioc = sev_wait_cmd_ioc; hygon_psp_hooks.sev_ioctl = sev_ioctl; hygon_psp_hooks.sev_dev_hooks_installed = true; diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 888b7d8677bc..b9b8f786c373 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -129,6 +129,8 @@ struct csv_data_ring_buffer { #ifdef CONFIG_CRYPTO_DEV_SP_PSP +int psp_do_cmd(int cmd, void *data, int *psp_ret); + int csv_ring_buffer_queue_init(void); int csv_ring_buffer_queue_free(void); int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); @@ -142,6 +144,8 @@ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ +static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } + static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; } static inline From 
bd3ba6efe08350470ae1c88146a04e140975ce32 Mon Sep 17 00:00:00 2001 From: chench Date: Mon, 18 Mar 2024 11:55:07 +0800 Subject: [PATCH 37/99] crypto: ccp: Add another mailbox interrupt support for PSP sending command to X86 hygon inclusion category: feature CVE: NA --------------------------- The existing kernel supports only the interrupt for the mailbox interface for X86 sending commands to PSP and PSP to ack, e.g. the SEV commands. However, some PSP-based security modules in Hygon CPU, such as TPCM and TDM (Trusted Dynamic Measuring), need to send commands/notifications proactively to the X86 core via interrupt and a 2nd mailbox interface. Similar to the existing one, the 2nd mailbox consists of a 32-bit command register and two 32-bit data registers. The PSP interrupt handling needs to add this interrupt support; besides, in order to support user-defined command handlers, a callback registration function is also provided. Up to 16 command callbacks are supported, which are indexed by command IDs. Currently, command ID 0 is assigned to TPCM and 1 to TDM, while others are reserved. Currently, Hygon PSP does not support the bootloader info reg, so remove the value of bootloader_info_reg.
Signed-off-by: chench Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit a942067b1d45941a921f53b860d0aef12a731f54) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/Kconfig | 7 ++ drivers/crypto/ccp/hygon/psp-dev.c | 119 +++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/psp-dev.h | 8 ++ drivers/crypto/ccp/hygon/sp-pci.c | 11 ++- drivers/crypto/ccp/psp-dev.c | 6 +- drivers/crypto/ccp/sp-dev.h | 5 ++ include/linux/psp-hygon.h | 14 ++++ 7 files changed, 168 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index f394e45e11ab..e58ce2c77eb3 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -53,3 +53,10 @@ config CRYPTO_DEV_CCP_DEBUGFS help Expose CCP device information such as operation statistics, feature information, and descriptor queue contents. + +config HYGON_PSP2CPU_CMD + bool "Hygon PSP2CPU Command Interface" + default y + depends on CRYPTO_DEV_SP_PSP + help + Hygon PSP2CPU Command Support diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 15384a227b44..bfb7b9bbcb9b 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -104,3 +104,122 @@ int psp_do_cmd(int cmd, void *data, int *psp_ret) return rc; } EXPORT_SYMBOL_GPL(psp_do_cmd); + +#ifdef CONFIG_HYGON_PSP2CPU_CMD + +static DEFINE_SPINLOCK(p2c_notifier_lock); +static p2c_notifier_t p2c_notifiers[P2C_NOTIFIERS_MAX] = {NULL}; + +int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + + if (cmd_id < P2C_NOTIFIERS_MAX && !p2c_notifiers[cmd_id]) { + p2c_notifiers[cmd_id] = notifier; + ret = 0; + } + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_cmd_notifier); + +int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) 
+{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + + if (cmd_id < P2C_NOTIFIERS_MAX && p2c_notifiers[cmd_id] == notifier) { + p2c_notifiers[cmd_id] = NULL; + ret = 0; + } + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_unregister_cmd_notifier); + +#define PSP2CPU_MAX_LOOP 100 + +static irqreturn_t psp_irq_handler_hygon(int irq, void *data) +{ + struct psp_device *psp = data; + struct sev_device *sev = psp->sev_irq_data; + unsigned int status; + int reg; + unsigned long flags; + int count = 0; + uint32_t p2c_cmd; + uint32_t p2c_lo_data; + uint32_t p2c_hi_data; + uint64_t p2c_data; + + /* Read the interrupt status: */ + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + + while (status && (count++ < PSP2CPU_MAX_LOOP)) { + /* Clear the interrupt status by writing the same value we read. */ + iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); + + /* Check if it is command completion: */ + if (status & SEV_CMD_COMPLETE) { + /* Check if it is SEV command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); + if (reg & PSP_CMDRESP_RESP) { + sev->int_rcvd = 1; + wake_up(&sev->int_queue); + } + } + + if (status & PSP_X86_CMD) { + /* Check if it is P2C command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->p2c_cmdresp_reg); + if (!(reg & PSP_CMDRESP_RESP)) { + p2c_lo_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_lo_reg); + p2c_hi_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_hi_reg); + p2c_data = (((uint64_t)(p2c_hi_data) << 32) + + ((uint64_t)(p2c_lo_data))); + p2c_cmd = (uint32_t)(reg & SEV_CMDRESP_IOC); + if (p2c_cmd < P2C_NOTIFIERS_MAX) { + spin_lock_irqsave(&p2c_notifier_lock, flags); + + if (p2c_notifiers[p2c_cmd]) + p2c_notifiers[p2c_cmd](p2c_cmd, p2c_data); + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + } + + reg |= PSP_CMDRESP_RESP; + iowrite32(reg, psp->io_regs + 
psp->vdata->p2c_cmdresp_reg); + } + } + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + } + + return IRQ_HANDLED; +} + +int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) +{ + return sp_request_psp_irq(sp, psp_irq_handler_hygon, name, data); +} + +#else /* !CONFIG_HYGON_PSP2CPU_CMD */ + +int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data) +{ + return sp_request_psp_irq(sp, handler, name, data); +} + +#endif /* CONFIG_HYGON_PSP2CPU_CMD */ diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index c99ca0599c2e..f5679c1559a9 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -11,12 +11,18 @@ #define __CCP_HYGON_PSP_DEV_H__ #include +#include #include "sp-dev.h" #include "../psp-dev.h" #include "../sev-dev.h" +#ifdef CONFIG_HYGON_PSP2CPU_CMD +#define PSP_X86_CMD BIT(2) +#define P2C_NOTIFIERS_MAX 16 +#endif + /* * Hooks table: a table of function and variable pointers filled in * when psp init. 
@@ -37,5 +43,7 @@ extern struct hygon_psp_hooks_table { } hygon_psp_hooks; int fixup_hygon_psp_caps(struct psp_device *psp); +int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, + const char *name, void *data); #endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-pci.c b/drivers/crypto/ccp/hygon/sp-pci.c index ba3b2448d0b6..6779f9ef0188 100644 --- a/drivers/crypto/ccp/hygon/sp-pci.c +++ b/drivers/crypto/ccp/hygon/sp-pci.c @@ -22,10 +22,14 @@ static const struct sev_vdata csvv1 = { static const struct psp_vdata pspv1 = { .sev = &csvv1, - .bootloader_info_reg = 0x105ec, /* C2PMSG_59 */ .feature_reg = 0x105fc, /* C2PMSG_63 */ .inten_reg = 0x10610, /* P2CMSG_INTEN */ .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif }; static const struct psp_vdata pspv2 = { @@ -33,6 +37,11 @@ static const struct psp_vdata pspv2 = { .feature_reg = 0x105fc, .inten_reg = 0x10670, .intsts_reg = 0x10674, +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif }; #endif diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 4ed402370712..9dca716bcee7 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -283,7 +283,11 @@ int psp_dev_init(struct sp_device *sp) iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg); /* Request an irq */ - ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + if (is_vendor_hygon()) { + ret = sp_request_hygon_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + } else { + ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + } if (ret) { dev_err(dev, "psp: unable to allocate an IRQ\n"); goto e_err; diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 6f9d7063257d..62873b786cc8 100644 
--- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -80,6 +80,11 @@ struct psp_vdata { const unsigned int intsts_reg; const unsigned int bootloader_info_reg; const unsigned int platform_features; +#ifdef CONFIG_HYGON_PSP2CPU_CMD + const unsigned int p2c_cmdresp_reg; + const unsigned int p2c_cmdbuff_addr_lo_reg; + const unsigned int p2c_cmdbuff_addr_hi_reg; +#endif }; /* Structure to hold SP device data */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index b9b8f786c373..b87ad363e91a 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -156,4 +156,18 @@ csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ +typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); + +#ifdef CONFIG_HYGON_PSP2CPU_CMD + +int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier); +int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier); + +#else /* !CONFIG_HYGON_PSP2CPU_CMD */ + +int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) { return -ENODEV; } +int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) { return -ENODEV; } + +#endif /* CONFIG_HYGON_PSP2CPU_CMD */ + #endif /* __PSP_HYGON_H__ */ From 2f441036405aaba18da9380ba1bd3e6deb52a7a0 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 24 May 2022 22:03:04 +0800 Subject: [PATCH 38/99] crypto: ccp: Fix definition of struct sev_data_send_update_vmsa hygon inclusion category: feature CVE: NA --------------------------- The current definition of struct sev_data_send_update_vmsa in include/linux/psp-sev.h does not comply with SEV API spec. Fix it here. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 7444d4aa1a16bcf91f42bf1b691d527663ce7c6d) Signed-off-by: Wentao Guan --- include/linux/psp-sev.h | 1 + 1 file changed, 1 insertion(+) diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index e0dbcb4b4fd9..f3d02cb00daa 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -418,6 +418,7 @@ struct sev_data_send_update_data { */ struct sev_data_send_update_vmsa { u32 handle; /* In */ + u32 reserved1; u64 hdr_address; /* In */ u32 hdr_len; /* In/Out */ u32 reserved2; From e6d6c408fc00bdafc5356c2a0744ea4a71cba4ab Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 8 Apr 2021 08:07:08 -0400 Subject: [PATCH 39/99] KVM: SVM: Add KVM_SEV_SEND_UPDATE_VMSA command hygon inclusion category: feature CVE: NA --------------------------- The command is used for encrypting the VCPU register states of CSV2 guest using the encryption context created with KVM_SEV_SEND_START. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 9c41e741646b97234693742aa12458c894473b94) Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 121 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 +++ 2 files changed, 129 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 23991f24771f..2b5cf5dcf755 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -606,6 +606,118 @@ static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
*/ +static int +__csv_send_update_vmsa_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv_send_update_vmsa *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + int ret; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + return -ENOMEM; + + vmsa->handle = sev->handle; + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, + vmsa, &argp->error); + + params->hdr_len = vmsa->hdr_len; + params->trans_len = vmsa->trans_len; + + if (copy_to_user((void __user *)argp->data, params, + sizeof(struct kvm_csv_send_update_vmsa))) + ret = -EFAULT; + + kfree(vmsa); + return ret; +} + +static int csv_send_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + struct kvm_csv_send_update_vmsa params; + struct kvm_vcpu *vcpu; + void *hdr, *trans_data; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_send_update_vmsa))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return __csv_send_update_vmsa_query_lengths(kvm, argp, &params); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + /* allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) + return ret; + + trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT); + if (!trans_data) + goto e_free_hdr; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + goto e_free_trans_data; + + vmsa->hdr_address = __psp_pa(hdr); + 
vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans_data); + vmsa->trans_len = params.trans_len; + + /* The SEND_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | + *hygon_kvm_hooks.sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, + vmsa, &argp->error); + + if (ret) + goto e_free; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free; + } + + /* Copy packet header to userspace. */ + ret = copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len); + +e_free: + kfree(vmsa); +e_free_trans_data: + kfree(trans_data); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -629,6 +741,15 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) r = csv_command_batch(kvm, &sev_cmd); mutex_unlock(&csv_cmd_batch_mutex); break; + case KVM_SEV_SEND_UPDATE_VMSA: + /* + * Hygon implement the specific interface, although + * KVM_SEV_SEND_UPDATE_VMSA is the command shared by CSV and + * SEV. The struct sev_data_send_update_vmsa is also shared + * by CSV and SEV, we'll use this structure in the code. 
+ */ + r = csv_send_update_vmsa(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 0a395aadace7..7b5e1fe21350 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1635,4 +1635,12 @@ struct kvm_csv_command_batch { __u64 csv_batch_list_uaddr; }; +struct kvm_csv_send_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ From 2cc4a49b04fbc17b272240530a58875bf2a922b7 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 8 Apr 2021 08:39:49 -0400 Subject: [PATCH 40/99] KVM: SVM: Add KVM_SEV_RECEIVE_UPDATE_VMSA command hygon inclusion category: feature CVE: NA --------------------------- The command is used for copying the incoming buffer into the VMSA memory regions of CSV2 guest. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 6be721f27796783bdafad9ea1968547050c96136) Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 86 ++++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 8 ++++ 2 files changed, 94 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 2b5cf5dcf755..801fa1df9527 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -718,6 +718,83 @@ static int csv_send_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +static int csv_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_receive_update_vmsa params; + struct sev_data_receive_update_vmsa *vmsa; + struct kvm_vcpu *vcpu; + void *hdr = NULL, *trans = NULL; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_csv_receive_update_vmsa))) + return -EFAULT; + + if (!params.hdr_uaddr || 
!params.hdr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len); + if (IS_ERR(hdr)) + return PTR_ERR(hdr); + + trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto e_free_hdr; + } + + ret = -ENOMEM; + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + goto e_free_trans; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans); + vmsa->trans_len = params.trans_len; + + /* + * Flush before RECEIVE_UPDATE_VMSA, the PSP encrypts the + * written VMSA memory content with the guest's key), and + * the cache may contain dirty, unencrypted data. + */ + clflush_cache_range(to_svm(vcpu)->sev_es.vmsa, PAGE_SIZE); + + /* The RECEIVE_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | + *hygon_kvm_hooks.sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, + vmsa, &argp->error); + + if (!ret) + vcpu->arch.guest_state_protected = true; + + kfree(vmsa); +e_free_trans: + kfree(trans); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -750,6 +827,15 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) */ r = csv_send_update_vmsa(kvm, &sev_cmd); break; + case KVM_SEV_RECEIVE_UPDATE_VMSA: + /* + * Hygon implement the specific interface, although + * KVM_SEV_RECEIVE_UPDATE_VMSA is the command shared by CSV and + * SEV. 
The struct sev_data_receive_update_vmsa is also shared + * by CSV and SEV, we'll use this structure in the code. + */ + r = csv_receive_update_vmsa(kvm, &sev_cmd); + break; default: /* * If the command is compatible between CSV and SEV, the diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 7b5e1fe21350..147cae254ba2 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1643,4 +1643,12 @@ struct kvm_csv_send_update_vmsa { __u32 trans_len; }; +struct kvm_csv_receive_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ From 8ab2e3d819d4a14d202461a54112411595d833a3 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 7 Apr 2021 02:46:11 -0400 Subject: [PATCH 41/99] KVM: x86: Restore control registers in __set_sregs() to support CSV2 guest live migration hygon inclusion category: feature CVE: NA --------------------------- When migrate CSV2 guest to the recipient, the KVM which on recipient's side needs to update the guest context so that the guest can continues to run. The control register state is necessary for updating the guest context. Allows the control registers to be updated in __set_sregs() so that the CSV2 guest could continue running correctly after migrated to the recipient. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit c69c92e095386288cf2a8c767ebf2721ceb677a5) Signed-off-by: Wentao Guan --- arch/x86/kvm/x86.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 58ae529dab35..8eaee91f1549 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -12321,8 +12321,16 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, if (kvm_apic_set_base(vcpu, sregs->apic_base, true)) return -EINVAL; - if (vcpu->arch.guest_state_protected) + if (vcpu->arch.guest_state_protected) { + /* + * For HYGON CSV2 guest, we need update some regs to support + * live migration. + */ + if (is_x86_vendor_hygon()) + goto skip_dt_cr2_cr3; + return 0; + } dt.size = sregs->idt.limit; dt.address = sregs->idt.base; @@ -12337,6 +12345,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); kvm_x86_call(post_set_cr3)(vcpu, sregs->cr3); +skip_dt_cr2_cr3: kvm_set_cr8(vcpu, sregs->cr8); *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; @@ -12348,6 +12357,9 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; kvm_x86_call(set_cr4)(vcpu, sregs->cr4); + if (vcpu->arch.guest_state_protected) + return 0; + if (update_pdptrs) { idx = srcu_read_lock(&vcpu->kvm->srcu); if (is_pae_paging(vcpu)) { From 4e8fb55fa67cb2156e34701cd7038a0eff628c18 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 15 Jun 2021 11:29:13 +0800 Subject: [PATCH 42/99] KVM: SVM: Export MSR_AMD64_SEV_ES_GHCB to userspace for CSV2 guest hygon inclusion category: feature CVE: NA --------------------------- VMCB.control.ghcb_gpa contains necessary info to support runtime CSV2 guest. At present, it includes the following points: 1. For GHCB MSR protocol, ghcb_gpa stores the negotiation result 2. 
For GHCB page protocol, ghcb_gpa stores the GPA of GHCB page In addition, AP VCPU's SIPI state and GHCB page mapping state are temporarily stored in KVM. When CSV2 guest was migrated to the recipient, KVM needs to restore VMCB.control.ghcb_gpa, VCPU's SIPI state and GHCB page mapping state on the source side. This patch is to support export MSR_AMD64_SEV_ES_GHCB to userspace. KVM can collect all the infos dictated above and return to userspace if userspace request to get MSR_AMD64_SEV_ES_GHCB, and KVM can restore all the infos dictated above if userspace request to set MSR_AMD64_SEV_ES_GHCB. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit ef239c4512ab354af15f8a1946e3ba57633f4eb6) Signed-off-by: Wentao Guan Conflicts: arch/x86/kvm/svm/svm.h arch/x86/kvm/x86.c include/uapi/linux/kvm.h --- arch/x86/kvm/svm/csv.c | 118 +++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 50 +++++++++++++++++ arch/x86/kvm/svm/svm.c | 31 ++++++++++ arch/x86/kvm/svm/svm.h | 4 ++ arch/x86/kvm/vmx/vmx.c | 1 + arch/x86/kvm/x86.c | 13 +++++ include/uapi/linux/kvm.h | 2 + 7 files changed, 219 insertions(+) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 801fa1df9527..b24fa31071e2 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -855,6 +855,124 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) return r; } +static int csv2_map_ghcb_gpa(struct vcpu_svm *svm, u64 ghcb_gpa) +{ + if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { + /* Unable to map GHCB from guest */ + vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n", + ghcb_gpa); + + svm->sev_es.receiver_ghcb_map_fail = true; + return -EINVAL; + } + + svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; + svm->sev_es.receiver_ghcb_map_fail = false; + + pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa); + + return 0; +} + +static bool is_ghcb_msr_protocol(u64 ghcb_val) +{ + 
return !!(ghcb_val & GHCB_MSR_INFO_MASK); +} + +/* + * csv_get_msr return msr data to the userspace. + * + * Return 0 if get msr success. + */ +int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_svm *svm = to_svm(vcpu); + + switch (msr_info->index) { + case MSR_AMD64_SEV_ES_GHCB: + /* Only support userspace get from vmcb.control.ghcb_gpa */ + if (!msr_info->host_initiated || !sev_es_guest(vcpu->kvm)) + return 1; + + msr_info->data = svm->vmcb->control.ghcb_gpa; + + /* Only set status bits when using GHCB page protocol */ + if (msr_info->data && + !is_ghcb_msr_protocol(msr_info->data)) { + if (svm->sev_es.ghcb) + msr_info->data |= GHCB_MSR_MAPPED_MASK; + + if (svm->sev_es.received_first_sipi) + msr_info->data |= + GHCB_MSR_RECEIVED_FIRST_SIPI_MASK; + } + break; + default: + return 1; + } + return 0; +} + +/* + * csv_set_msr set msr data from the userspace. + * + * Return 0 if set msr success. + */ +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 ecx = msr_info->index; + u64 data = msr_info->data; + + switch (ecx) { + case MSR_AMD64_SEV_ES_GHCB: + /* Only support userspace set to vmcb.control.ghcb_gpa */ + if (!msr_info->host_initiated || !sev_es_guest(vcpu->kvm)) + return 1; + + /* + * Value 0 means uninitialized userspace MSR data, userspace + * need get the initial MSR data afterwards. + */ + if (!data) + return 0; + + /* Extract status info when using GHCB page protocol */ + if (!is_ghcb_msr_protocol(data)) { + if (!svm->sev_es.ghcb && (data & GHCB_MSR_MAPPED_MASK)) { + /* + * This happened on the recipient of migration, + * should return error if cannot map the ghcb + * page. 
+ */ + if (csv2_map_ghcb_gpa(to_svm(vcpu), + data & ~GHCB_MSR_KVM_STATUS_MASK)) + return 1; + } + + if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK) + svm->sev_es.received_first_sipi = true; + + data &= ~GHCB_MSR_KVM_STATUS_MASK; + } + + svm->vmcb->control.ghcb_gpa = data; + break; + default: + return 1; + } + return 0; +} + +bool csv_has_emulated_ghcb_msr(struct kvm *kvm) +{ + /* this should be determined after KVM_CREATE_VM. */ + if (kvm && !sev_es_guest(kvm)) + return false; + + return true; +} + void csv_exit(void) { } diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 4494a6941da6..cd64e72c2a3e 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -59,6 +59,15 @@ void csv_exit(void); int csv_alloc_trans_mempool(void); void csv_free_trans_mempool(void); +int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); +bool csv_has_emulated_ghcb_msr(struct kvm *kvm); + +static inline bool csv2_state_unstable(struct vcpu_svm *svm) +{ + return svm->sev_es.receiver_ghcb_map_fail; +} + #else /* !CONFIG_HYGON_CSV */ @@ -67,7 +76,48 @@ static inline void csv_exit(void) { } static inline int csv_alloc_trans_mempool(void) { return 0; } static inline void csv_free_trans_mempool(void) { } +static inline +int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } +static inline +int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } +static inline bool csv_has_emulated_ghcb_msr(struct kvm *kvm) { return false; } +static inline bool csv2_state_unstable(struct vcpu_svm *svm) { return false; } #endif /* CONFIG_HYGON_CSV */ +#include + +/* + * CSV2 live migration support: + * If MSR_AMD64_SEV_ES_GHCB in migration didn't apply GHCB MSR protocol, + * reuse bits [52-63] to indicate vcpu status. The following status are + * currently included: + * * ghcb_map: indicate whether GHCB page was mapped. 
The mapped GHCB + * page may be filled with GPRs before VMRUN, so we must + * remap GHCB page on the recipient's side. + * * received_first_sipi: indicate AP's INIT-SIPI-SIPI stage. Reusing + * these bits for received_first_sipi is acceptable because + * runtime stage of guest's linux only applies GHCB page + * protocol. + * It's unlikely that the migration encounters other stages + * of guest's linux. Once encountered, AP bringup may fail + * which will not impact user payload. + * Other bits keep their original meaning. (See GHCB Spec 2.3.1 for detail) + */ +#define GHCB_MSR_KVM_STATUS_POS 52 +#define GHCB_MSR_KVM_STATUS_BITS 12 +#define GHCB_MSR_KVM_STATUS_MASK \ + ((BIT_ULL(GHCB_MSR_KVM_STATUS_BITS) - 1) \ + << GHCB_MSR_KVM_STATUS_POS) +#define GHCB_MSR_MAPPED_POS 63 +#define GHCB_MSR_MAPPED_BITS 1 +#define GHCB_MSR_MAPPED_MASK \ + ((BIT_ULL(GHCB_MSR_MAPPED_BITS) - 1) \ + << GHCB_MSR_MAPPED_POS) +#define GHCB_MSR_RECEIVED_FIRST_SIPI_POS 62 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_BITS 1 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_MASK \ + ((BIT_ULL(GHCB_MSR_RECEIVED_FIRST_SIPI_BITS) - 1) \ + << GHCB_MSR_RECEIVED_FIRST_SIPI_POS) + #endif /* __SVM_CSV_H */ diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index f617d10ffc75..67f7dea3fb74 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2778,6 +2778,12 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_DE_CFG: msr_info->data = svm->msr_decfg; break; + case MSR_AMD64_SEV_ES_GHCB: + /* HYGON CSV2 support export this MSR to userspace */ + if (is_x86_vendor_hygon()) + return csv_get_msr(vcpu, msr_info); + else + return 1; default: return kvm_get_msr_common(vcpu, msr_info); } @@ -3037,6 +3043,12 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->msr_decfg = data; break; } + case MSR_AMD64_SEV_ES_GHCB: + /* HYGON CSV2 support update this MSR from userspace */ + if (is_x86_vendor_hygon()) + return csv_set_msr(vcpu, msr); + else + return 1; 
default: return kvm_set_msr_common(vcpu, msr); } @@ -4206,6 +4218,19 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags) trace_kvm_entry(vcpu, force_immediate_exit); + /* + * For the recipient side of a CSV2 guest, fake the exit code as SVM_EXIT_ERR + * and return directly if we failed to map the necessary GHCB page. + * When handling the exit code afterwards, it can exit to userspace and + * stop the guest. + */ + if (is_x86_vendor_hygon() && sev_es_guest(vcpu->kvm)) { + if (csv2_state_unstable(svm)) { + svm->vmcb->control.exit_code = SVM_EXIT_ERR; + return EXIT_FASTPATH_NONE; + } + } + svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; @@ -4400,6 +4425,12 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index) if (kvm && sev_es_guest(kvm)) return false; break; + case MSR_AMD64_SEV_ES_GHCB: + /* HYGON CSV2 support emulate this MSR */ + if (is_x86_vendor_hygon()) + return csv_has_emulated_ghcb_msr(kvm); + else + return false; default: break; } diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index dd78e6402345..369df1aae28f 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -249,6 +249,10 @@ struct vcpu_sev_es_state { gpa_t snp_vmsa_gpa; bool snp_ap_waiting_for_reset; bool snp_has_guest_vmsa; +#ifdef CONFIG_HYGON_CSV + /* migrated ghcb mapping state for HYGON CSV2 */ + bool receiver_ghcb_map_fail; +#endif }; struct vcpu_svm { diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 91b6f2f3edc2..6826e472ce48 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -7075,6 +7075,7 @@ bool vmx_has_emulated_msr(struct kvm *kvm, u32 index) return nested; case MSR_AMD64_VIRT_SPEC_CTRL: case MSR_AMD64_TSC_RATIO: + case MSR_AMD64_SEV_ES_GHCB: /* This is AMD only. 
*/ return false; default: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 8eaee91f1549..cf517263740f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -454,6 +454,8 @@ static const u32 emulated_msrs_all[] = { MSR_K7_HWCR, MSR_KVM_POLL_CONTROL, + + MSR_AMD64_SEV_ES_GHCB, }; static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; @@ -5019,6 +5021,17 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_READONLY_MEM: r = kvm ? kvm_arch_has_readonly_mem(kvm) : 1; break; + case KVM_CAP_SEV_ES_GHCB: + r = 0; + + /* Both CSV2 and SEV-ES guests support MSR_AMD64_SEV_ES_GHCB, + * but only CSV2 guest support export to emulate + * MSR_AMD64_SEV_ES_GHCB. + */ + if (is_x86_vendor_hygon()) + r = static_call(kvm_x86_has_emulated_msr)(kvm, + MSR_AMD64_SEV_ES_GHCB); + break; default: break; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 147cae254ba2..19ffd1869c85 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -964,6 +964,8 @@ struct kvm_enable_cap { #define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243 #define KVM_CAP_GUEST_MEMFD_FLAGS 244 +#define KVM_CAP_SEV_ES_GHCB 500 + struct kvm_irq_routing_irqchip { __u32 irqchip; __u32 pin; From b3f63709beb5ca37894fe5d0fa362ebf95cd9e18 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 8 Aug 2023 23:47:22 +0800 Subject: [PATCH 43/99] KVM: x86: Introduce control_{pre,post}_system_reset ioctl interfaces hygon inclusion category: feature CVE: NA --------------------------- In the upcoming patches, we will support for rebooting CSV2 guests. In order to support rebooting CSV2 guest, we will set vcpu->arch.guest_state_protected to false, before VMRUN, so that VMM can initialize vCPU states and VMSA, and then set vcpu->arch.guest_state_protected back to true to bypass unexpected behaviour in KVM. Besides, cache flush is necessary during rebooting a memory encrypted guest. 
Introduce control_{pre,post}_system_reset ioctl interfaces to support rebooting memory encrypted guests correctly. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit f1330df9669a3eca19b380ec136b5f285ac5f986) Signed-off-by: Wentao Guan --- arch/x86/include/asm/kvm-x86-ops.h | 2 ++ arch/x86/include/asm/kvm_host.h | 4 +++- arch/x86/kvm/svm/csv.c | 12 ++++++++++++ arch/x86/kvm/x86.c | 12 ++++++++++++ include/uapi/linux/kvm.h | 4 ++++ 5 files changed, 33 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 2884603769dd..0fac30809b2c 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -148,6 +148,8 @@ KVM_X86_OP_OPTIONAL_RET0(gmem_prepare) KVM_X86_OP_OPTIONAL_RET0(gmem_max_mapping_level) KVM_X86_OP_OPTIONAL(gmem_invalidate) KVM_X86_OP_OPTIONAL(vm_attestation) +KVM_X86_OP_OPTIONAL(control_pre_system_reset) +KVM_X86_OP_OPTIONAL(control_post_system_reset) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 04c1020c23f2..09eaa37e5d4e 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1945,9 +1945,11 @@ struct kvm_x86_ops { void (*gmem_invalidate)(kvm_pfn_t start, kvm_pfn_t end); int (*gmem_max_mapping_level)(struct kvm *kvm, kvm_pfn_t pfn, bool is_private); /* - * Attestation interface for HYGON CSV guest + * Interfaces for HYGON CSV guest */ int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); + int (*control_pre_system_reset)(struct kvm *kvm); + int (*control_post_system_reset)(struct kvm *kvm); }; struct kvm_x86_nested_ops { diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index b24fa31071e2..194e98aa1d3c 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -973,6 +973,16 @@ bool csv_has_emulated_ghcb_msr(struct kvm *kvm) return true; } +static 
int csv_control_pre_system_reset(struct kvm *kvm) +{ + return 0; +} + +static int csv_control_post_system_reset(struct kvm *kvm) +{ + return 0; +} + void csv_exit(void) { } @@ -990,4 +1000,6 @@ void __init csv_init(struct kvm_x86_ops *ops) ops->mem_enc_ioctl = csv_mem_enc_ioctl; ops->vm_attestation = csv_vm_attestation; + ops->control_pre_system_reset = csv_control_pre_system_reset; + ops->control_post_system_reset = csv_control_post_system_reset; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index cf517263740f..7b9ad6ae979d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7613,6 +7613,18 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); break; } + case KVM_CONTROL_PRE_SYSTEM_RESET: + if (kvm_x86_ops.control_pre_system_reset) + r = static_call(kvm_x86_control_pre_system_reset)(kvm); + else + r = -ENOTTY; + break; + case KVM_CONTROL_POST_SYSTEM_RESET: + if (kvm_x86_ops.control_post_system_reset) + r = static_call(kvm_x86_control_post_system_reset)(kvm); + else + r = -ENOTTY; + break; default: r = -ENOTTY; } diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 19ffd1869c85..d5793da178c2 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1653,4 +1653,8 @@ struct kvm_csv_receive_update_vmsa { __u32 trans_len; }; +/* ioctls for control vm during system reset, currently only for CSV */ +#define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) +#define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) + #endif /* __LINUX_KVM_H */ From 49f3a459976cfb1ec61896fd196ccacc7bf5687a Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 15 Apr 2021 07:56:55 -0400 Subject: [PATCH 44/99] KVM: SVM: Add support for rebooting CSV2 guest hygon inclusion category: feature CVE: NA --------------------------- Currently, reboot a CSV2 guest is unsupported because vCPU state is encrypted and can't be initialized when guest reboots to execute OVMF code. 
In order to support reboot a CSV2 guest, make a backup of the encrypted VMSA before booting the guest, and restore VMSA from the backup before rebooting the guest. Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit 44cca0d7bf9342cf3cfd82fb1cf0753e2ffdbf05) Signed-off-by: Wentao Guan Conflicts: arch/x86/kvm/svm/svm.c --- arch/x86/kvm/svm/csv.c | 72 ++++++++++++++++++++++++++++++++++++++++++ arch/x86/kvm/svm/csv.h | 7 +++- arch/x86/kvm/svm/sev.c | 19 +++++++++++ arch/x86/kvm/svm/svm.h | 2 ++ 4 files changed, 99 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 194e98aa1d3c..27a243e814f9 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -855,6 +855,33 @@ static int csv_mem_enc_ioctl(struct kvm *kvm, void __user *argp) return r; } +/* The caller must flush the stale caches about svm->sev_es.vmsa */ +void csv2_sync_reset_vmsa(struct vcpu_svm *svm) +{ + if (svm->sev_es.reset_vmsa) + memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE); +} + +void csv2_free_reset_vmsa(struct vcpu_svm *svm) +{ + if (svm->sev_es.reset_vmsa) { + __free_page(virt_to_page(svm->sev_es.reset_vmsa)); + svm->sev_es.reset_vmsa = NULL; + } +} + +int csv2_setup_reset_vmsa(struct vcpu_svm *svm) +{ + struct page *reset_vmsa_page = NULL; + + reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!reset_vmsa_page) + return -ENOMEM; + + svm->sev_es.reset_vmsa = page_address(reset_vmsa_page); + return 0; +} + static int csv2_map_ghcb_gpa(struct vcpu_svm *svm, u64 ghcb_gpa) { if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { @@ -975,11 +1002,56 @@ bool csv_has_emulated_ghcb_msr(struct kvm *kvm) static int csv_control_pre_system_reset(struct kvm *kvm) { + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + 
return ret; + + vcpu->arch.guest_state_protected = false; + + mutex_unlock(&vcpu->mutex); + } + return 0; } static int csv_control_post_system_reset(struct kvm *kvm) { + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + /* Flush both host and guest caches of VMSA */ + wbinvd_on_all_cpus(); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + memcpy(svm->sev_es.vmsa, svm->sev_es.reset_vmsa, PAGE_SIZE); + + /* Flush encrypted vmsa to memory */ + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + + svm->vcpu.arch.guest_state_protected = true; + svm->sev_es.received_first_sipi = false; + + mutex_unlock(&vcpu->mutex); + } + return 0; } diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index cd64e72c2a3e..3422ece01008 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -62,13 +62,15 @@ void csv_free_trans_mempool(void); int csv_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); bool csv_has_emulated_ghcb_msr(struct kvm *kvm); +void csv2_sync_reset_vmsa(struct vcpu_svm *svm); +void csv2_free_reset_vmsa(struct vcpu_svm *svm); +int csv2_setup_reset_vmsa(struct vcpu_svm *svm); static inline bool csv2_state_unstable(struct vcpu_svm *svm) { return svm->sev_es.receiver_ghcb_map_fail; } - #else /* !CONFIG_HYGON_CSV */ static inline void __init csv_init(struct kvm_x86_ops *ops) { } @@ -82,6 +84,9 @@ static inline int csv_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { return 1; } static inline bool csv_has_emulated_ghcb_msr(struct kvm *kvm) { return false; } static inline bool csv2_state_unstable(struct vcpu_svm *svm) { return false; } +static inline void csv2_sync_reset_vmsa(struct vcpu_svm *svm) { } +static inline void csv2_free_reset_vmsa(struct vcpu_svm *svm) { } +static inline int csv2_setup_reset_vmsa(struct vcpu_svm 
*svm) { return 0; } #endif /* CONFIG_HYGON_CSV */ diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index d7f2dde2dd2f..d90ba9ffae20 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -1000,6 +1000,17 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, fpstate_set_confidential(&vcpu->arch.guest_fpu); vcpu->arch.guest_state_protected = true; + /* + * Backup encrypted vmsa to support rebooting CSV2 guest. The + * clflush_cache_range() is necessary to invalidate prefetched + * memory area pointed by svm->sev_es.vmsa so that we can read + * fresh memory updated by PSP. + */ + if (is_x86_vendor_hygon()) { + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + csv2_sync_reset_vmsa(svm); + } + /* * SEV-ES guest mandates LBR Virtualization to be _always_ ON. Enable it * only after setting guest_state_protected because KVM_SET_MSRS allows @@ -3306,6 +3317,9 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) skip_vmsa_free: if (svm->sev_es.ghcb_sa_free) kvfree(svm->sev_es.ghcb_sa); + + if (is_x86_vendor_hygon()) + csv2_free_reset_vmsa(svm); } static u64 kvm_get_cached_sw_exit_code(struct vmcb_control_area *control) @@ -4702,6 +4716,11 @@ int sev_vcpu_create(struct kvm_vcpu *vcpu) if (!vmsa_page) return -ENOMEM; + if (is_x86_vendor_hygon()) { + if (csv2_setup_reset_vmsa(svm)) + return -ENOMEM; + } + svm->sev_es.vmsa = page_address(vmsa_page); vcpu->arch.guest_tsc_protected = snp_is_secure_tsc_enabled(vcpu->kvm); diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h index 369df1aae28f..2f2de54109bd 100644 --- a/arch/x86/kvm/svm/svm.h +++ b/arch/x86/kvm/svm/svm.h @@ -252,6 +252,8 @@ struct vcpu_sev_es_state { #ifdef CONFIG_HYGON_CSV /* migrated ghcb mapping state for HYGON CSV2 */ bool receiver_ghcb_map_fail; + /* CSV2 reboot vmsa */ + struct vmcb_save_area *reset_vmsa; #endif }; From efb24489dc9aec36813bb73d4cfc1772380ba6e7 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sat, 6 May 2023 16:01:25 +0800 Subject: [PATCH 45/99] 
KVM: SVM: Force flush caches before reboot CSV guest hygon inclusion category: feature CVE: NA --------------------------- For memory encrypted guest, its pages' encrypt status will changed at runtime. When user reboot the guest, the pages' encrypt status during last boot were ignored. So during the boot flow of reboot, there may be 2 versions of memory data lies in cache as follows: +--------+ | | | | +--------------+ --+ | | | | \ |________| | | \ cacheline for -> |________| <-+ | | \ pa1(c=0) | | \ |______________| \ | | \_ 64 bytes aligned <- pa1 \ | | _ |______________| 4K | | / | | page cacheline for |________| / | | / pa1(c=1) -> |________| <-+ | | / | | | | / | | | | / | | | | / | | +--------------+ --+ | | | | If the older version cache was flushed after that of newer version, and guest read the memory again, then it will get corrupted data and may lead to crash. In this change, for any memory encrypted guest, the cache is forcibly flushed to memory before the next boot flow, which ensures that memory access is up-to-date. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit aeab58712cb5102684c22ad4262c3d7cccf468d2) Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 27a243e814f9..65e8075bc334 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1028,12 +1028,15 @@ static int csv_control_post_system_reset(struct kvm *kvm) unsigned long i; int ret; - if (!sev_es_guest(kvm)) + if (!sev_guest(kvm)) return 0; - /* Flush both host and guest caches of VMSA */ + /* Flush both host and guest caches before next boot flow */ wbinvd_on_all_cpus(); + if (!sev_es_guest(kvm)) + return 0; + kvm_for_each_vcpu(i, vcpu, kvm) { struct vcpu_svm *svm = to_svm(vcpu); From 4aaeb19755bd1139c38cb9fda120c04e352ff27f Mon Sep 17 00:00:00 2001 From: hanliyang Date: Tue, 6 Aug 2024 19:47:04 +0800 Subject: [PATCH 46/99] deepin_x86_desktop_defconfig: Set CONFIG_CSV_GUEST=m by default hygon inclusion category: feature CVE: NA --------------------------- Configure CONFIG_CSV_GUEST=m so that the CSV guest can acquire attestation report when this kernel is used as guest kernel. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/354 (cherry picked from commit e076d81db62573db13a0d0af34bd5aed4ae35fe7) Signed-off-by: Wentao Guan --- arch/x86/configs/deepin_x86_desktop_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index 2c6145a09b69..b1ac98452dc7 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -4198,6 +4198,7 @@ CONFIG_VFIO_PCI_VGA=y CONFIG_VIRT_DRIVERS=y CONFIG_VBOXGUEST=m CONFIG_NITRO_ENCLAVES=m +CONFIG_CSV_GUEST=m CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_VDPA=m CONFIG_VIRTIO_PMEM=m From 4fac841c4206e283cd4b21fbd0ade380312d8fff Mon Sep 17 00:00:00 2001 From: Wentao Guan Date: Mon, 22 Dec 2025 20:44:08 +0800 Subject: [PATCH 47/99] KVM: SVM: convert to fd_file() Log: commit 1da91ea87aefe2c25b68c9f96947a9271ba6325d Author: Al Viro Date: Fri May 31 14:12:01 2024 -0400 introduce fd_file(), convert all accessors to it. For any changes of struct fd representation we need to turn existing accesses to fields into calls of wrappers. Accesses to struct fd::flags are very few (3 in linux/file.h, 1 in net/socket.c, 3 in fs/overlayfs/file.c and 3 more in explicit initializers). Those can be dealt with in the commit converting to new layout; accesses to struct fd::file are too many for that. This commit converts (almost) all of f.file to fd_file(f). It's not entirely mechanical ('file' is used as a member name more than just in struct fd) and it does not even attempt to distinguish the uses in pointer context from those in boolean context; the latter will be eventually turned into a separate helper (fd_empty()). NOTE: mass conversion to fd_empty(), tempting as it might be, is a bad idea; better do that piecewise in commit that convert from fdget...() to CLASS(...). 
[conflicts in fs/fhandle.c, kernel/bpf/syscall.c, mm/memcontrol.c caught by git; fs/stat.c one got caught by git grep] Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index 65e8075bc334..b37834d20e39 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -35,10 +35,10 @@ static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) int ret; f = fdget(fd); - if (!f.file) + if (!fd_file(f)) return -EBADF; - ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); + ret = csv_issue_ringbuf_cmds_external_user(fd_file(f), psp_ret); fdput(f); return ret; From 89fce7906bca1e20860a26f2b1aea172b636f00b Mon Sep 17 00:00:00 2001 From: Wentao Guan Date: Mon, 22 Dec 2025 20:56:49 +0800 Subject: [PATCH 48/99] KVM: SEV: hygon: Use long-term pin when registering encrypted memory regions Adapt hygon patchi for kernel v6.18. Log: commit 7e066cb9b71a22c3e5ef233de63ff14525baf6f0 Author: Ge Yang Date: Tue Feb 11 10:37:03 2025 +0800 KVM: SEV: Use long-term pin when registering encrypted memory regions When registering an encrypted memory region for SEV-MEM/SEV-ES guests, pin the pages with FOLL_TERM so that the pages are migrated out of MIGRATE_CMA/ZONE_MOVABLE. Failure to do so violates the CMA/MOVABLE mechanisms and can result in fragmentation due to unmovable pages, e.g. can make CMA allocations fail. 
Signed-off-by: Ge Yang Reviewed-by: Tom Lendacky Acked-by: David Hildenbrand Link: https://lore.kernel.org/r/1739241423-14326-1-git-send-email-yangge1116@126.com [sean: massage changelog, make @flags an unsigned int] Signed-off-by: Sean Christopherson Link: https://github.com/deepin-community/kernel/pull/354 Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 6 +++--- arch/x86/kvm/svm/csv.h | 2 +- arch/x86/kvm/svm/sev.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index b37834d20e39..e5de6ef46995 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -51,7 +51,7 @@ static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); } -int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) +static int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; struct sev_data_attestation_report *data = NULL; @@ -72,7 +72,7 @@ int csv_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len) } guest_uaddr = gfn_to_hva(kvm, gpa_to_gfn(gpa)); - pages = hygon_kvm_hooks.sev_pin_memory(kvm, guest_uaddr, len, &n, 1); + pages = hygon_kvm_hooks.sev_pin_memory(kvm, guest_uaddr, len, &n, FOLL_WRITE); if (IS_ERR(pages)) return PTR_ERR(pages); @@ -397,7 +397,7 @@ csv_receive_update_data_to_ringbuf(struct kvm *kvm, /* Pin guest memory */ guest_page = hygon_kvm_hooks.sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, - PAGE_SIZE, &n, 1); + PAGE_SIZE, &n, FOLL_WRITE); if (IS_ERR(guest_page)) { ret = PTR_ERR(guest_page); goto e_free; diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 3422ece01008..8ae2bc015f41 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -48,7 +48,7 @@ extern struct hygon_kvm_hooks_table { unsigned long npages); struct page **(*sev_pin_memory)(struct kvm *kvm, unsigned long uaddr, 
unsigned long ulen, unsigned long *n, - int write); + unsigned int flag); void (*sev_unpin_memory)(struct kvm *kvm, struct page **pages, unsigned long npages); void (*sev_clflush_pages)(struct page *pages[], unsigned long npages); diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index d90ba9ffae20..d4b65f847fee 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -2997,7 +2997,7 @@ static bool is_sev_snp_initialized(void) #ifdef CONFIG_HYGON_CSV /* Code to set all of the function and vaiable pointers */ -void sev_install_hooks(void) +static void sev_install_hooks(void) { hygon_kvm_hooks.sev_enabled = &sev_enabled; hygon_kvm_hooks.sev_me_mask = &sev_me_mask; From 3986326d871edf25a866e80739da12ffe9f27b40 Mon Sep 17 00:00:00 2001 From: WangYuli Date: Tue, 13 Aug 2024 11:27:36 +0800 Subject: [PATCH 49/99] x86: config: Increase maximum number of CPUs to 512 The default number is 64. At a cloud environment, you may be assigned 128 or more CPUs so that 64 is not enough. 
Reported-by: Zhao Ou Signed-off-by: WangYuli Link: https://github.com/deepin-community/kernel/pull/360 (cherry picked from commit ee971e684727f2df40e9023caba6dc2f8458d051) Signed-off-by: Wentao Guan --- arch/x86/configs/deepin_x86_desktop_defconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index b1ac98452dc7..7ef0a68d92b6 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -65,6 +65,7 @@ CONFIG_JAILHOUSE_GUEST=y CONFIG_ACRN_GUEST=y CONFIG_PROCESSOR_SELECT=y CONFIG_GART_IOMMU=y +CONFIG_NR_CPUS=512 CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y CONFIG_X86_MCELOG_LEGACY=y CONFIG_PERF_EVENTS_INTEL_RAPL=m From 86d3f97c7769e302ac0565642080b8a4015ab9d5 Mon Sep 17 00:00:00 2001 From: WangYuli Date: Sun, 11 Aug 2024 10:36:05 +0800 Subject: [PATCH 50/99] deepin: Add OWNERS for deepin kernel Introduce OWNERS files to streamline the code review process and foster greater involvement from members of deepin/UOS kernel development team in deepin kernel development efforts. 
Link: https://www.kubernetes.dev/docs/guide/owners/ Link: https://github.com/deepin-community/template-repository Signed-off-by: WangYuli Link: https://github.com/deepin-community/kernel/pull/358 (cherry picked from commit 3e8422bb9d7d5713fa5ba2644f15922bb8ba2269) Signed-off-by: Wentao Guan Conflicts: MAINTAINERS --- MAINTAINERS | 5 +++++ arch/loongarch/OWNERS | 5 +++++ arch/mips/OWNERS | 5 +++++ deepin/OWNERS | 26 ++++++++++++++++++++++++++ drivers/OWNERS | 5 +++++ drivers/bluetooth/OWNERS | 4 ++++ drivers/gpu/OWNERS | 5 +++++ drivers/net/OWNERS | 4 ++++ mm/OWNERS | 4 ++++ net/OWNERS | 4 ++++ security/OWNERS | 4 ++++ 11 files changed, 71 insertions(+) create mode 100644 arch/loongarch/OWNERS create mode 100644 arch/mips/OWNERS create mode 100644 deepin/OWNERS create mode 100644 drivers/OWNERS create mode 100644 drivers/bluetooth/OWNERS create mode 100644 drivers/gpu/OWNERS create mode 100644 drivers/net/OWNERS create mode 100644 mm/OWNERS create mode 100644 net/OWNERS create mode 100644 security/OWNERS diff --git a/MAINTAINERS b/MAINTAINERS index 1c4e69e014e3..d31e3bd159ce 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6878,6 +6878,11 @@ F: arch/mips/dec/ F: arch/mips/include/asm/dec/ F: arch/mips/include/asm/mach-dec/ +DEEPIN OWNERS +M: "WangYuli" +S: Maintained +F: OWNERS + DEFXX FDDI NETWORK DRIVER M: "Maciej W. 
Rozycki" S: Maintained diff --git a/arch/loongarch/OWNERS b/arch/loongarch/OWNERS new file mode 100644 index 000000000000..a808047489ea --- /dev/null +++ b/arch/loongarch/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- allinaent +- JohnsPony \ No newline at end of file diff --git a/arch/mips/OWNERS b/arch/mips/OWNERS new file mode 100644 index 000000000000..a808047489ea --- /dev/null +++ b/arch/mips/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- allinaent +- JohnsPony \ No newline at end of file diff --git a/deepin/OWNERS b/deepin/OWNERS new file mode 100644 index 000000000000..49189a40db39 --- /dev/null +++ b/deepin/OWNERS @@ -0,0 +1,26 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- Avenger-285714 +- matrix-wsk +- MingcongBai +- opsiff +emeritus_approvers: +- deepin-community/deepin-sysdev-team +- hudeng-go +- UTsweetyfish +- xzl01 +- YukariChiba +- zccrs +- Zeno-sole +reviewers: +- BLumia +- chenchongbiao +- Clansty +- huangbibo +- justforlxz +- morduang +- myml +- Rabenda +- shy129 +- Wenlp \ No newline at end of file diff --git a/drivers/OWNERS b/drivers/OWNERS new file mode 100644 index 000000000000..a808047489ea --- /dev/null +++ b/drivers/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- allinaent +- JohnsPony \ No newline at end of file diff --git a/drivers/bluetooth/OWNERS b/drivers/bluetooth/OWNERS new file mode 100644 index 000000000000..ddece097a4bf --- /dev/null +++ b/drivers/bluetooth/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- hello666888999 \ No newline at end of file diff --git a/drivers/gpu/OWNERS b/drivers/gpu/OWNERS new file mode 100644 index 000000000000..b2865d133224 --- /dev/null +++ b/drivers/gpu/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- hongaoo +- Ink-Paper \ No newline at end of file diff --git 
a/drivers/net/OWNERS b/drivers/net/OWNERS new file mode 100644 index 000000000000..ddece097a4bf --- /dev/null +++ b/drivers/net/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- hello666888999 \ No newline at end of file diff --git a/mm/OWNERS b/mm/OWNERS new file mode 100644 index 000000000000..bb88b15201bd --- /dev/null +++ b/mm/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- xu-lang \ No newline at end of file diff --git a/net/OWNERS b/net/OWNERS new file mode 100644 index 000000000000..ddece097a4bf --- /dev/null +++ b/net/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- hello666888999 \ No newline at end of file diff --git a/security/OWNERS b/security/OWNERS new file mode 100644 index 000000000000..bb88b15201bd --- /dev/null +++ b/security/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- xu-lang \ No newline at end of file From 24e676910dd9936fdd22edd6a86047cf70a0b601 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 7 Aug 2024 16:06:34 +0800 Subject: [PATCH 51/99] KVM: SVM: CSV: Explicitly enable LBR Virtualization after succeed to RECEIVE_UPDATE_VMSA hygon inclusion category: feature CVE: NA --------------------------- Before the commit b7e4be0a224f ("KVM: SEV-ES: Delegate LBR virtualization to the processor"), the LBR Virtualization is enabled during init VMCB: init_vmcb() -> sev_init_vmcb() -> sev_es_init_vmcb() While the commit b7e4be0a224f ("KVM: SEV-ES: Delegate LBR virtualization to the processor") enable LBR Virtualization after succeed to LAUNCH_UPDATE_VMSA for each vCPUs. The process to enable LBR Virtualization will not be executed in common code path. To ensure the CSV2 guest to work properly after migrated to target machine, we should explicitly to enable LBR Virtualization after succeed to RECEIVE_UPDATE_VMSA for each vCPUs. 
Fixes: b7e4be0a224f ("KVM: SEV-ES: Delegate LBR virtualization to the processor") Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/362 (cherry picked from commit 8f5efcd0a7072c77f36227187a61f1a38cc90df4) Signed-off-by: Wentao Guan --- arch/x86/kvm/svm/csv.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index e5de6ef46995..a98bb1a6bd63 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -783,9 +783,19 @@ static int csv_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) ret = hygon_kvm_hooks.sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, vmsa, &argp->error); - if (!ret) + if (!ret) { vcpu->arch.guest_state_protected = true; + /* + * CSV2 guest mandates LBR Virtualization to be _always_ ON. + * Enable it only after setting guest_state_protected because + * KVM_SET_MSRS allows dynamic toggling of LBRV (for performance + * reason) on write access to MSR_IA32_DEBUGCTLMSR when + * guest_state_protected is not set. 
+ */ + svm_enable_lbrv(vcpu); + } + kfree(vmsa); e_free_trans: kfree(trans); From 5d74b7214d3f9903fda5cb1d9e2c2d84805b28e0 Mon Sep 17 00:00:00 2001 From: Wentao Guan Date: Tue, 13 Aug 2024 14:25:36 +0800 Subject: [PATCH 52/99] config: enable support for MT7925 Signed-off-by: Wentao Guan Link: https://github.com/deepin-community/kernel/pull/363 (cherry picked from commit ebf2e3a2dd0225cce94c4b7c59d482604d93d89a) Signed-off-by: Wentao Guan --- arch/arm64/configs/deepin_arm64_desktop_defconfig | 2 ++ arch/loongarch/configs/deepin_loongarch_desktop_defconfig | 2 ++ arch/x86/configs/deepin_x86_desktop_defconfig | 2 ++ 3 files changed, 6 insertions(+) diff --git a/arch/arm64/configs/deepin_arm64_desktop_defconfig b/arch/arm64/configs/deepin_arm64_desktop_defconfig index bf4586bdea4f..ead64c49ae53 100644 --- a/arch/arm64/configs/deepin_arm64_desktop_defconfig +++ b/arch/arm64/configs/deepin_arm64_desktop_defconfig @@ -1663,6 +1663,8 @@ CONFIG_MT7921E=m CONFIG_MT7921S=m CONFIG_MT7921U=m CONFIG_MT7996E=m +CONFIG_MT7925E=m +CONFIG_MT7925U=m CONFIG_RT2X00=m CONFIG_RT2400PCI=m CONFIG_RT2500PCI=m diff --git a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig index 43f7b114bbbf..577ae807ea35 100644 --- a/arch/loongarch/configs/deepin_loongarch_desktop_defconfig +++ b/arch/loongarch/configs/deepin_loongarch_desktop_defconfig @@ -1710,6 +1710,8 @@ CONFIG_MT7921E=m CONFIG_MT7921S=m CONFIG_MT7921U=m CONFIG_MT7996E=m +CONFIG_MT7925E=m +CONFIG_MT7925U=m CONFIG_WILC1000_SDIO=m CONFIG_WILC1000_SPI=m CONFIG_WILC1000_HW_OOB_INTR=y diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index 7ef0a68d92b6..12769841cd3b 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -1495,6 +1495,8 @@ CONFIG_MT7921E=m CONFIG_MT7921S=m CONFIG_MT7921U=m CONFIG_MT7996E=m +CONFIG_MT7925E=m +CONFIG_MT7925U=m 
CONFIG_WILC1000_SDIO=m CONFIG_WILC1000_SPI=m CONFIG_WILC1000_HW_OOB_INTR=y From 348050feb34eb2a569c918d7bea680fc133bf982 Mon Sep 17 00:00:00 2001 From: chench00 Date: Thu, 1 Aug 2024 20:58:24 +0800 Subject: [PATCH 53/99] crypto: tdm: Add Hygon TDM driver MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9B9XS CVE: NA --------------------------- TDM(Trusted Dynamic Measurement) is a module designed and implemented by HYGON in its X86 CPU's embedded secure processor, providing dynamical measurement service to X86 side aiming at memory that needs to be protected, e.g. the memory area kernel code resides. With this new feature, the goal of protecting any specified memory dynamically in the runtime can be achieved. When the protected memory is modified illegally, TDM will detect the event immediately and give an alarm in the form of an exception, meantime, the abnormal information is recorded inside the TDM for subsequent audit or remote attestation. The TDM driver mainly implements the following functions: (1) Send the required memory block information and configuration information to TDM device for protection; (2) Manage the further distribution of exceptions when TDM detects illegal memory modification and an exception is triggered. (3) Record abnormal information for subsequent audit or attestation. 
Signed-off-by: chench Link: https://github.com/deepin-community/kernel/pull/366 (cherry picked from commit 05663c6aa1e7d4e5cd98dd505b716a0e6de8e55b) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/Kconfig | 8 + drivers/crypto/ccp/Makefile | 2 + drivers/crypto/ccp/hygon/tdm-dev.c | 1595 ++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/tdm-dev.h | 504 +++++++++ drivers/crypto/ccp/psp-dev.c | 16 + 5 files changed, 2125 insertions(+) create mode 100644 drivers/crypto/ccp/hygon/tdm-dev.c create mode 100644 drivers/crypto/ccp/hygon/tdm-dev.h diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index e58ce2c77eb3..41e7d5e9361c 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -60,3 +60,11 @@ config HYGON_PSP2CPU_CMD depends on CRYPTO_DEV_SP_PSP help Hygon PSP2CPU Command Support + +config TDM_DEV_HYGON + bool "Hygon TDM Interface" + default y + depends on CRYPTO_DEV_CCP_DD + depends on HYGON_PSP2CPU_CMD + help + Hygon TDM driver diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 1d05191e811f..4faf398075bd 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -20,6 +20,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ hygon/csv-dev.o \ hygon/ring-buffer.o +ccp-$(CONFIG_TDM_DEV_HYGON) += hygon/tdm-dev.o + obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes.o \ diff --git a/drivers/crypto/ccp/hygon/tdm-dev.c b/drivers/crypto/ccp/hygon/tdm-dev.c new file mode 100644 index 000000000000..42322c6bd4ae --- /dev/null +++ b/drivers/crypto/ccp/hygon/tdm-dev.c @@ -0,0 +1,1595 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "tdm: " fmt + +#define TDM_CMD_ID_MAX 16 +#define TDM2PSP_CMD(id) (0x110 | (id)) +#define TDM_P2C_CMD_ID 1 +#define TDM_C2P_CMD_SIZE (3*PAGE_SIZE) +#define TDM_KFIFO_SIZE 1024 + +#define TDM_IOC_TYPE 'D' +#define TDM_CMD_LEN_LIMIT (1U << 12) + +struct context_message { + uint32_t flag; + uint32_t pid; + uint8_t comm[16]; + uint8_t module_name[64]; +}; + +struct tdm_task_head { + struct list_head head; + rwlock_t lock; +}; + +struct tdm_task_ctx { + uint32_t task_id; + uint32_t cmd_ctx_flag; + measure_exception_handler_t handler; + struct list_head list; +}; + +static struct tdm_task_head dyn_head; +static unsigned int p2c_cmd_id = TDM_P2C_CMD_ID; +static struct task_struct *kthread; +static DECLARE_KFIFO(kfifo_error_task, unsigned char, TDM_KFIFO_SIZE); +static spinlock_t kfifo_lock; +static int tdm_support; +static int tdm_init_flag; +static int tdm_destroy_flag; + +static int list_check_exist(uint32_t task_id) +{ + int found = 0; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) { + found = 1; + break; + } + } + read_unlock(lock); + + return found; +} + +static int list_enqueue(void *entry) +{ + int ret = 0; + struct list_head *head, *entry_list = NULL; + rwlock_t *lock = NULL; + + if (!entry) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + entry_list = &(((struct tdm_task_ctx *)entry)->list); + + write_lock(lock); + if (entry_list) + list_add_tail(entry_list, head); + write_unlock(lock); + +end: + return 0; +} + 
+static __maybe_unused int list_print(void) +{ + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + pr_info("id: %d ", task_node->task_id); + } + read_unlock(lock); + pr_info("\n"); + + return 0; +} + +static int measure_exception_handling_thread(void *data) +{ + int ret = 0; + int copied = 0; + uint32_t error_task_id = 0xffffffff; + struct measure_status task_measure_status; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + pr_info("Thread started for measurement exception handler dispatching...\n"); + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + + while (!kfifo_is_empty(&kfifo_error_task)) { + copied = kfifo_out_spinlocked(&kfifo_error_task, + (unsigned char *)&error_task_id, sizeof(uint32_t), &kfifo_lock); + if (copied != sizeof(uint32_t)) { + ret = -DYN_ERR_API; + pr_err("kfifio_out exception,return\n"); + goto end; + } + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == error_task_id) + break; + } + read_unlock(lock); + + if (!task_node) { + ret = -DYN_NULL_POINTER; + pr_err("task_node is null,return\n"); + goto end; + } + + if (task_node->task_id == error_task_id) { + if (task_node->handler) { + pr_info("-----Measurement exception handler dispatching " + "thread------\n"); + pr_info("Measurement exception received for task %d\n", + error_task_id); + pr_info("Step1: Query PSP for task %d status to confirm " + "the error.\n", error_task_id); + pr_info("Step2: Error confirmed, CALL measurement " + "exception handler.\n"); + ret = psp_query_measure_status(error_task_id, + &task_measure_status); + if (ret) { + pr_err("task_id %d status query 
failed\n", + error_task_id); + goto end; + } + + if (task_measure_status.error == MER_ERR) { + /*error--1 normal--0 */ + pr_info("Error detected for task %d, " + "action TODO!\n", error_task_id); + pr_info("----Measurement exception handler----\n"); + task_node->handler(error_task_id); + pr_info("Exit measurement exception handler.\n"); + } else { + pr_info("No error detected for task %d, please " + "check it again!\n", error_task_id); + } + } else { + pr_err("task %d's callback function is not registered, " + "please check it\n", error_task_id); + } + } + } + } +end: + return ret; +} + +static int tdm_interrupt_handler(uint32_t id, uint64_t data) +{ + if (kthread) { + kfifo_in_spinlocked(&kfifo_error_task, (unsigned char *)&data, sizeof(uint32_t), + &kfifo_lock); + wake_up_process(kthread); + } + + return 0; +} + +static int tdm_do_cmd(unsigned int cmd_id, void *cmd_data, int *error) +{ + if (cmd_id >= TDM_CMD_ID_MAX) { + pr_err("%s cmd_id %u beyond limit\n", __func__, cmd_id); + return -DYN_BEYOND_MAX; + } + + return psp_do_cmd(TDM2PSP_CMD(cmd_id), cmd_data, error); +} + +static int calc_task_context_hash(struct context_message context_msg, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + shash = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(shash)) { + pr_err("can't alloc hash\n"); + return -DYN_ERR_API; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + ret = crypto_shash_init(sdesc); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_init failed\n"); + goto end; + } + + if (context_msg.flag & CONTEXT_CHECK_PID) { + ret = crypto_shash_update(sdesc, (uint8_t *)&context_msg.pid, + sizeof(context_msg.pid)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_COMM) { + ret = crypto_shash_update(sdesc, context_msg.comm, + 
strlen(context_msg.comm)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_MODNAME) { + ret = crypto_shash_update(sdesc, context_msg.module_name, + strlen(context_msg.module_name)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_final failed\n"); + goto free_shash; + } + } + +free_shash: + crypto_free_shash(shash); +end: + return ret; +} + +static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) +{ + int ret = 0; + struct context_message ctx_msg = {0}; + unsigned long return_address = 0; +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + struct module *p_module = NULL; +#elif IS_ENABLED(CONFIG_KALLSYMS) + char symbol_buf[128] = {0}; + int symbol_len = 0; + char *symbol_begin = NULL; + char *symbol_end = NULL; +#endif + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + ctx_msg.flag = flag; + ctx_msg.pid = current->pid; + memcpy(ctx_msg.comm, current->comm, sizeof(current->comm)); + + return_address = CALLER_ADDR1; + if (return_address) { +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + p_module = __module_address(return_address); + // caller is module + if (p_module) + memcpy(ctx_msg.module_name, p_module->name, sizeof(p_module->name)); + // caller is build-in + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#elif IS_ENABLED(CONFIG_KALLSYMS) + symbol_len = sprint_symbol((char *)symbol_buf, return_address); + if (!symbol_len) { + ret = -DYN_ERR_API; + pr_err("sprint_symbol failed\n"); + goto end; + } + symbol_begin = strchr((char *)symbol_buf, '['); + if (!symbol_begin) { + ret = -DYN_NULL_POINTER; + pr_err("module name is not exist\n"); + goto end; + } + symbol_end = strchr((char *)symbol_buf, ']'); + if (!symbol_end) { + ret = -DYN_NULL_POINTER; + 
pr_err("module name is not exist\n"); + goto end; + } + symbol_begin++; + if (symbol_end - symbol_begin) + memcpy(ctx_msg.module_name, symbol_begin, symbol_end - symbol_begin); + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#endif + } else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); + + ret = calc_task_context_hash(ctx_msg, hash); + if (ret) { + pr_err("calc_task_context_hash failed\n"); + goto end; + } + +end: + return ret; +} + +static int tdm_verify_phy_addr_valid(struct addr_range_info *range) +{ + int ret = 0; +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + int i; + uint64_t phy_addr_start, phy_addr_end; + + for (i = 0; i < range->count; i++) { + phy_addr_start = __sme_clr(range->addr[i].addr_start); + phy_addr_end = __sme_clr(range->addr[i].addr_start + range->addr[i].length); + + if ((PHYS_PFN(phy_addr_start) >= max_pfn) || (PHYS_PFN(phy_addr_end) >= max_pfn)) { + pr_err("phy_addr or length beyond max_pfn\n"); + ret = -DYN_ERR_MEM; + break; + } + } +#else + pr_warn("TDM: Can't get max_pfn, skip physical address check\n"); +#endif + + return ret; +} + +/* Convert the virtual address to physics address,then judge whether it is + * continuous physics memory + */ +static int ptable_virt_to_phy(uint64_t vaddr, struct addr_info *p_addr_info, uint64_t *left_convert) +{ + int ret = 0; + unsigned int level = 0; + pte_t *pte; + uint64_t local_page_mask = 0; + uint64_t local_page_size = 0; + uint64_t now_base = vaddr; + uint64_t last_phy_addr = 0; + uint64_t last_phy_len = 0; + uint64_t now_phy_addr = 0; + + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + local_page_size = page_level_size(level); + local_page_mask = page_level_mask(level); + + switch (level) { + case PG_LEVEL_4K: + p_addr_info->addr_start = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base 
& ~local_page_mask)); + break; + case PG_LEVEL_2M: + p_addr_info->addr_start = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + p_addr_info->addr_start = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + if ((p_addr_info->addr_start & ~local_page_mask) == 0) { + /*|--------------page_size-------------------|*/ + /*|-------*left_convert-------|*/ + if (*left_convert < local_page_size) { + p_addr_info->length = *left_convert; + *left_convert = 0; + } + /*|--------------page_size-------------------|-----*/ + /*|---------------------*left_convert-----------------------|*/ + else { + p_addr_info->length = local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + } + } else { + /*|--------------page_size-------------------|------*/ + /* |-------*left_convert---------|*/ + if ((p_addr_info->addr_start + *left_convert) < + ((p_addr_info->addr_start & local_page_mask) + local_page_size)) { + p_addr_info->length = *left_convert; + *left_convert = 0; + } + /*|--------------page_size-------------------|........*/ + /* |-----------------*left_convert-----------------|*/ + else { + p_addr_info->length = (p_addr_info->addr_start & local_page_mask) + + local_page_size - p_addr_info->addr_start; + now_base += p_addr_info->length; + *left_convert -= p_addr_info->length; + } + } + + last_phy_len = p_addr_info->length; + last_phy_addr = p_addr_info->addr_start; + + while (*left_convert) { + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + switch (level) { + case PG_LEVEL_4K: + now_phy_addr = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + now_phy_addr = 
(uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + now_phy_addr = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + /*not continuous memory*/ + if ((last_phy_addr + last_phy_len) != now_phy_addr) + break; + + if (*left_convert < local_page_size) { + p_addr_info->length += *left_convert; + *left_convert = 0; + } else { + p_addr_info->length += local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + last_phy_addr = now_phy_addr; + last_phy_len = local_page_size; + } + } + +end: + return ret; +} + +int psp_check_tdm_support(void) +{ + int ret = 0; + struct tdm_version version; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (tdm_support) + goto end; + + ret = psp_get_fw_info(&version); + if (ret) { + tdm_support = 0; + goto end; + } + + tdm_support = 1; + } + +end: + return tdm_support; +} +EXPORT_SYMBOL_GPL(psp_check_tdm_support); + +int psp_get_fw_info(struct tdm_version *version) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_fw_cmd *fw_cmd = NULL; + struct tdm_fw_resp *fw_resp = NULL; + + if (!version) { + ret = -DYN_NULL_POINTER; + pr_err("version is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + fw_cmd = (struct tdm_fw_cmd *)tdm_cmdresp_data; + fw_cmd->cmd_type = TDM_FW_VERSION; + + ret = tdm_do_cmd(0, (void *)fw_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + + if (error) { + ret = -error; + pr_warn("get_fw_info exception: 0x%x\n", error); + goto free_cmdresp; + } + + 
fw_resp = (struct tdm_fw_resp *)tdm_cmdresp_data; + memcpy(version, &fw_resp->version, sizeof(struct tdm_version)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_get_fw_info); + +int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_create_cmd *create_cmd = NULL; + struct tdm_create_resp *create_resp = NULL; + uint32_t addr_range_info_len = 0; + struct addr_range_info *paddr_range_info = NULL; + uint32_t info_index = 0; + uint64_t now_base_vaddr = 0; + uint64_t tf_left_size = 0; + uint32_t count = 0; + + if (!range) { + ret = -DYN_NULL_POINTER; + pr_err("range is null pointer\n"); + goto end; + } + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (range->count > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("range->count %d is beyond RANGE_CNT_MAX %d\n", range->count, RANGE_CNT_MAX); + goto end; + } + if (range->count == 0) { + ret = -DYN_ERR_SIZE_SMALL; + pr_err("range->count is zero!\n"); + goto end; + } + + /*create task by vaddr*/ + if (flag & TASK_CREATE_VADDR) { + paddr_range_info = kzalloc(sizeof(struct addr_range_info) + + RANGE_CNT_MAX * sizeof(struct addr_info), GFP_KERNEL); + if (!paddr_range_info) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for paddr_range_info failed\n"); + goto end; + } + + now_base_vaddr = range->addr[0].addr_start; + tf_left_size = range->addr[0].length; + while (tf_left_size && (count++ < RANGE_CNT_MAX + 1)) { + ret = ptable_virt_to_phy(now_base_vaddr, + &paddr_range_info->addr[info_index], &tf_left_size); + if (ret) { + pr_err("address convert failed!\n"); + goto free_paddr_range_info; + } + + now_base_vaddr = 
now_base_vaddr + + paddr_range_info->addr[info_index++].length; + if (info_index > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("info_index: %d is beyond %d\n", info_index, RANGE_CNT_MAX); + goto free_paddr_range_info; + } + } + + paddr_range_info->count = info_index; + addr_range_info_len = paddr_range_info->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } else { + /*check if physics address valid*/ + ret = tdm_verify_phy_addr_valid(range); + if (ret) { + pr_err("range address is abnormal!\n"); + goto end; + } + addr_range_info_len = range->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto free_paddr_range_info; + } + + create_cmd = (struct tdm_create_cmd *)tdm_cmdresp_data; + create_cmd->cmd_type = TDM_TASK_CREATE; + create_cmd->cmd_ctx_flag = flag; + + memcpy(&create_cmd->m_data, data, sizeof(struct measure_data)); + create_cmd->authcode_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : code->len; + + ret = tdm_get_cmd_context_hash(flag, create_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + if (flag & TASK_CREATE_VADDR) + memcpy(&create_cmd->range_info, paddr_range_info, addr_range_info_len); + else + memcpy(&create_cmd->range_info, range, addr_range_info_len); + + ret = tdm_do_cmd(0, (void *)create_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("create_measure_task exception error: 0x%x\n", error); + goto free_cmdresp; + } + + create_resp = (struct tdm_create_resp *)tdm_cmdresp_data; + code->len = create_resp->authcode_len; + code->len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : code->len; + memcpy(&code->val[0], &create_resp->authcode_val[0], code->len); + + head = &dyn_head.head; + task_node = kzalloc(sizeof(struct tdm_task_ctx), GFP_KERNEL); + if (!task_node) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", sizeof(struct tdm_task_ctx)); + goto free_cmdresp; + } + + task_node->task_id = create_resp->task_id; + task_node->handler = NULL; + task_node->cmd_ctx_flag = flag; + + ret = list_enqueue(task_node); + if (ret) { + pr_err("task %d enqueue failed!!!\n", task_node->task_id); + goto free_task_node; + } + + kfree(tdm_cmdresp_data); + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); + + return task_node->task_id; + +free_task_node: + kfree(task_node); +free_cmdresp: + kfree(tdm_cmdresp_data); +free_paddr_range_info: + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_create_measure_task); + +int psp_query_measure_status(uint32_t task_id, struct measure_status *status) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_query_cmd *query_cmd = NULL; + struct tdm_query_resp *query_resp = NULL; + + if (!status) { + ret = -DYN_NULL_POINTER; + pr_err("status is null pointer\n"); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + query_cmd = (struct tdm_query_cmd *)tdm_cmdresp_data; + query_cmd->cmd_type = TDM_TASK_QUERY; + query_cmd->task_id = task_id; + + ret = tdm_do_cmd(0, query_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + query_resp = 
(struct tdm_query_resp *)tdm_cmdresp_data; + memcpy(status, &query_resp->m_status, sizeof(struct measure_status)); +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_query_measure_status); + +int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_register_cmd *register_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + /* check if task_id is registered already */ + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) { + if ((handler && task_node->handler)) { + pr_err("task %d is registered already\n", task_id); + read_unlock(lock); + return -DYN_EEXIST; + } + break; + /* task_node will be used for next context */ + } + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + register_cmd = (struct tdm_register_cmd *)tdm_cmdresp_data; + temp_cmd = ®ister_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_VERIFY_AUTH; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, register_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + write_lock(lock); + task_node->handler = handler; + write_unlock(lock); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_measure_exception_handler); + +int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_destroy_cmd *destroy_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task %d is not allowed to destroy!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for 
size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + destroy_cmd = (struct tdm_destroy_cmd *)tdm_cmdresp_data; + temp_cmd = &destroy_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_DESTROY; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, destroy_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + if (task_node->handler) { + write_lock(lock); + task_node->handler = NULL; + write_unlock(lock); + } + + write_lock(lock); + list_del(&task_node->list); + write_unlock(lock); + + kfree(task_node); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_destroy_measure_task); + +int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_update_cmd *update_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + 
return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task %d is not allowed to update!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + update_cmd = (struct tdm_update_cmd *)tdm_cmdresp_data; + temp_cmd = &update_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_UPDATE; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + memcpy(&update_cmd->update_data, data, sizeof(struct measure_update_data)); + + ret = tdm_do_cmd(0, tdm_cmdresp_data, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_update_measure_task); + +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_startstop_cmd *startstop_cmd = NULL; + struct tdm_startstop_resp *startstop_resp = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + 
rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + startstop_cmd = (struct tdm_startstop_cmd *)tdm_cmdresp_data; + temp_cmd = &startstop_cmd->cmd; + temp_cmd->cmd_type = start ? TDM_TASK_START : TDM_TASK_STOP; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + if ((temp_cmd->cmd_type == TDM_TASK_STOP) && (task_node->cmd_ctx_flag & + TASK_ATTR_NO_UPDATE)) { + pr_warn("Task %d is not allowed to stop!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto free_cmdresp; + } + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, startstop_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + startstop_resp = (struct tdm_startstop_resp *)tdm_cmdresp_data; + + kfree(tdm_cmdresp_data); + + return startstop_resp->m_status.status; + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_startstop_measure_task); + +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_export_cert_cmd *cert_cmd = NULL; + struct tdm_export_cert_resp *cert_resp = NULL; + + if (!cert) { + ret = -DYN_NULL_POINTER; + pr_err("cert is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + cert_cmd = (struct tdm_export_cert_cmd *)tdm_cmdresp_data; + cert_cmd->cmd_type = TDM_EXPORT_CERT; + cert_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)cert_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto 
free_cmdresp; + } + + cert_resp = (struct tdm_export_cert_resp *)tdm_cmdresp_data; + memcpy(cert, &cert_resp->cert, sizeof(struct tdm_cert)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_export_cert); + +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_report *report_resp = NULL; + uint32_t needed_length = 0; + + if (!user_supplied_data) { + ret = -DYN_NULL_POINTER; + pr_err("user_supplied_data is null pointer\n"); + goto end; + } + if (!report_buffer) { + ret = -DYN_NULL_POINTER; + pr_err("report_buffer is null pointer\n"); + goto end; + } + if (!length) { + ret = -DYN_NULL_POINTER; + pr_err("length is null pointer\n"); + goto end; + } + if ((report_type != TDM_REPORT_SUMMARY) && (report_type != TDM_REPORT_DETAIL)) { + ret = -DYN_ERR_REPORT_TYPE; + pr_err("invalid report_type\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + report_cmd = (struct tdm_get_report_cmd *)tdm_cmdresp_data; + + report_cmd->cmd_type = TDM_GET_REPORT; + report_cmd->task_id = task_id; + if (task_id == TDM_TASK_ALL) { + if (!selection) { + ret = -DYN_NULL_POINTER; + pr_err("selection is null pointer\n"); + goto end; + } + report_cmd->selection_len = selection->len; + report_cmd->selection_len = (report_cmd->selection_len > TDM_MAX_TASK_BITMAP) ? + TDM_MAX_TASK_BITMAP : report_cmd->selection_len; + memcpy(&report_cmd->selection_bitmap[0], &selection->bitmap[0], + report_cmd->selection_len); + } + + report_cmd->user_data_len = (user_supplied_data->len > TDM_MAX_NONCE_SIZE) ? 
+ TDM_MAX_NONCE_SIZE : user_supplied_data->len; + memcpy(&report_cmd->user_data_val[0], &user_supplied_data->val[0], + report_cmd->user_data_len); + report_cmd->report_type = report_type; + report_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)report_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + report_resp = (struct tdm_report *)tdm_cmdresp_data; + if (report_type == TDM_REPORT_SUMMARY) + needed_length = sizeof(struct tdm_report) + sizeof(struct tdm_report_sig); + else + needed_length = sizeof(struct tdm_report) + + report_resp->task_nums * sizeof(struct tdm_detail_task_status) + + sizeof(struct tdm_report_sig); + + if (needed_length > *length) { + pr_warn("needed_length %d is beyond length %d\n", needed_length, *length); + *length = needed_length; + ret = -DYN_ERR_SIZE_SMALL; + } else { + memcpy(report_buffer, report_resp, needed_length); + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_report); + +int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, + struct tdm_pcr_value_2b *pcr_values) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_vpcr_cmd *vpcr_cmd = NULL; + struct tdm_get_vpcr_resp *vpcr_resp = NULL; + + if (!digest) { + ret = -DYN_NULL_POINTER; + pr_err("digest is null pointer\n"); + goto end; + } + if (!pcr_values) { + ret = -DYN_NULL_POINTER; + pr_err("pcr_values is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + vpcr_cmd = (struct tdm_get_vpcr_cmd *)tdm_cmdresp_data; + + vpcr_cmd->cmd_type = TDM_VPCR_AUDIT; + memcpy(&vpcr_cmd->pcr, 
&pcr, sizeof(struct pcr_select)); + + ret = tdm_do_cmd(0, (void *)vpcr_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + vpcr_resp = (struct tdm_get_vpcr_resp *)tdm_cmdresp_data; + memcpy(digest, &vpcr_resp->digest, sizeof(struct tpm2b_digest)); + pcr_values->task_nums = vpcr_resp->pcr_values.task_nums; + memcpy(&pcr_values->task_data[0], &vpcr_resp->pcr_values.task_data[0], + pcr_values->task_nums * sizeof(struct tdm_task_data)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_vpcr_audit); + +static long tdm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + int ret = 0; + void __user *argp = (void __user *)arg; + unsigned int tdm_cmd = 0; + unsigned char *temp_cmd_data = NULL; + struct task_selection_2b *selection = NULL; + struct data_2b *data = NULL; + uint32_t data_to_user_len = 0; + uint16_t selection_len = 0; + uint16_t user_data_len = 0; + struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_user_report_cmd *user_report_cmd = NULL; + uint32_t needed_length = 0; + struct tdm_get_vpcr_cmd *vpcr_cmd = NULL; + struct tdm_get_vpcr_resp *vpcr_resp = NULL; + uint32_t pcr_num = 0; + + if (_IOC_TYPE(ioctl) != TDM_IOC_TYPE) { + ret = -EINVAL; + pr_err("ioctl 0x%08x is invalid\n", ioctl); + goto end; + } + + temp_cmd_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!temp_cmd_data) { + ret = -ENOMEM; + pr_err("kzalloc for size 0x%lx failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + tdm_cmd = _IOC_NR(ioctl); + + switch (tdm_cmd) { + case USER_EXPORT_CERT: + ret = tdm_export_cert(TDM_AK_USAGE_ID, (struct tdm_cert *)temp_cmd_data); + if (ret) { + pr_err("Execute tdm export cert command failed!\n"); + goto free_mem; + } + data_to_user_len = sizeof(struct tdm_cert); + break; + + case 
USER_GET_REPORT: + if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_user_report_cmd))) { + pr_err("%s copy from user failed\n", __func__); + ret = -EFAULT; + goto end; + } + + user_report_cmd = (struct tdm_user_report_cmd *)temp_cmd_data; + needed_length = user_report_cmd->needed_length; + report_cmd = &user_report_cmd->report_cmd; + selection_len = report_cmd->selection_len > TDM_MAX_TASK_BITMAP ? + TDM_MAX_TASK_BITMAP : report_cmd->selection_len; + + selection = kzalloc(sizeof(struct task_selection_2b) + + selection_len * sizeof(uint8_t), GFP_KERNEL); + if (!selection) { + ret = -ENOMEM; + pr_err("kzalloc failed\n"); + goto free_mem; + } + + selection->len = selection_len; + memcpy(&selection->bitmap[0], &report_cmd->selection_bitmap[0], selection->len); + + user_data_len = report_cmd->user_data_len > TDM_MAX_NONCE_SIZE ? + TDM_MAX_NONCE_SIZE : report_cmd->user_data_len; + data = kzalloc(sizeof(struct data_2b) + + user_data_len * sizeof(uint8_t), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + pr_err("kzalloc failed\n"); + goto free_mem; + } + + data->len = user_data_len; + memcpy(&data->val[0], &report_cmd->user_data_val[0], data->len); + + ret = tdm_get_report(report_cmd->task_id, selection, data, report_cmd->report_type, + report_cmd->key_usage_id, temp_cmd_data, &needed_length); + if (ret) { + pr_err("Execute tdm report command failed!\n"); + goto free_mem; + } + + data_to_user_len = needed_length; + break; + + case USER_VPCR_AUDIT: + if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_get_vpcr_cmd))) { + pr_err("%s copy from user failed\n", __func__); + ret = -EFAULT; + goto end; + } + + vpcr_cmd = (struct tdm_get_vpcr_cmd *)temp_cmd_data; + vpcr_resp = (struct tdm_get_vpcr_resp *)temp_cmd_data; + pcr_num = vpcr_cmd->pcr.pcr; + + ret = tdm_get_vpcr_audit(vpcr_cmd->pcr, &vpcr_resp->digest, &vpcr_resp->pcr_values); + if (ret) { + pr_err("Execute tdm vpcr audit command failed!\n"); + goto free_mem; + } + + vpcr_resp->pcr = pcr_num; + 
data_to_user_len = sizeof(struct tdm_get_vpcr_resp) + + vpcr_resp->pcr_values.task_nums * sizeof(struct tdm_task_data); + break; + + case USER_SHOW_DEVICE: + ret = psp_get_fw_info(&((struct tdm_show_device *)temp_cmd_data)->version); + if (ret) { + pr_err("firmware version get failed!\n"); + goto free_mem; + } + + data_to_user_len = sizeof(struct tdm_show_device); + break; + + default: + pr_err("invalid tdm_cmd: %d from user\n", tdm_cmd); + ret = -EINVAL; + goto free_mem; + } + + if (copy_to_user(argp, temp_cmd_data, data_to_user_len)) { + pr_err("%s copy to user failed\n", __func__); + ret = -EFAULT; + goto free_mem; + } + +free_mem: + kfree(temp_cmd_data); + kfree(selection); + kfree(data); +end: + return ret; +} + +static const struct file_operations tdm_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = tdm_ioctl, +}; + +static struct miscdevice misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "tdm", + .fops = &tdm_fops, +}; + +int tdm_dev_init(void) +{ + int ret = 0; + + if (tdm_init_flag) + return 0; + + INIT_KFIFO(kfifo_error_task); + INIT_LIST_HEAD(&dyn_head.head); + rwlock_init(&dyn_head.lock); + spin_lock_init(&kfifo_lock); + + ret = psp_register_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + if (ret) { + pr_err("notifier function registration failed\n"); + return ret; + } + + kthread = kthread_create(measure_exception_handling_thread, NULL, + "measure_exception_handling_thread"); + if (IS_ERR(kthread)) { + pr_err("kthread_create fail\n"); + ret = PTR_ERR(kthread); + goto unreg; + } + + wake_up_process(kthread); + + ret = misc_register(&misc); + if (ret) { + pr_err("misc_register for tdm failed\n"); + goto stop_kthread; + } + + tdm_init_flag = 1; + pr_info("TDM driver loaded successfully!\n"); + + return ret; + +stop_kthread: + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } +unreg: + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + return ret; +} + +int tdm_dev_destroy(void) +{ + if (tdm_destroy_flag) + goto end; + 
+ if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } + + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + misc_deregister(&misc); + tdm_destroy_flag = 1; +end: + return 0; +} + diff --git a/drivers/crypto/ccp/hygon/tdm-dev.h b/drivers/crypto/ccp/hygon/tdm-dev.h new file mode 100644 index 000000000000..afc4761a7e81 --- /dev/null +++ b/drivers/crypto/ccp/hygon/tdm-dev.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* Change log: + * Version: 0.7 (fw version 1.4) + * 1.Adjust the TDM driver to accommodate multiple versions of the kernel. + * Version: 0.6 (fw version 1.4) + * 1.remove psp_get_fw_info from hygon_tdm_init, add tdm show device support to ioctl for hag. + * Version: 0.5 (fw version 1.4) + * 1.add support for hanging machine when task exception with special attribute. + * Version: 0.4 (fw version 1.3) + * 1.add vpcr support. + * 2.add task create by vaddr. + * Version: 0.3 (fw version 1.2) + * 1.add remote authentication support. 
+ */ +#ifndef __TDM_DEV_H__ +#define __TDM_DEV_H__ + +#include +#include + +#define MIN_VPCR 10 +#define MAX_VPCR 16 + +/*Macro definition for measurement*/ +#define TDM_MAX_TASK_BITMAP 16 +#define TDM_MAX_NONCE_SIZE 32 + +#define RANGE_CNT_MAX 0x80 +#define MEASURE_TASK_MAX 100 +#define AUTHCODE_MAX 16 +#define AUTH_TRY_DELAY 1 + +#define HASH_ALGO_SM3 0 +#define HASH_ALGO_SHA1 1 +#define HASH_ALGO_SHA256 2 +#define HASH_ALGO_SHA384 3 +#define HASH_ALGO_SHA512 4 + +#define SM3_256_DIGEST_SIZE 32 +#define SHA1_DIGEST_SIZE 20 +#define SHA256_DIGEST_SIZE 32 +#define SHA384_DIGEST_SIZE 48 +#define SHA512_DIGEST_SIZE 64 + +#define CONTEXT_CHECK_PID 0x1 +#define CONTEXT_CHECK_COMM 0x2 +#define CONTEXT_CHECK_MODNAME 0x4 +#define TASK_ATTR_NO_UPDATE 0x10000 +#define TASK_SUPPORT_VPCR 0x20000 +#define TASK_CREATE_VADDR 0x40000 +#define TASK_EXCEPTION_CRASH 0x80000 + +#define MEASURE_UPDATE_ALGO 0x1 +#define MEASURE_UPDATE_EXPECTED_MEASUREMENT 0x2 + +/*Macro definition for tdm certificate*/ +#define TDM_MAX_CHIP_ID_LEN 40 +#define TDM_CURVE_SM2_ID 0x3 +#define TDM_PUBKEY_LEN 32 +#define TDM_MAX_USER_ID_LEN 126 +#define TDM_SIG_LEN 32 +#define TDM_HEADER_AND_PUBKEY_LEN 284 + +/*Macro definition for tdm report*/ +#define TDM_TASK_ALL 0xffffffff +#define TDM_REPORT_SUMMARY 0 +#define TDM_REPORT_DETAIL 1 + +/* CPU to psp command declaration */ +enum C2P_CMD_TYPE { + TDM_TASK_CREATE = 0x0, + TDM_TASK_VERIFY_AUTH, + TDM_TASK_QUERY, + TDM_TASK_DESTROY, + TDM_TASK_UPDATE, + TDM_TASK_STOP, + TDM_TASK_START, + TDM_FW_VERSION, + TDM_EXPORT_CERT, + TDM_GET_REPORT, + TDM_VPCR_AUDIT, + TDM_MAX_CMD +}; + +/* User interaction command declaration */ +enum USER_CMD_TYPE { + USER_EXPORT_CERT = 0x80, + USER_GET_REPORT, + USER_VPCR_AUDIT, + USER_SHOW_DEVICE, + USER_MAX_CMD +}; + +/*Public usage id definition for tdm certificate*/ +enum _tdm_key_usage_id { + TDM_INVALID_USAGE_ID = 0x1000, + TDM_CEK_USAGE_ID = 0x1004, + TDM_AK_USAGE_ID = 0x2001, + TDM_MAX_USAGE_ID +}; + +/*Public status ans type 
declaration*/ +enum TDM_TASK_STATUS { + DYN_INIT = 0x0, + DYN_TO_RUN, + DYN_RUN, + DYN_TO_STOP, + DYN_STOP +}; + +enum TDM_MEASURE_STATUS { + MER_NORMAL = 0x0, + MER_ERR +}; + +enum DYN_ERROR_TYPE { + DYN_NORMAL = 0x0, + DYN_NOT_EXIST, + DYN_AUTH_FAIL, + DYN_STATUS_NOT_SUIT, + DYN_BEYOND_MAX, + DYN_DA_PERIOD, + DYN_NULL_POINTER, + DYN_ERR_API, + DYN_EEXIST, + DYN_ERR_MEM, + DYN_ERR_AUTH_LEN, + DYN_ERR_KEY_ID, + DYN_NO_ALLOW_UPDATE, + DYN_ERR_HASH_ALGO, + DYN_ERR_REPORT_TYPE, + DYN_ERR_SIZE_SMALL, + DYN_ERR_ADDR_MAPPING, + DYN_ERR_PCR_NUM, + DYN_ERR_ORIG_TPM_PCR, + DYN_MAX_ERR_TYPE +}; + +/*Data structure declaration for measurement*/ +struct addr_info { + uint64_t addr_start; + uint64_t length; +} __packed; + +struct addr_range_info { + uint32_t count; + struct addr_info addr[]; +} __packed; + +struct measure_data { + uint32_t hash_algo; + uint8_t expected_measurement[32]; + uint32_t period_ms; + uint32_t pcr; +} __packed; + +struct authcode_2b { + uint16_t len; + uint8_t val[]; +} __packed; + +struct measure_status { + uint8_t status; + uint8_t error; + uint64_t count; +} __packed; + +struct measure_update_data { + uint32_t update_flag; + uint32_t algo; + uint8_t expected_measurement[32]; +} __packed; + +struct da_status { + uint64_t err_time; + uint16_t interval_time; + uint16_t err_cnt; +} __packed; + +struct tdm_version { + uint8_t api_major; + uint8_t api_minor; + uint32_t buildId; + uint32_t task_max; + uint32_t range_max_per_task; +} __packed; + +struct task_selection_2b { + uint16_t len; + uint8_t bitmap[]; +}; + +struct data_2b { + uint16_t len; + uint8_t val[]; +}; + +/*Data structure declaration for vpcr*/ +struct pcr_select { + uint16_t hash; + uint32_t pcr; +} __packed; + +union tpmu_ha { + uint8_t sha1[SHA1_DIGEST_SIZE]; + uint8_t sha256[SHA256_DIGEST_SIZE]; + uint8_t sha384[SHA384_DIGEST_SIZE]; + uint8_t sha512[SHA512_DIGEST_SIZE]; + uint8_t sm3_256[SM3_256_DIGEST_SIZE]; +}; + +struct tpm2b_digest { + uint16_t size; + uint8_t buffer[sizeof(union 
tpmu_ha)]; +} __packed; + +struct tdm_task_data { + uint32_t task_id; + uint8_t hash[32]; +} __packed; + +struct tdm_pcr_value_2b { + uint32_t task_nums; + struct tdm_task_data task_data[]; +} __packed; + +/*Data structure declaration for tdm certificate*/ +struct _tdm_ecc_pubkey { + uint32_t curve_id; + uint8_t pubkey_qx[TDM_PUBKEY_LEN]; + uint8_t pubkey_qy[TDM_PUBKEY_LEN]; + uint16_t user_id_len; + uint8_t user_id[TDM_MAX_USER_ID_LEN]; +} __packed; + +struct _tdm_ecc_signature { + uint8_t sig_r[TDM_SIG_LEN]; + uint8_t sig_s[TDM_SIG_LEN]; +} __packed; + +/* + ************************ Hygon TDM Certificate - ECC256*************************** + *|00h |31:0 |VERSION |Certificate version. 0... | + *|04h |7:0 |- |Reserved. Set to zero | + *|06h |7:0 |CHIP_ID_LEN | | + *|08h |319:0 |CHIP_ID |Unique ID of every chip. | + *|30h |31:0 |KEY_USAGE_ID |Usage id of the key. | + *|34h |63:0 |- |Reserved. Set to zero. | + *|3Ch |31:0 |CURVE_ID |ECC curve id | + *|40h |255:0 |Qx |Public key Qx | + *|60h |255:0 |Qy |Public key Qy | + *|80h |7:0 |USER_ID_LEN |GM user id len | + *|82h |1007:0 |USER_ID |GM user id | + *|100h|223:0 |- |Reserved. Set to zero. | + *|11Ch|31:0 |SIG1_KEY_USAGE_ID|Key type for sig1. | + *|120h|255:0 |SIG1_R |Signature R of key1. | + *|140h|255:0 |SIG1_S |Signature S of key1. | + *|160h|223:0 |- |Reserved. Set to zero | + *|17Ch|31:0 |SIG2_KEY_USAGE_ID|Key type for sig2. | + *|180h|255:0 |SIG2_R |Signature R of key2. | + *|1A0h|255:0 |SIG2_S |Signature S of key2. 
| + ************************************************************************************* + */ +struct tdm_cert { + uint32_t version; + uint8_t reserved_0[2]; + uint16_t chip_id_len; + uint8_t chip_id[TDM_MAX_CHIP_ID_LEN]; + uint32_t key_usage_id; + uint8_t reserved_1[8]; + struct _tdm_ecc_pubkey ecc_pubkey; + uint8_t reserved_2[28]; + uint32_t sig1_key_usage_id; + struct _tdm_ecc_signature ecc_sig1; + uint8_t reserved_3[28]; + uint32_t sig2_key_usage_id; + struct _tdm_ecc_signature ecc_sig2; +} __packed; + +/*Data structure declaration for tdm measurement report*/ +/* + ******************** Hygon TDM Report for Single Task - ECC256*********************** + *|+(00h) |31:0 |TASK_ID |Measured task ID | + *|+(04h) |31:0 |PERIOD_MS |Meaured period time for the related task | + *|+(08h) |63:0 |MEAURED_COUNT |Meaured count for the related task | + *|+(10h) |31:0 |LAST_MEASURE_ELAPSED_MS|Meaured time for last mesurement. | + *|+(14h) |95:0 |- |Reserved. Set to zero | + *|+(20h) |255:0 |MEASURED_HASH |Mesured hash for the related task. | + ************************************************************************************* + */ +struct tdm_detail_task_status { + uint32_t task_id; + uint32_t period_ms; + uint64_t measured_count; + uint32_t last_measure_elapsed_ms; + uint8_t reserved[12]; + uint8_t measured_hash[32]; +} __packed; + +/* + ************************ Hygon TDM Report - ECC256*************************** + *|00h |31:0 |VERSION |Certificate version. 0... | + *|04h |31:0 |FW_VERSION |Firmware verfion,BUILD_ID | + *|08h |7:0 |REPORT_TYPE |Summary report:0, Detailed report:1 | + *|09h |39:0 |- |Reserved. Set to zero. | + *|0Eh |15:0 |TASK_NUMS |ALL task numbers. | + *|10h |127:0 |TASK_BITMAP |ALL task bitmap. | + *|20h |127:0 |TASK_ERROR_BITMAP |Bitmap for error tasks | + *|30h |127:0 |TASK_RUNNING_BITMAP|Bitmap for runnint tasks | + *|40h |239:0 |- |Reserved. Set to zero. | + *|5Eh |15:0 |USER_DATA_LEN |User supplied data length. 
| + *|60h |255:0 |USER_DATA |User supplied data. | + *|80h |255:0 |AGGREGATE_HASH |Aggregate hash for tasks | + ************************************************************************************* + */ +struct tdm_report { + uint32_t version; + uint32_t fw_version; + uint8_t report_type; + uint8_t reserved_0[5]; + uint16_t task_nums; + uint8_t task_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t task_error_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t task_running_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t reserved_1[30]; + uint16_t user_supplied_data_len; + uint8_t user_supplied_data[TDM_MAX_NONCE_SIZE]; + uint8_t aggregate_hash[32]; + struct tdm_detail_task_status detailed_task_status[]; +} __packed; + +/* + ************************ Hygon TDM Report Signature - ECC256************************* + *|A0h |223:0 |- |Reserved. Set to zero | + *|BCh |31:0 |SIG_KEY_USAGE_ID |Key type for sig. | + *|C0h |255:0 |SIG_R |Signature R of key. | + *|E0h |255:0 |SIG_S |Signature S of key. | + ************************************************************************************* + */ +struct tdm_report_sig { + uint8_t reserved[28]; + uint32_t sig_key_usage_id; + uint8_t sig_r[TDM_SIG_LEN]; + uint8_t sig_s[TDM_SIG_LEN]; +} __packed; + +/*Data structure declaration for tdm command/response interface*/ +/* + * The following commands use this structure: + * psp_register_measure_exception_handler + * psp_destroy_measure_task + * psp_update_measure_task + * psp_startstop_measure_task + */ +struct tdm_common_cmd { + uint32_t cmd_type; + uint32_t task_id; + uint16_t code_len; + uint8_t code_val[AUTHCODE_MAX]; + uint8_t context_hash[32]; +} __packed; + +/*TASK_CREATE*/ +struct tdm_create_cmd { + uint32_t cmd_type; + uint32_t cmd_ctx_flag; + struct measure_data m_data; + uint16_t authcode_len; + uint8_t context_hash[32]; + struct addr_range_info range_info; +} __packed; + +struct tdm_create_resp { + uint32_t task_id; + uint16_t authcode_len; + uint8_t authcode_val[AUTHCODE_MAX]; +} __packed; + 
+/*TASK_VERIFY_AUTH*/ +struct tdm_register_cmd { + struct tdm_common_cmd cmd; +} __packed; + +/*TASK_QUERY*/ +struct tdm_query_cmd { + uint32_t cmd_type; + uint32_t task_id; +} __packed; + +struct tdm_query_resp { + struct measure_status m_status; +} __packed; + +/*TASK_DESTROY*/ +struct tdm_destroy_cmd { + struct tdm_common_cmd cmd; +} __packed; + +/*TASK_UPDATE*/ +struct tdm_update_cmd { + struct tdm_common_cmd cmd; + struct measure_update_data update_data; +} __packed; + +/*TASK_STOP,TASK_START*/ +struct tdm_startstop_cmd { + struct tdm_common_cmd cmd; +} __packed; + +struct tdm_startstop_resp { + struct measure_status m_status; +} __packed; + +/*TDM_VERSION*/ +struct tdm_fw_cmd { + uint32_t cmd_type; +} __packed; + +struct tdm_fw_resp { + struct tdm_version version; +} __packed; + +/*TDM_EXPORT_CERT*/ +struct tdm_export_cert_cmd { + uint32_t cmd_type; + uint32_t key_usage_id; +} __packed; + +struct tdm_export_cert_resp { + struct tdm_cert cert; +} __packed; + +/*TDM_GET_REPORT*/ +struct tdm_get_report_cmd { + uint32_t cmd_type; + uint32_t task_id; + uint16_t selection_len; + uint8_t selection_bitmap[TDM_MAX_TASK_BITMAP]; + uint16_t user_data_len; + uint8_t user_data_val[TDM_MAX_NONCE_SIZE]; + uint8_t report_type; + uint32_t key_usage_id; +} __packed; + +/* Resopnse: + * struct tdm_report measure_report; + * struct tdm_report_sig measure_report_sig; + */ + +struct tdm_user_report_cmd { + struct tdm_get_report_cmd report_cmd; + uint32_t needed_length; +} __packed; + +/*TDM_VPCR_AUDIT*/ +struct tdm_get_vpcr_cmd { + uint32_t cmd_type; + struct pcr_select pcr; +} __packed; + +struct tdm_get_vpcr_resp { + uint32_t pcr; + struct tpm2b_digest digest; + struct tdm_pcr_value_2b pcr_values; +} __packed; + +struct tdm_show_device { + struct tdm_version version; +} __packed; + +/*Public api definition for tdm*/ +typedef int (*measure_exception_handler_t)(uint32_t task_id); + +int psp_check_tdm_support(void); +int psp_get_fw_info(struct tdm_version *version); +int 
psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code); +int psp_query_measure_status(uint32_t task_id, struct measure_status *status); +int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler); +int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code); +int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data); +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start); +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert); +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length); +int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, + struct tdm_pcr_value_2b *pcr_values); + +int tdm_dev_init(void); +int tdm_dev_destroy(void); +#endif /* __TDM_DEV_H__*/ diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 9dca716bcee7..8e03851fc03d 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -23,6 +23,9 @@ #include "hsti.h" #include "hygon/psp-dev.h" +#ifdef CONFIG_TDM_DEV_HYGON +#include "hygon/tdm-dev.h" +#endif struct psp_device *psp_master; @@ -248,6 +251,14 @@ static int psp_init(struct psp_device *psp) if (ret) return ret; +#ifdef CONFIG_TDM_DEV_HYGON + if (is_vendor_hygon()) { + ret = tdm_dev_init(); + if (ret) + return ret; + } +#endif + return 0; } @@ -333,6 +344,11 @@ void psp_dev_destroy(struct sp_device *sp) if (!psp) return; +#ifdef CONFIG_TDM_DEV_HYGON + if (is_vendor_hygon()) + tdm_dev_destroy(); +#endif + sev_dev_destroy(psp); tee_dev_destroy(psp); From 419d4d1444321880f58ff5d3022fddb72745bc8c Mon Sep 17 00:00:00 2001 From: chench00 Date: Thu, 1 Aug 2024 21:03:12 +0800 Subject: [PATCH 54/99] crypto: 
tdm: Support dynamic protection for SCT and IDT by HYGON TDM hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9B9XS CVE: NA --------------------------- tdm_kernel_guard is an application that uses HYGON TDM technology to protect important data in the kernel. Through this application, the dynamic protection of SCT and IDT is completed in the system. In the future, more protection objects can be expanded based on this application Signed-off-by: chench Link: https://github.com/deepin-community/kernel/pull/366 (cherry picked from commit ab3f31a00923205097e680332e956ee109ce379d) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/Kconfig | 11 + drivers/crypto/ccp/Makefile | 1 + drivers/crypto/ccp/hygon/tdm-kernel-guard.c | 352 ++++++++++++++++++++ 3 files changed, 364 insertions(+) create mode 100644 drivers/crypto/ccp/hygon/tdm-kernel-guard.c diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 41e7d5e9361c..9d5e7432441f 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -68,3 +68,14 @@ config TDM_DEV_HYGON depends on HYGON_PSP2CPU_CMD help Hygon TDM driver + +config TDM_KERNEL_GUARD + tristate "Hygon TDM kernel guard" + default y + depends on TDM_DEV_HYGON + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_SM3 + help + The key part of kernel is protected by TDM technology, SCT and IDT + are protected by default, and others are added later according to the + requirements. 
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 4faf398075bd..09099b0a419b 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -31,3 +31,4 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +obj-$(CONFIG_TDM_KERNEL_GUARD) += hygon/tdm-kernel-guard.o diff --git a/drivers/crypto/ccp/hygon/tdm-kernel-guard.c b/drivers/crypto/ccp/hygon/tdm-kernel-guard.c new file mode 100644 index 000000000000..c3afe888ea04 --- /dev/null +++ b/drivers/crypto/ccp/hygon/tdm-kernel-guard.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The Hygon TDM KERNEL GUARD module driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include +#include +#include +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +static int eh_obj = -1; +module_param(eh_obj, int, 0644); +MODULE_PARM_DESC(eh_obj, "security enhance object for TDM"); + +/* Objects are protected by TDM now + * SCT: 0 + * IDT: 1 + */ +enum ENHANCE_OBJS { + SCT = 0, + IDT, + MAX_OBJ +}; + +static char *obj_names[MAX_OBJ] = { + "SCT", + "IDT", +}; + +struct tdm_security_enhance { + uint64_t vaddr; + uint32_t size; + struct addr_range_info *mem_range; + struct authcode_2b *authcode; + struct measure_data mdata; + uint32_t context; + uint32_t task_id; + char *obj_name; +} __packed; + +static struct tdm_security_enhance eh_objs[MAX_OBJ]; + +static int tdm_regi_callback_handler(uint32_t task_id) +{ + int i = 0; + int ret = 0; + + for (i = 0; i < MAX_OBJ; i++) { + if (task_id == eh_objs[i].task_id) { + pr_warn("Obj: %s, Task:%d, corruption detected!\n", eh_objs[i].obj_name, + task_id); + pr_warn("Please check if it's 
intended, or your machine may be on danger!\n"); + break; + } + } + return ret; +} + +static int calc_expected_hash(uint8_t *base_addr, uint32_t size, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + return ret; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret) { + pr_err("crypto_shash_init failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_update(sdesc, base_addr, size); + if (ret) { + pr_err("crypto_shash_update failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + pr_err("crypto_shash_final failed\n"); + ret = -1; + goto out; + } + } + +out: + crypto_free_shash(shash); + return ret; +} + +static int tdm_task_create_and_run(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + data->task_id = psp_create_measure_task(data->mem_range, &data->mdata, data->context, + data->authcode); + if (data->task_id < 0) { + ret = data->task_id < 0; + pr_err("create measurement task failed with 0x%x!\n", data->task_id); + goto end; + } + + ret = psp_register_measure_exception_handler(data->task_id, data->authcode, + tdm_regi_callback_handler); + if (ret < 0) { + pr_err("task_id %d callback function register failed with 0x%x\n", data->task_id, + ret); + goto release_task; + } + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, true); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d start failed with 0x%x\n", data->task_id, ret); + goto release_task; + } + + return ret; + +release_task: + psp_destroy_measure_task(data->task_id, data->authcode); +end: + return ret; +} + +int tdm_service_run(struct tdm_security_enhance *data) +{ + int ret = 0; + struct addr_range_info *addr_range = NULL; + + // Allocate memory for addr_range + addr_range = kzalloc(sizeof(struct addr_range_info) + 
sizeof(struct addr_info), GFP_KERNEL); + if (!addr_range) { + ret = -DYN_ERR_MEM; + pr_err("addr_range kzalloc memory failed\n"); + goto end; + } + + // Fill in addr_range + addr_range->count = 1; + addr_range->addr[0].addr_start = data->vaddr; + addr_range->addr[0].length = data->size; + data->mem_range = addr_range; + + // Context configuration + data->context |= TASK_CREATE_VADDR; + + // Allocate memory for authcode + data->authcode = kzalloc(sizeof(struct authcode_2b) + AUTHCODE_MAX, GFP_KERNEL); + if (!data->authcode) { + ret = -DYN_ERR_MEM; + pr_err("authcode_2b kzalloc memory failed\n"); + goto free_addr_range_info; + } + + data->authcode->len = AUTHCODE_MAX; + + // Measurement data configuration + data->mdata.hash_algo = HASH_ALGO_SM3; + data->mdata.period_ms = 0; + ret = calc_expected_hash((uint8_t *)data->vaddr, data->size, + data->mdata.expected_measurement); + if (ret) { + pr_err("calculate expected hash failed!\n"); + goto free_authcode; + } + + // Create and start tdm task + ret = tdm_task_create_and_run(data); + if (ret) { + pr_err("tdm_task_create_and_run failed!\n"); + goto free_authcode; + } + + return ret; + +free_authcode: + kfree(data->authcode); + data->authcode = NULL; +free_addr_range_info: + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +int tdm_service_exit(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, false); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d stop failed with 0x%x\n", data->task_id, ret); + goto end; + } + + // Waiting for the task to end + msleep(40); + + psp_destroy_measure_task(data->task_id, data->authcode); + + kfree(data->authcode); + data->authcode = NULL; + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) +static int p_tmp_kprobe_handler(struct kprobe *p_ri, struct pt_regs *p_regs) +{ + return 0; +} 
+ +unsigned long kprobe_symbol_address_byname(const char *name) +{ + int p_ret; + struct kprobe p_kprobe; + unsigned long addr = 0; + + memset(&p_kprobe, 0, sizeof(p_kprobe)); + + p_kprobe.pre_handler = p_tmp_kprobe_handler; + p_kprobe.symbol_name = name; + + p_ret = register_kprobe(&p_kprobe); + if (p_ret < 0) { + pr_err("register_kprobe error [%d] :(\n", p_ret); + return 0; + } + + addr = (unsigned long)p_kprobe.addr; + unregister_kprobe(&p_kprobe); + + return addr; +} +#endif + +static int __init kernel_security_enhance_init(void) +{ + int i = 0; + int ret = 0; + unsigned long *sct_addr; + struct desc_ptr idtr; +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) + unsigned long (*f_kallsyms_lookup_name)(const char *); + + f_kallsyms_lookup_name = (unsigned long (*)(const char *))kprobe_symbol_address_byname( + "kallsyms_lookup_name"); + if (!f_kallsyms_lookup_name) { + ret = -DYN_ERR_API; + pr_err("kprobe_symbol_address_byname failed!"); + goto end; + } + + sct_addr = (unsigned long *)f_kallsyms_lookup_name("sys_call_table"); +#else + + sct_addr = (unsigned long *)kallsyms_lookup_name("sys_call_table"); +#endif + if (!sct_addr) { + ret = -DYN_ERR_API; + pr_err("kallsyms_lookup_name for sys_call_table failed!"); + goto end; + } + + asm("sidt %0":"=m"(idtr)); + + if (!psp_check_tdm_support()) + return 0; + + for (i = 0; i < MAX_OBJ; i++) { + memset(&eh_objs[i], 0, sizeof(eh_objs[i])); + eh_objs[i].context = CONTEXT_CHECK_MODNAME; + eh_objs[i].obj_name = obj_names[i]; + } + + if ((eh_obj == -1) || (eh_obj & (1 << SCT))) { + eh_objs[SCT].vaddr = (uint64_t)sct_addr; + eh_objs[SCT].size = NR_syscalls * sizeof(char *); + } + if ((eh_obj == -1) || (eh_obj & (1 << IDT))) { + eh_objs[IDT].vaddr = idtr.address; + eh_objs[IDT].size = idtr.size; + } + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_run(&eh_objs[i]); + } + + pr_info("Hygon TDM guard load successfully!\n"); + +end: + return ret; +} + +static void __exit kernel_security_enhance_exit(void) +{ + int 
i = 0; + + if (!psp_check_tdm_support()) + return; + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_exit(&eh_objs[i]); + } + pr_info("Hygon TDM guard unload successfully!\n"); +} + +MODULE_AUTHOR("niuyongwen@hygon.cn"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("Kernel security enhancement module by TDM"); + +/* + * kernel_security_enhance_init must be done after ccp module init. + * That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(kernel_security_enhance_init); +module_exit(kernel_security_enhance_exit); From ba78541c0e58226becd986195a9dc270f296089c Mon Sep 17 00:00:00 2001 From: chench00 Date: Thu, 1 Aug 2024 21:05:41 +0800 Subject: [PATCH 55/99] linux: tpm: add Hygon TPM2 driver hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9B9XS CVE: NA --------------------------- Hygon CPU implemented a firmware-based TPM2 device, which runs on its internal secure processor named PSP. The device is fully compatible with TCG TPM2.0 spec (part 1 ~ 4) in the commands level, but underlying uses an unique private interface in the form of some hardware mailbox between X86 cores and PSP, which is for sure different from the TIS or CRB interfaces defined in the PTP spec. As such, to support this device we need a specialized driver which handles the basic send and receive operations required by the kernel TPM core layer. ACPI device info passed from underlying BIOS indicates the device presence by setting the _HID field (see TCG ACPI Sepcification, Family 1.2 and 2.0, Chapter 8 "ACPI Device") to "HYGT0101", which distinguishes it from the rest of devices. If the BIOS does not support this setting, the driver will not be activated and thus has no impact to the system at all. 
Signed-off-by: chench Link: https://github.com/deepin-community/kernel/pull/366 (cherry picked from commit 830e7458daf526bf8fe06ba1f3ecef0d4ac7b5f3) Signed-off-by: Wentao Guan Conflicts: drivers/char/tpm/Kconfig drivers/char/tpm/Makefile --- drivers/char/tpm/Kconfig | 12 +++ drivers/char/tpm/Makefile | 1 + drivers/char/tpm/tpm_hygon.c | 186 +++++++++++++++++++++++++++++++++++ 3 files changed, 199 insertions(+) create mode 100644 drivers/char/tpm/tpm_hygon.c diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 8a8f692b6088..15e0742d1aec 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -254,5 +254,17 @@ config TCG_SVSM level (usually VMPL0). To compile this driver as a module, choose M here; the module will be called tpm_svsm. +config TCG_HYGON + tristate "Hygon TPM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TPM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tpm_hygon. + source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 5b5cdc0d32e4..67446c071997 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -47,3 +47,4 @@ obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o obj-$(CONFIG_TCG_LOONGSON) += tpm_loongson.o +obj-$(CONFIG_TCG_HYGON) += tpm_hygon.o diff --git a/drivers/char/tpm/tpm_hygon.c b/drivers/char/tpm/tpm_hygon.c new file mode 100644 index 000000000000..8e509df90290 --- /dev/null +++ b/drivers/char/tpm/tpm_hygon.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TPM2.0 device driver. + * + * Copyright (C) 2020 Hygon Info Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TPM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TPM_BUF_LEN 4096 +#define MAX_CMD_BUF_LEN (MAX_TPM_BUF_LEN + sizeof(u32) + sizeof(u32)) + +struct tpm_hygon_priv { + u8 priv_buf[MAX_CMD_BUF_LEN]; +}; + +/* + * tpm header struct name is different in different kernel versions. + * so redefine it for driver porting. + */ +struct tpm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tpm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tpm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tpm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = cpu_to_be32(sizeof(priv->priv_buf)); + u32 cmd_size = cpu_to_be32((u32)count); + u8 *p = priv->priv_buf; + + *(u32 *)p = buf_size; + p += sizeof(buf_size); + *(u32 *)p = cmd_size; + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TPM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: sev do cmd error, %d\n", __func__, error); + ret = -EIO; + } + + return ret; +} + +static const struct tpm_class_ops tpm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tpm_c_recv, + .send = tpm_c_send, +}; + +static int hygon_tpm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + 
struct tpm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tpm_c_ops); + if (IS_ERR(chip)) { + pr_err("tpmm_chip_alloc fail\n"); + ret = PTR_ERR(chip); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tpm_chip_register fail\n"); + goto err; + } + + pr_info("Hygon TPM2 detected\n"); + + return 0; + +err: + return ret; +} + +static void hygon_tpm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TPM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tpm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tpm2_device_ids[] = { + {"HYGT0101", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tpm2_device_ids); + +static struct acpi_driver hygon_tpm2_acpi_driver = { + .name = "tpm_hygon", + .ids = hygon_tpm2_device_ids, + .ops = { + .add = hygon_tpm2_acpi_add, + .remove = hygon_tpm2_acpi_remove, + }, + .drv = { + .pm = &tpm_hygon_pm, + }, +}; + +static int __init hygon_tpm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tpm2_acpi_driver); +} + +static void __exit hygon_tpm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tpm2_acpi_driver); +} + +/* + * hygon_tpm2_init must be done after ccp module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). 
+ */ +device_initcall_sync(hygon_tpm2_init); +module_exit(hygon_tpm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TPM2 device driver for Hygon PSP"); From 6af19c6f7db2575bd1a15f4af12f2198bf394fc6 Mon Sep 17 00:00:00 2001 From: Wentao Guan Date: Tue, 23 Dec 2025 10:44:56 +0800 Subject: [PATCH 56/99] tpm: hygon: Add bufsiz parameter to tpm_c_send() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is hygon version of this commit: commit a727bc0588e77efb502c52d7356e8ed036b6a83e Author: Nathan Chancellor Date: Wed Sep 17 10:09:00 2025 -0700 tpm: loongson: Add bufsiz parameter to tpm_loongson_send() Commit 5c83b07df9c5 ("tpm: Add a driver for Loongson TPM device") has a semantic conflict with commit 07d8004d6fb9 ("tpm: add bufsiz parameter in the .send callback"), as the former change was developed against a tree without the latter change. This results in a build error: drivers/char/tpm/tpm_loongson.c:48:17: error: initialization of 'int (*)(struct tpm_chip *, u8 *, size_t, size_t)' {aka 'int (*)(struct tpm_chip *, unsigned char *, long unsigned int, long unsigned int)'} from incompatible pointer type 'int (*)(struct tpm_chip *, u8 *, size_t)' {aka 'int (*)(struct tpm_chip *, unsigned char *, long unsigned int)'} [-Wincompatible-pointer-types] 48 | .send = tpm_loongson_send, | ^~~~~~~~~~~~~~~~~ drivers/char/tpm/tpm_loongson.c:48:17: note: (near initialization for 'tpm_loongson_ops.send') drivers/char/tpm/tpm_loongson.c:31:12: note: 'tpm_loongson_send' declared here 31 | static int tpm_loongson_send(struct tpm_chip *chip, u8 *buf, size_t count) | ^~~~~~~~~~~~~~~~~ Add the expected bufsiz parameter to tpm_loongson_send() to resolve the error. 
Fixes: 5c83b07df9c5 ("tpm: Add a driver for Loongson TPM device") Signed-off-by: Nathan Chancellor Reviewed-by: Jarkko Sakkinen Signed-off-by: Lee Jones Log: drivers/char/tpm/tpm_hygon.c:92:17: error: initialization of ‘int (*)(struct tpm_chip *, u8 *, size_t, size_t)’ {aka ‘int (*)(struct tpm_chip *, unsigned char *, long unsigned int, long unsigned int)’} from incompatible pointer type ‘int (*)(struct tpm_chip *, u8 *, size_t)’ {aka ‘int (*)(struct tpm_chip *, unsigned char *, long unsigned int)’} [-Werror=incompatible-pointer-types] 92 | .send = tpm_c_send, Link: https://github.com/deepin-community/kernel/pull/366 Signed-off-by: Wentao Guan --- drivers/char/tpm/tpm_hygon.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/char/tpm/tpm_hygon.c b/drivers/char/tpm/tpm_hygon.c index 8e509df90290..0effc1ab7654 100644 --- a/drivers/char/tpm/tpm_hygon.c +++ b/drivers/char/tpm/tpm_hygon.c @@ -63,7 +63,7 @@ static int tpm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) return ret; } -static int tpm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int tpm_c_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t count) { int ret, error; struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); From 2efed4a32a7ea440112f0e8fe9fade6ffc38cad2 Mon Sep 17 00:00:00 2001 From: chench00 Date: Thu, 1 Aug 2024 21:07:51 +0800 Subject: [PATCH 57/99] linux: tcm: add Hygon TCM2 driver hygon inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I9B9XS CVE: NA --------------------------- Hygon CPU implemented a firmware-based TCM2 device, which runs on its internal secure processor named PSP. The device underlying uses an unique private interface in the form of some hardware mailbox between X86 cores and PSP, which is for sure different from the TIS or CRB interfaces defined in the PTP spec. 
As such, to support this device we need a specialized driver which handles the basic send and receive operations required by the kernel TPM core layer. ACPI device info passed from underlying BIOS indicates the device presence by setting the _HID field to "HYGT0201", which distinguishes it from the rest of devices. If the BIOS does not support this setting, the driver will not be activated and thus has no impact to the system at all. Signed-off-by: chench (cherry picked from commit 7b00cc8b67768b572e6cc8940927a18b24c7e999) Signed-off-by: Wentao Guan --- drivers/char/tpm/Kconfig | 12 ++ drivers/char/tpm/Makefile | 1 + drivers/char/tpm/tcm_hygon.c | 243 +++++++++++++++++++++++++++++++++++ 3 files changed, 256 insertions(+) create mode 100644 drivers/char/tpm/tcm_hygon.c diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 15e0742d1aec..fb17dcb5f6c8 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -266,5 +266,17 @@ config TCG_HYGON driver as a module, choose M here; the module will be called tpm_hygon. +config TCM_HYGON + tristate "Hygon TCM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TCM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tcm_hygon. 
+ source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 67446c071997..726d98d04ece 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -48,3 +48,4 @@ obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o obj-$(CONFIG_TCG_SVSM) += tpm_svsm.o obj-$(CONFIG_TCG_LOONGSON) += tpm_loongson.o obj-$(CONFIG_TCG_HYGON) += tpm_hygon.o +obj-$(CONFIG_TCM_HYGON) += tcm_hygon.o diff --git a/drivers/char/tpm/tcm_hygon.c b/drivers/char/tpm/tcm_hygon.c new file mode 100644 index 000000000000..63f5e61d9b3e --- /dev/null +++ b/drivers/char/tpm/tcm_hygon.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TCM2.0 device driver. + * + * Copyright (C) 2023 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TCM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TCM_BUF_LEN 4096 + +struct tcm_hygon_priv { + u8 priv_buf[MAX_TCM_BUF_LEN]; +}; + +struct tcm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tcm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tcm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tcm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = sizeof(priv->priv_buf); + u32 
cmd_size = (u32)count; + u8 *p = priv->priv_buf; + + if (buf_size - sizeof(u32) - sizeof(u32) < count) { + ret = -E2BIG; + goto out; + } + + *(u32 *)p = cpu_to_be32(buf_size); + p += sizeof(buf_size); + *(u32 *)p = cpu_to_be32(cmd_size); + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TCM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: psp do cmd error, %d\n", __func__, error); + ret = -EIO; + } + +out: + return ret; +} + +static const struct tpm_class_ops tcm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tcm_c_recv, + .send = tcm_c_send, +}; + +static void tcm_bios_log_teardown(struct tpm_chip *chip) +{ + int i; + struct inode *inode; + + /* securityfs_remove currently doesn't take care of handling sync + * between removal and opening of pseudo files. To handle this, a + * workaround is added by making i_private = NULL here during removal + * and to check it during open(), both within inode_lock()/unlock(). + * This design ensures that open() either safely gets kref or fails. 
+ */ + for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { + if (chip->bios_dir[i]) { + inode = d_inode(chip->bios_dir[i]); + inode_lock(inode); + inode->i_private = NULL; + inode_unlock(inode); + securityfs_remove(chip->bios_dir[i]); + } + } +} + +static void tcm_chip_unregister(struct tpm_chip *chip) +{ + if (IS_ENABLED(CONFIG_HW_RANDOM_TPM)) + hwrng_unregister(&chip->hwrng); + tcm_bios_log_teardown(chip); + cdev_del(&chip->cdevs); + put_device(&chip->devs); + cdev_device_del(&chip->cdev, &chip->dev); +} + +static int hygon_tcm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tcm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tcm_c_ops); + if (IS_ERR(chip)) { + pr_err("tcmm_chip_alloc fail\n"); + ret = PTR_ERR(chip); + goto err; + } + + ret = dev_set_name(&chip->dev, "tcm%d", chip->dev_num); + if (ret) { + pr_err("tcm device set name fail\n"); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tcm chip_register fail\n"); + goto err; + } + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + device_del(&chip->devs); + ret = dev_set_name(&chip->devs, "tcmrm%d", chip->dev_num); + if (ret) { + pr_err("tcmrm device set name fail\n"); + goto err_dev; + } + ret = device_add(&chip->devs); + if (ret) { + pr_err("devs add fail\n"); + goto err_dev; + } + } + + pr_info("Hygon TCM2 detected\n"); + + return 0; + +err_dev: + tcm_chip_unregister(chip); + +err: + return ret; +} + +static void hygon_tcm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TCM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tcm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); 
+ +static const struct acpi_device_id hygon_tcm2_device_ids[] = { + {"HYGT0201", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tcm2_device_ids); + +static struct acpi_driver hygon_tcm2_acpi_driver = { + .name = "tcm_hygon", + .ids = hygon_tcm2_device_ids, + .ops = { + .add = hygon_tcm2_acpi_add, + .remove = hygon_tcm2_acpi_remove, + }, + .drv = { + .pm = &tcm_hygon_pm, + }, +}; + +static int __init hygon_tcm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tcm2_acpi_driver); +} + +static void __exit hygon_tcm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tcm2_acpi_driver); +} + +/* + * hygon_tcm2_init must be done after ccp module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(hygon_tcm2_init); +module_exit(hygon_tcm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TCM2 device driver for Hygon PSP"); From a8d739efe4098965461f0a4482052900ead9e3b0 Mon Sep 17 00:00:00 2001 From: Wentao Guan Date: Tue, 23 Dec 2025 11:17:33 +0800 Subject: [PATCH 58/99] tpm: hygon: don't bother with removal of files in directory we'll be removing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the following commit changed the tpm_chip struct, so modifiy hygon tcm: commit f42b8d78dee77107245ec5beee3eb01915bcae7f Author: Al Viro Date: Wed Jun 11 19:40:04 2025 -0400 tpm: don't bother with removal of files in directory we'll be removing FWIW, there is a reliable indication of removal - ->i_nlink going to 0 ;-) Signed-off-by: Al Viro Log: drivers/char/tpm/tcm_hygon.c: In function ‘tcm_bios_log_teardown’: drivers/char/tpm/tcm_hygon.c:109:21: error: used struct type value where scalar is required 109 | if (chip->bios_dir[i]) { | ^~~~ drivers/char/tpm/tcm_hygon.c:110:55: error: incompatible type for 
argument 1 of ‘d_inode’ 110 | inode = d_inode(chip->bios_dir[i]); | ~~~~~~~~~~~~~~^~~ | | | struct dentry In file included from ./include/linux/fs.h:9, from ./include/linux/compat.h:17, from ./arch/x86/include/asm/ia32.h:7, from ./arch/x86/include/asm/elf.h:10, from ./include/linux/elf.h:6, from ./include/linux/module.h:20, from drivers/char/tpm/tcm_hygon.c:13: ./include/linux/dcache.h:530:58: note: expected ‘const struct dentry *’ but argument is of type ‘struct dentry’ 530 | static inline struct inode *d_inode(const struct dentry *dentry) | ~~~~~~~~~~~~~~~~~~~~~^~~~~~ drivers/char/tpm/tcm_hygon.c:114:57: error: incompatible type for argument 1 of ‘securityfs_remove’ 114 | securityfs_remove(chip->bios_dir[i]); | ~~~~~~~~~~~~~~^~~ | | | struct dentry In file included from drivers/char/tpm/tcm_hygon.c:22: ./include/linux/security.h:2229:46: note: expected ‘struct dentry *’ but argument is of type ‘struct dentry’ 2229 | extern void securityfs_remove(struct dentry *dentry); | ~~~~~~~~~~~~~~~^~~~~~ make[5]: *** [scripts/Makefile.build:287:drivers/char/tpm/tcm_hygon.o] Link: https://github.com/deepin-community/kernel/pull/366 Signed-off-by: Wentao Guan --- drivers/char/tpm/tcm_hygon.c | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/drivers/char/tpm/tcm_hygon.c b/drivers/char/tpm/tcm_hygon.c index 63f5e61d9b3e..4d13c75de0f3 100644 --- a/drivers/char/tpm/tcm_hygon.c +++ b/drivers/char/tpm/tcm_hygon.c @@ -59,7 +59,7 @@ static int tcm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) return ret; } -static int tcm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +static int tcm_c_send(struct tpm_chip *chip, u8 *buf, size_t bufsiz, size_t count) { int ret, error; struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); @@ -96,24 +96,7 @@ static const struct tpm_class_ops tcm_c_ops = { static void tcm_bios_log_teardown(struct tpm_chip *chip) { - int i; - struct inode *inode; - - /* securityfs_remove currently doesn't take care 
of handling sync - * between removal and opening of pseudo files. To handle this, a - * workaround is added by making i_private = NULL here during removal - * and to check it during open(), both within inode_lock()/unlock(). - * This design ensures that open() either safely gets kref or fails. - */ - for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { - if (chip->bios_dir[i]) { - inode = d_inode(chip->bios_dir[i]); - inode_lock(inode); - inode->i_private = NULL; - inode_unlock(inode); - securityfs_remove(chip->bios_dir[i]); - } - } + securityfs_remove(chip->bios_dir); } static void tcm_chip_unregister(struct tpm_chip *chip) From 479f430f7a2a55a106a4cd76f2dfc4f971e9609c Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 16 Aug 2024 18:11:29 +0800 Subject: [PATCH 59/99] x86/mce: Add NMIs setup in machine_check func zhaoxin inclusion category: other ------------------- This will lead to console_owner_lock issue and HPET dead loop issue. For example, The HPET dead loop issue: CPU x CPU x ---- ---- read_hpet() arch_spin_trylock(&hpet.lock) [CPU x got the hpet.lock] #MCE happened do_machine_check() mce_panic() panic() kmsg_dump() pstore_dump() pstore_record_init() ktime_get_real_fast_ns() read_hpet() [dead loops] This may lead to read_hpet dead loops. The console_owner_lock issue is similar. To avoid these issues, add NMIs setup When Handling #MC Exceptions. 
Signed-off-by: leoliu-oc Link: https://github.com/deepin-community/kernel/pull/369 (cherry picked from commit 2421cdbdb21b49a840c5162911dd77858a3514c7) Signed-off-by: Wentao Guan --- arch/x86/kernel/cpu/mce/core.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index c26582d1b084..2b9db4a10cef 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -2108,11 +2108,17 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs) static __always_inline void exc_machine_check_user(struct pt_regs *regs) { + irqentry_state_t irq_state; + + irq_state = irqentry_nmi_enter(regs); + irqentry_enter_from_user_mode(regs); do_machine_check(regs); irqentry_exit_to_user_mode(regs); + + irqentry_nmi_exit(regs, irq_state); } #ifdef CONFIG_X86_64 From ada5d2ced64cb30bd3df09086d09ae5676594f34 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 16 Aug 2024 17:20:47 +0800 Subject: [PATCH 60/99] x86/mce/zhaoxin: Update mcelog to decode PCIE, ZDI/ZPI and DRAM errors 1. Adjusted some code logic Avoid having no log information when a CPER_SEC_PROC_GENERIC type error occurs on non-Zhaoxin platforms. 2. Optimized some code Removed some redundant function parameters and adjusted the types of some function parameters. 
Signed-off-by: leoliu-oc Link: https://github.com/deepin-community/kernel/pull/367 (cherry picked from commit f561e385641ef90a21bcb4da7eec51458673edab) Signed-off-by: Wentao Guan Conflicts: arch/x86/kernel/cpu/mce/apei.c drivers/acpi/apei/ghes.c --- arch/x86/include/asm/mce.h | 4 ++-- arch/x86/kernel/acpi/apei.c | 14 +++++++++----- arch/x86/kernel/cpu/mce/apei.c | 18 ++++++++++-------- drivers/acpi/apei/apei-base.c | 3 ++- drivers/acpi/apei/ghes.c | 19 +++++++++---------- include/acpi/apei.h | 2 +- 6 files changed, 33 insertions(+), 27 deletions(-) diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index cfebc73e8349..c9943da7d72e 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -329,11 +329,11 @@ struct cper_sec_mem_err; extern void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err); -extern void zx_apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err); +extern void zx_apei_mce_report_mem_error(struct cper_sec_mem_err *mem_err); struct cper_sec_pcie; extern void zx_apei_mce_report_pcie_error(int corrected, struct cper_sec_pcie *pcie_err); struct cper_sec_proc_generic; -extern void zx_apei_mce_report_zdi_error(int corrected, struct cper_sec_proc_generic *zdi_err); +extern void zx_apei_mce_report_zdi_error(struct cper_sec_proc_generic *zdi_err); /* * Enumerate new IP types and HWID values in AMD processors which support diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c index 26d9963b66bd..e3782035d7c3 100644 --- a/arch/x86/kernel/acpi/apei.c +++ b/arch/x86/kernel/acpi/apei.c @@ -42,7 +42,7 @@ void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err) #ifdef CONFIG_X86_MCE if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) - zx_apei_mce_report_mem_error(sev, mem_err); + zx_apei_mce_report_mem_error(mem_err); else apei_mce_report_mem_error(sev, mem_err); #endif @@ -57,13 +57,17 @@ void 
arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) #endif } -void arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err) +bool arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err) { #ifdef CONFIG_X86_MCE - if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN || - boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR) - zx_apei_mce_report_zdi_error(sev, zdi_err); + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC))) { + zx_apei_mce_report_zdi_error(zdi_err); + return true; + } #endif + return false; } int arch_apei_report_x86_error(struct cper_ia_proc_ctx *ctx_info, u64 lapic_id) diff --git a/arch/x86/kernel/cpu/mce/apei.c b/arch/x86/kernel/cpu/mce/apei.c index 0e1845a11f0e..346e63524318 100644 --- a/arch/x86/kernel/cpu/mce/apei.c +++ b/arch/x86/kernel/cpu/mce/apei.c @@ -65,7 +65,7 @@ void apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) } EXPORT_SYMBOL_GPL(apei_mce_report_mem_error); -void zx_apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err) +void zx_apei_mce_report_mem_error(struct cper_sec_mem_err *mem_err) { struct mce_hw_err err; struct mce *m; @@ -96,18 +96,19 @@ void zx_apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err apei_error = apei_write_mce(m); break; case 8: - if (mem_err->requestor_id == 2) + if (mem_err->requestor_id == 2) { m->status = 0x98200040000400b0; - else if (mem_err->requestor_id == 3) { + } else if (mem_err->requestor_id == 3) { m->status = 0xba400000000600a0; apei_error = apei_write_mce(m); - } else if (mem_err->requestor_id == 4) + } else if (mem_err->requestor_id == 4) { m->status = 0x98200100000300b0; - else if (mem_err->requestor_id == 5) { + } else if (mem_err->requestor_id == 5) { m->status = 0xba000000000500b0; apei_error = apei_write_mce(m); - } else + } else { pr_info("Undefined Parity error\n"); 
+ } break; case 10: if (mem_err->requestor_id == 6) { @@ -116,8 +117,9 @@ void zx_apei_mce_report_mem_error(int severity, struct cper_sec_mem_err *mem_err } else if (mem_err->requestor_id == 7) { m->status = 0xba000000000800b0; apei_error = apei_write_mce(m); - } else + } else { pr_info("Undefined dvad error\n"); + } break; case 13: m->status = 0x9c200040000100c0; @@ -169,7 +171,7 @@ void zx_apei_mce_report_pcie_error(int severity, struct cper_sec_pcie *pcie_err) } EXPORT_SYMBOL_GPL(zx_apei_mce_report_pcie_error); -void zx_apei_mce_report_zdi_error(int severity, struct cper_sec_proc_generic *zdi_err) +void zx_apei_mce_report_zdi_error(struct cper_sec_proc_generic *zdi_err) { struct mce_hw_err err; struct mce *m; diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index f652c46dc8dd..238c821af086 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -778,8 +778,9 @@ void __weak arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err) } EXPORT_SYMBOL_GPL(arch_apei_report_pcie_error); -void __weak arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err) +bool __weak arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err) { + return false; } EXPORT_SYMBOL_GPL(arch_apei_report_zdi_error); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index af092f4b2766..4a78877489e7 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -896,7 +896,7 @@ static void ghes_do_proc(struct ghes *ghes, atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err); - arch_apei_report_mem_error(sec_sev, mem_err); + arch_apei_report_mem_error(sev, mem_err); queued = ghes_handle_memory_failure(gdata, sev, sync); } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { struct cper_sec_pcie *pcie_err = acpi_hest_get_payload(gdata); @@ -921,17 +921,16 @@ static void ghes_do_proc(struct ghes *ghes, struct cxl_cper_event_rec *rec = acpi_hest_get_payload(gdata); 
cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec); - } else if (guid_equal(sec_type, &CPER_SEC_PROC_GENERIC)) { - struct cper_sec_proc_generic *zdi_err = acpi_hest_get_payload(gdata); - - arch_apei_report_zdi_error(sec_sev, zdi_err); } else { void *err = acpi_hest_get_payload(gdata); - ghes_defer_non_standard_event(gdata, sev); - log_non_standard_event(sec_type, fru_id, fru_text, - sec_sev, err, - gdata->error_data_length); + if (!arch_apei_report_zdi_error(sec_type, + (struct cper_sec_proc_generic *)err)) { + ghes_defer_non_standard_event(gdata, sev); + log_non_standard_event(sec_type, fru_id, fru_text, + sec_sev, err, + gdata->error_data_length); + } } } @@ -1359,7 +1358,7 @@ static int ghes_in_nmi_queue_one_entry(struct ghes *ghes, struct cper_sec_proc_generic *zdi_err = acpi_hest_get_payload(gdata); - arch_apei_report_zdi_error(sev, zdi_err); + arch_apei_report_zdi_error(sec_type, zdi_err); } } ghes_print_queued_estatus(); diff --git a/include/acpi/apei.h b/include/acpi/apei.h index fcb5814a3f43..808cfa7d16b1 100644 --- a/include/acpi/apei.h +++ b/include/acpi/apei.h @@ -53,7 +53,7 @@ int erst_clear(u64 record_id); int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data); void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err); void arch_apei_report_pcie_error(int sev, struct cper_sec_pcie *pcie_err); -void arch_apei_report_zdi_error(int sev, struct cper_sec_proc_generic *zdi_err); +bool arch_apei_report_zdi_error(guid_t *sec_type, struct cper_sec_proc_generic *zdi_err); #endif #endif From 65c96aaee8597c8562048c2f85a8c5a0178758b0 Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 16 Aug 2024 17:55:31 +0800 Subject: [PATCH 61/99] perf/x86/zhaoxin: fix warning log issue on KH-40000 There are some vulnerabilities present in KH40000 platforms with specific core counts, which may affect the monitoring of the uncore portion by the perf software and generate warning logs. This patch is intended to rectify this issue. 
Additionally, some code formatting adjustments have been made. Signed-off-by: leoliu-oc Link: https://github.com/deepin-community/kernel/pull/368 (cherry picked from commit 6d4eb575acef13cc73a54565becee0b4312c5d1a) Signed-off-by: Wentao Guan --- arch/x86/events/zhaoxin/core.c | 2 +- arch/x86/events/zhaoxin/uncore.c | 17 +++++++++-------- arch/x86/events/zhaoxin/uncore.h | 6 +++--- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index b6117c713a92..11c1fde6f943 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -659,7 +659,7 @@ __init int zhaoxin_pmu_init(void) if (boot_cpu_data.x86_model == 0x5b) pr_cont("Yongfeng events, "); - + if (boot_cpu_data.x86_model == 0x6b) pr_cont("Shijidadao events, "); diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c index 50b0591b4eae..f467b9109334 100644 --- a/arch/x86/events/zhaoxin/uncore.c +++ b/arch/x86/events/zhaoxin/uncore.c @@ -212,7 +212,7 @@ static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; #define KX7000_MC_B1_CHy_PMON_CTL0 0xee0 #define KX7000_MC_B1_CHy_PMON_BLK_CTL 0xef4 -#define KX7000_ZDI_DL_MMIO_PMON_CTR0 0xf00 +#define KX7000_ZDI_DL_MMIO_PMON_CTR0 0xf00 #define KX7000_ZDI_DL_MMIO_PMON_CTL0 0xf28 #define KX7000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 #define KX7000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 @@ -397,24 +397,24 @@ DEFINE_PER_CPU(cpumask_t, zx_subnode_core_bits); static void zx_gen_core_map(void) { - int i, nr, cpu; + int cpu, i; int cluster_id, subnode_id; for_each_present_cpu(cpu) { cluster_id = zx_topology_cluster_id(cpu); - for (i = 0; i < 4; i++) { - nr = (cluster_id << 2) + i; - cpumask_set_cpu(nr, &per_cpu(zx_cluster_core_bits, cpu)); + for_each_present_cpu(i) { + if (zx_topology_cluster_id(i) == cluster_id) + cpumask_set_cpu(i, &per_cpu(zx_cluster_core_bits, cpu)); } } for_each_present_cpu(cpu) { subnode_id = zx_topology_subnode_id(cpu); - for (i = 0; i < 8; i++) 
{ - nr = (subnode_id << 3) + i; - cpumask_set_cpu(nr, &per_cpu(zx_subnode_core_bits, cpu)); + for_each_present_cpu(i) { + if (zx_topology_subnode_id(i) == subnode_id) + cpumask_set_cpu(i, &per_cpu(zx_subnode_core_bits, cpu)); } } } @@ -2828,6 +2828,7 @@ static void kx7000_uncore_cpu_init(void) { u64 val; int cpu; + uncore_msr_uncores = kx7000_msr_uncores; /* clear bit 16 of MSR 0x1877 so that HIF can work normally */ diff --git a/arch/x86/events/zhaoxin/uncore.h b/arch/x86/events/zhaoxin/uncore.h index d17c7d26944b..5f33ad0fc3b0 100644 --- a/arch/x86/events/zhaoxin/uncore.h +++ b/arch/x86/events/zhaoxin/uncore.h @@ -45,9 +45,9 @@ struct zhaoxin_uncore_type { unsigned int fixed_ctl; unsigned int box_ctl; union { - unsigned int msr_offset; - unsigned int mmio_offset; - }; + unsigned int msr_offset; + unsigned int mmio_offset; + }; unsigned int num_shared_regs:8; unsigned int single_fixed:1; unsigned int pair_ctr_ctl:1; From 9a12c59345229965bbeaa5b88f7e182e3ccbd4fd Mon Sep 17 00:00:00 2001 From: leoliu-oc Date: Fri, 16 Aug 2024 18:15:57 +0800 Subject: [PATCH 62/99] x86/hpet: Read HPET directly if panic in progress MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit zhaoxin inclusion category: other ------------------- When the clocksource of the system is HPET,a CPU executing read_hpet might be interrupted by #GP/#PF to executing the panic,this may lead to read_hpet dead loops: CPU x CPU x ---- ---- read_hpet() arch_spin_trylock(&hpet.lock) [CPU x got the hpet.lock] #GP/#PF happened panic() kmsg_dump() pstore_dump() pstore_record_init() ktime_get_real_fast_ns() read_hpet() [dead loops] To avoid this dead loops, read HPET directly if panic in progress. 
Signed-off-by: leoliu-oc Link: https://github.com/deepin-community/kernel/pull/370 (cherry picked from commit 5ae09fc66edcf40dc1337f564a0c4c04f76a5aab) Signed-off-by: Wentao Guan --- arch/x86/kernel/hpet.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index d6387dde3ff9..c15dd503e0d8 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -798,6 +798,12 @@ static u64 read_hpet(struct clocksource *cs) if (in_nmi()) return (u64)hpet_readl(HPET_COUNTER); + /* + * Read HPET directly if panic in progress. + */ + if (unlikely(atomic_read(&panic_cpu) != PANIC_CPU_INVALID)) + return (u64)hpet_readl(HPET_COUNTER); + /* * Read the current state of the lock and HPET value atomically. */ From 0e510a084b5df15d16e1b0d2a76bd053fc61b388 Mon Sep 17 00:00:00 2001 From: WangYuli Date: Sat, 17 Aug 2024 15:16:08 +0800 Subject: [PATCH 63/99] can: phytium: Use phytium_can_of_ids only when CONFIG_OF enabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix follow errors: drivers/net/can/phytium/phytium_can_platform.c: In function ‘phytium_can_plat_probe’: drivers/net/can/phytium/phytium_can_platform.c:113:41: error: ‘phytium_can_of_ids’ undeclared (first use in this function); did you mean ‘phytium_can_acpi_ids’? 
113 | of_id = of_match_device(phytium_can_of_ids, &pdev->dev); | ^~~~~~~~~~~~~~~~~~ | phytium_can_acpi_ids drivers/net/can/phytium/phytium_can_platform.c:113:41: note: each undeclared identifier is reported only once for each function it appears in Signed-off-by: WangYuli Link: https://github.com/deepin-community/kernel/pull/374 (cherry picked from commit 759f31a09a18e826e6cd48bb85f898aabf8c8911) Signed-off-by: Wentao Guan --- drivers/net/can/phytium/phytium_can_platform.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/can/phytium/phytium_can_platform.c b/drivers/net/can/phytium/phytium_can_platform.c index 922e0021f8fb..d16584967ba0 100644 --- a/drivers/net/can/phytium/phytium_can_platform.c +++ b/drivers/net/can/phytium/phytium_can_platform.c @@ -76,7 +76,9 @@ static int phytium_can_plat_probe(struct platform_device *pdev) struct phytium_can_dev *cdev; struct phytium_can_plat *priv; struct resource *res; +# ifdef CONFIG_OF const struct of_device_id *of_id; +# endif /* CONFIG_OF */ const struct phytium_can_devtype *devtype = &phytium_can_data; u32 tx_fifo_depth; int ret; @@ -110,9 +112,11 @@ static int phytium_can_plat_probe(struct platform_device *pdev) } cdev->can.clock.freq = clk_get_rate(cdev->clk); +# ifdef CONFIG_OF of_id = of_match_device(phytium_can_of_ids, &pdev->dev); if (of_id && of_id->data) devtype = of_id->data; +# endif /* CONFIG_OF */ } else if (has_acpi_companion(&pdev->dev)) { ret = fwnode_property_read_u32(dev_fwnode(&pdev->dev), "clock-frequency", From 20f36a24913b767eb6f8ce574f17aad2eec57127 Mon Sep 17 00:00:00 2001 From: WangYuli Date: Sat, 17 Aug 2024 15:55:41 +0800 Subject: [PATCH 64/99] spi: Introduce dependencise for Phytium to avoid warnings Fix follow warnings in Kconfig: WARNING: unmet direct dependencies detected for SPI_PHYTIUM Depends on [n]: SPI [=y] && SPI_MASTER [=y] && (ARCH_PHYTIUM || COMPILE_TEST [=n]) Selected by [m]: - SPI_PHYTIUM_PLAT [=m] && SPI [=y] && SPI_MASTER [=y] - SPI_PHYTIUM_PCI [=m] && SPI [=y] 
&& SPI_MASTER [=y] && PCI [=y] WARNING: unmet direct dependencies detected for SPI_PHYTIUM Depends on [n]: SPI [=y] && SPI_MASTER [=y] && (ARCH_PHYTIUM || COMPILE_TEST [=n]) Selected by [m]: - SPI_PHYTIUM_PLAT [=m] && SPI [=y] && SPI_MASTER [=y] - SPI_PHYTIUM_PCI [=m] && SPI [=y] && SPI_MASTER [=y] && PCI [=y] Signed-off-by: WangYuli Link: https://github.com/deepin-community/kernel/pull/376 (cherry picked from commit cc12f457b221b73370ca4e7dbc902eb5fb3d113d) Signed-off-by: Wentao Guan --- drivers/spi/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 95ef5b6de7e8..f173fe4f513d 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -854,6 +854,7 @@ config SPI_PHYTIUM config SPI_PHYTIUM_PLAT tristate "Phytium SPI controller platform support" + depends on ARCH_PHYTIUM || COMPILE_TEST select SPI_PHYTIUM help This selects a platform driver for Phytium SPI controller. @@ -863,6 +864,7 @@ config SPI_PHYTIUM_PLAT config SPI_PHYTIUM_PCI tristate "Phytium SPI controller PCI support" + depends on ARCH_PHYTIUM || COMPILE_TEST depends on PCI select SPI_PHYTIUM help From 57fed69d2fbed07827835366ebbda0539044f96b Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sun, 10 Jan 2021 14:57:21 -0500 Subject: [PATCH 65/99] KVM: SVM: Fix the available ASID range for CSV2 guest hygon inclusion category: feature CVE: NA --------------------------- All the ASIDs in range [1, max_sev_asid] are available for CSV2 guest, regardless of the value of min_sev_asid. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/372 (cherry picked from commit 1a693dea813433b1ffb5a3dd02532bdc282e64cf) Signed-off-by: Wentao Guan Conflicts: arch/x86/kvm/svm/sev.c --- arch/x86/kvm/svm/sev.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index d4b65f847fee..67e845b35f04 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -210,6 +210,12 @@ static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type) min_asid = min_sev_asid; max_asid = max_sev_asid; } + /* + * No matter what the min_sev_asid is, all asids in range + * [1, max_sev_asid] can be used for CSV2 guest on Hygon CPUs. + */ + if (is_x86_vendor_hygon()) + max_asid = max_sev_asid; /* * The min ASID can end up larger than the max if basic SEV support is @@ -3105,14 +3111,23 @@ void __init sev_hardware_setup(void) goto out; } - /* Has the system been allocated ASIDs for SEV-ES? */ - if (min_sev_asid == 1) - goto out; - min_sev_es_asid = min_snp_asid = 1; max_sev_es_asid = max_snp_asid = min_sev_asid - 1; - sev_es_asid_count = min_sev_asid - 1; + + if (is_x86_vendor_hygon()) { + /* + * Ths ASIDs from 1 to max_sev_asid are available for hygon + * CSV2 guest. + */ + sev_es_asid_count = max_sev_asid; + } else { + /* Has the system been allocated ASIDs for SEV-ES? */ + if (min_sev_asid == 1) + goto out; + + sev_es_asid_count = min_sev_asid - 1; + } WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count)); sev_es_supported = true; sev_snp_supported = sev_snp_enabled && cc_platform_has(CC_ATTR_HOST_SEV_SNP); @@ -3161,7 +3176,8 @@ void __init sev_hardware_setup(void) sev_es_supported ? min_sev_es_asid <= max_sev_es_asid ? "enabled" : "unusable" : "disabled", - min_sev_es_asid, max_sev_es_asid); + is_x86_vendor_hygon() ? 1 : (min_sev_asid), + is_x86_vendor_hygon() ? 
max_sev_asid : max_sev_es_asid); if (boot_cpu_has(X86_FEATURE_SEV_SNP)) pr_info("SEV-SNP %s (ASIDs %u - %u)\n", str_enabled_disabled(sev_snp_supported), From 885d7dcd1efc017f657b839f9a685927f22c60c4 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Thu, 6 Apr 2023 09:03:58 +0800 Subject: [PATCH 66/99] x86/csv2: Keep in atomic context when holding ghcb page if the #VC comes from userspace hygon inclusion category: bugfix CVE: NA --------------------------- In function vc_raw_handle_exception(), it will holds ghcb page and calls __sev_get_ghcb() <- holding ghcb page to communicate with host vc_init_em_etxt() vc_handle_exitcode() __sev_put_ghcb() <- no longer holding ghcb page after the communication to emulate instruction which cause #VC. When the #VC comes from userspace, the code path user_exc_vmm_communication() vc_raw_handle_exception() cannot keep memory access in atomic context, this may lead to direct page fault handling if the emulation process access userspace address which doesn't exist in memory. For userspace address page fault handling, if it's not in the atomic context or the caller doesn't call pagefault_disable(), the irq may be enabled and there is a risk of generating more #VC. So it's necessary to switch to atomic context before emulate instructions which cause #VC. Add __preempt_count_{add,sub}() pair to keep the code between __sev_get_ghcb() and __sev_put_ghcb() in atomic context if #VC comes from userspace. If memory access fails during emulating, the caller will construct page fault info and forward a page fault later. 
Fixes: be1a5408868a ("x86/sev: Split up runtime #VC handler for correct state tracking") Signed-off-by: hanliyang [ Fix filename rename ] Link: https://github.com/deepin-community/kernel/pull/372 (cherry picked from commit f54f22d4b0c7773ba622a8a2ea294b89c7eb9f5c) Signed-off-by: Wentao Guan Conflicts: arch/x86/kernel/sev.c --- arch/x86/coco/sev/vc-handle.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/x86/coco/sev/vc-handle.c b/arch/x86/coco/sev/vc-handle.c index 7fc136a35334..1c7bc18e8a66 100644 --- a/arch/x86/coco/sev/vc-handle.c +++ b/arch/x86/coco/sev/vc-handle.c @@ -34,6 +34,7 @@ #include #include #include +#include static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, unsigned long vaddr, phys_addr_t *paddr) @@ -897,6 +898,15 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co struct ghcb *ghcb; bool ret = true; + /* + * Make sure the codes between __sev_get_ghcb() and __sev_put_ghcb() + * keep in atomic context. If #VC comes from kernel mode, then the + * codes here are in atomic context. If #VC comes from user mode, then + * it's necessary to switch to atomic context manually. 
+ */ + if (is_x86_vendor_hygon() && !in_nmi()) + __preempt_count_add(HARDIRQ_OFFSET); + ghcb = __sev_get_ghcb(&state); vc_ghcb_invalidate(ghcb); @@ -907,6 +917,9 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co __sev_put_ghcb(&state); + if (is_x86_vendor_hygon() && !in_nmi()) + __preempt_count_sub(HARDIRQ_OFFSET); + /* Done - now check the result */ switch (result) { case ES_OK: From 0b9f80fcec28ac3eb8ea78e19cb8f076a4348727 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Sun, 7 Jan 2024 04:47:42 +0800 Subject: [PATCH 67/99] x86/head/64: Flush caches for .bss..decrypted section after CR3 switches to early_top_pgt hygon inclusion category: bugfix CVE: NA --------------------------- The memory region of .bss..decrypted section maybe mapped with encryption before early boot stage of Linux. If the correspond stale caches lives in earlier stage were not flushed before we access that memory region in later stages, then Linux will crash because the stale caches will pollute the memory. Fix this issue by flush the caches with encrypted mapping before we access .bss..decrypted section. Fixes: b3f0907c71e0 ("x86/mm: Add .bss..decrypted section to hold shared variables") Signed-off-by: hanliyang [commit 681e2901330c5c27ce8b58dfdd92a3c339e47caf Author: Ard Biesheuvel Date: Fri Apr 18 16:12:59 2025 +0200 x86/boot: Drop RIP_REL_REF() uses from early SEV code Now that the early SEV code is built with -fPIC, RIP_REL_REF() has no effect and can be dropped.] 
(cherry picked from commit 10c4f8d7315a400fb0ba368d72c6b911281fb0f0) Signed-off-by: Wentao Guan --- arch/x86/kernel/head64.c | 110 ++++++++++++++++++++++++++++++++++++++ arch/x86/kernel/head_64.S | 10 ++++ 2 files changed, 120 insertions(+) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index fd28b53dbac5..7b4c508177e6 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -67,6 +67,116 @@ EXPORT_SYMBOL(vmalloc_base); unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4; EXPORT_SYMBOL(vmemmap_base); +#ifdef CONFIG_AMD_MEM_ENCRYPT + +extern bool bsp_flush_bss_decrypted_section_handled; + +/* Get CPUID data through GHCB MSR protocol */ +static int __cpuid_msr_protocol(u32 fn, int reg_idx, u32 *reg) +{ + unsigned int msr_idx = (unsigned int)MSR_AMD64_SEV_ES_GHCB; + struct msr m; + + m.q = GHCB_CPUID_REQ(fn, reg_idx); + + asm volatile("wrmsr" : : "c" (msr_idx), "a"(m.l), "d" (m.h) : "memory"); + VMGEXIT(); + asm volatile("rdmsr" : "=a" (m.l), "=d" (m.h) : "c" (msr_idx)); + + if (GHCB_RESP_CODE(m.q) != GHCB_MSR_CPUID_RESP) + return -EIO; + + *reg = m.h; + + return 0; +} + +static bool __should_do_clflush(void) +{ + u32 eax, ebx, ecx, edx; + int ret; + + /* Check if this is a Hygon CSV guest or an AMD SEV guest */ + if (!sme_get_me_mask() || + !(sev_status & MSR_AMD64_SEV_ENABLED)) + return false; + + /* Get cpuid vendor info, if cannot get vendor info, then return false */ + eax = 0x0; + if (!(sev_status & MSR_AMD64_SEV_ES_ENABLED)) { + native_cpuid(&eax, &ebx, &ecx, &edx); + } else { + /* + * Hygon CSV2 guest or AMD SEV-ES guest should use GHCB MSR + * protocol to get cpu vendor info. + */ + ret = __cpuid_msr_protocol(eax, GHCB_CPUID_REQ_EBX, &ebx); + ret = ret ? : __cpuid_msr_protocol(eax, GHCB_CPUID_REQ_ECX, &ecx); + ret = ret ? 
: __cpuid_msr_protocol(eax, GHCB_CPUID_REQ_EDX, &edx); + if (ret) + return false; + } + + /* Check if this is a Hygon CSV guest */ +#define STRING_Hygo 0x6f677948 +#define STRING_uine 0x656e6975 +#define STRING_nGen 0x6e65476e + + if (ebx != STRING_Hygo || ecx != STRING_uine || edx != STRING_nGen) + return false; + + return true; +} + +void __ref early_clflush_bss_decrypted_section(void); +__maybe_unused void __ref early_clflush_bss_decrypted_section(void) +{ + unsigned long vaddr, vaddr_end; + char *cl, *start, *end; + + /* Only allow bsp flush these caches and the bsp must at early boot stage */ + if (bsp_flush_bss_decrypted_section_handled) + return; + + if (read_cr3_pa() != __pa_nodebug(early_top_pgt)) + return; + + /* Only Hygon CSV guest should do the clflush */ + if (!__should_do_clflush()) + goto handled; + + /* + * The memory region of .bss..decrypted section maybe mapped + * with encryption in earlier stage. If the correspond stale + * caches lives in earlier stage were not flushed before we + * access that memory region, then Linux will crash later + * because the stale caches will pollute the memory. So we + * need flush the caches with encrypted mapping before we + * access .bss..decrypted section. + * + * The function __startup_64() have already filled the + * encrypted mapping for .bss..decrypted section, use that + * mapping here. + */ + vaddr = (unsigned long)__start_bss_decrypted - + __START_KERNEL_map + phys_base; + vaddr_end = (unsigned long)__end_bss_decrypted - + __START_KERNEL_map + phys_base; + + /* Hardcode cl-size to 64 at this stage. 
*/ + start = (char *)(vaddr & ~63); + end = (char *)((vaddr_end + 63) & ~63); + + asm volatile("mfence" : : : "memory"); + for (cl = start; cl != end; cl += 64) + clflush(cl); + asm volatile("mfence" : : : "memory"); + +handled: + bsp_flush_bss_decrypted_section_handled = true; +} +#endif + /* Wipe all early page tables except for the kernel symbol map */ static void __init reset_early_page_tables(void) { diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 21816b48537c..6c0ef565f806 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -372,6 +372,14 @@ SYM_INNER_LABEL(common_startup_64, SYM_L_LOCAL) shrq $32, %rdx wrmsr +#ifdef CONFIG_AMD_MEM_ENCRYPT + /* + * Ensure .bss.decrypted memory's stale caches which lived in earlier + * stage to be flushed. + */ + call early_clflush_bss_decrypted_section +#endif + /* Setup and Load IDT */ call early_setup_idt @@ -479,6 +487,8 @@ SYM_CODE_END(vc_boot_ghcb) SYM_DATA(initial_code, .quad x86_64_start_kernel) #ifdef CONFIG_AMD_MEM_ENCRYPT SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb) +SYM_DATA(bsp_flush_bss_decrypted_section_handled, .byte 0x0) + .balign 8 #endif SYM_DATA(trampoline_lock, .quad 0); From 9074b92af90339d1d33ca3d9f259419759d3895a Mon Sep 17 00:00:00 2001 From: hanliyang Date: Mon, 13 Nov 2023 01:54:26 +0800 Subject: [PATCH 68/99] KVM: SVM: Unmap ghcb pages if they're still mapped when destroy guest MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hygon inclusion category: bugfix CVE: NA --------------------------- The ghcb pages might be mapped when KVM handling the VMGEXIT events, and these ghcb pages will be unmapped when prepare to switch to guest mode. If we try to kill the userspace VMM (e.g. qemu) of a guest, it's possible that the mapped ghcb pages will never be unmapped which will cause memory leak. 
We exposed a serious memory leak by creating and killing multiple qemu processes for state encrypted guests frequently. In order to solve this issue, unmap ghcb pages if they're still mapped when destroy guest. Fixes: ce7ea0cfdc2e ("KVM: SVM: Move GHCB unmapping to fix RCU warning") Fixes: 291bd20d5d88 ("KVM: SVM: Add initial support for a VMGEXIT VMEXIT") Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/372 [commit 365e319208442a0807a96e9ea4d0b1fa338f1929 Author: Sean Christopherson Date: Thu Oct 10 11:23:35 2024 -0700 KVM: Pass in write/dirty to kvm_vcpu_map(), not kvm_vcpu_unmap() Now that all kvm_vcpu_{,un}map() users pass "true" for @dirty, have them pass "true" as a @writable param to kvm_vcpu_map(), and thus create a read-only mapping when possible. Note, creating read-only mappings can be theoretically slower, as they don't play nice with fast GUP due to the need to break CoW before mapping the underlying PFN. But practically speaking, creating a mapping isn't a super hot path, and getting a writable mapping for reading is weird and confusing.
Tested-by: Alex Bennée Signed-off-by: Sean Christopherson Tested-by: Dmitry Osipenko Signed-off-by: Paolo Bonzini Message-ID: <20241010182427.1434605-34-seanjc@google.com> ] (cherry picked from commit 33d29df37822b484c9eafa8e86db599d618ac9d4) Signed-off-by: Wentao Guan Conflicts: arch/x86/kvm/svm/sev.c --- arch/x86/kvm/svm/sev.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 67e845b35f04..27f4967e37f5 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -3330,6 +3330,10 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) __free_page(virt_to_page(svm->sev_es.vmsa)); + if (svm->sev_es.ghcb) { + kvm_vcpu_unmap(vcpu, &svm->sev_es.ghcb_map); + } + skip_vmsa_free: if (svm->sev_es.ghcb_sa_free) kvfree(svm->sev_es.ghcb_sa); From 5390f02e1a339914d382a5e2e6e8c9f752cc7ef2 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Fri, 8 Sep 2023 20:09:00 -0400 Subject: [PATCH 69/99] KVM: SVM: Add support for different CSV guests to reuse the same ASID hygon inclusion category: feature CVE: NA --------------------------- If user want to reuse one ASID for many CSV guests, he should provide a label (i.e. userid) and the length of the label when launch CSV guest. The reference count of the ASID will be increased if user launch a CSV guest with the label correspond to the ASID. When a CSV guest which launch with a label is destroyed, the reference count of the ASID correspond to the label will be decreased, and the ASID is freed only if the reference count becomes zero. The codes for reuse ASID is not compatible with CONFIG_CGROUP_MISC, we introduce CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID that depends on !CGROUP_MISC, the code take effect only when CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y. 
Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/372 (cherry picked from commit 71df94516e78abb6e462ded114033b717ede8d54) Signed-off-by: Wentao Guan Conflicts: arch/x86/kvm/Kconfig arch/x86/kvm/svm/sev.c --- arch/x86/kvm/Kconfig | 10 +++ arch/x86/kvm/svm/csv.c | 27 +++++++++ arch/x86/kvm/svm/csv.h | 22 +++++++ arch/x86/kvm/svm/sev.c | 128 ++++++++++++++++++++++++++++++++++++--- include/uapi/linux/kvm.h | 5 ++ 5 files changed, 184 insertions(+), 8 deletions(-) diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 278f08194ec8..16630e2542bb 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -240,4 +240,14 @@ config KVM_MAX_NR_VCPUS the memory footprint of each KVM guest, regardless of how many vCPUs are created for a given VM. +config KVM_SUPPORTS_CSV_REUSE_ASID + def_bool y + bool "Reuse the same ASID for different HYGON CSV guests" + depends on KVM_AMD_SEV && CPU_SUP_HYGON && HYGON_CSV + depends on !CGROUP_MISC + help + Provide support for reuse the same ASID for difference HYGON + CSV guests, this allow the user to create more CSV guests on + HYGON CPUs with limited ASIDs. 
+ endif # VIRTUALIZATION diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c index a98bb1a6bd63..12c489b205ef 100644 --- a/arch/x86/kvm/svm/csv.c +++ b/arch/x86/kvm/svm/csv.c @@ -1068,6 +1068,33 @@ static int csv_control_post_system_reset(struct kvm *kvm) return 0; } +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + +struct csv_asid_userid *csv_asid_userid_array; + +int csv_alloc_asid_userid_array(unsigned int nr_asids) +{ + int ret = 0; + + csv_asid_userid_array = kcalloc(nr_asids, sizeof(struct csv_asid_userid), + GFP_KERNEL_ACCOUNT); + if (!csv_asid_userid_array) + ret = -ENOMEM; + + if (ret) + pr_warn("Fail to allocate array, reuse ASID is unavailable\n"); + + return ret; +} + +void csv_free_asid_userid_array(void) +{ + kfree(csv_asid_userid_array); + csv_asid_userid_array = NULL; +} + +#endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + void csv_exit(void) { } diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h index 8ae2bc015f41..e5785d1aece3 100644 --- a/arch/x86/kvm/svm/csv.h +++ b/arch/x86/kvm/svm/csv.h @@ -32,6 +32,28 @@ struct csv_ringbuf_infos { int num; }; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + +#define ASID_USERID_LENGTH 20 + +struct csv_asid_userid { + int refcnt; // reference count of the ASID + u32 userid_len; + char userid[ASID_USERID_LENGTH]; +}; + +extern struct csv_asid_userid *csv_asid_userid_array; + +int csv_alloc_asid_userid_array(unsigned int nr_asids); +void csv_free_asid_userid_array(void); + +#else + +static inline int csv_alloc_asid_userid_array(unsigned int nr_asids) { return -ENOMEM; } +static inline void csv_free_asid_userid_array(void) { } + +#endif /* CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID */ + #ifdef CONFIG_HYGON_CSV /* diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 27f4967e37f5..5004172bcbe7 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -190,7 +190,11 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev) misc_cg_uncharge(type, sev->misc_cg, 1); } +#ifdef 
CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID +static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type, const char *userid, u32 userid_len) +#else static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type) +#endif { /* * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. @@ -238,6 +242,34 @@ static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type) mutex_lock(&sev_bitmap_lock); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For Hygon CPU, check whether the userid exists */ + if (is_x86_vendor_hygon() && userid && userid_len && + !WARN_ON_ONCE(!csv_asid_userid_array)) { + int i = !min_sev_asid ? 1 : min_sev_asid; + + for (; i <= max_sev_asid; i++) { + /* skip ASIDs without correspond userid */ + if (!csv_asid_userid_array[i].userid_len) + continue; + + /* skip if length of userid is different */ + if (csv_asid_userid_array[i].userid_len != userid_len) + continue; + + if (!memcmp(csv_asid_userid_array[i].userid, + userid, userid_len)) { + pr_debug("Found reusable asid %d\n", i); + /* Increase reference count if userid exists */ + csv_asid_userid_array[i].refcnt++; + + mutex_unlock(&sev_bitmap_lock); + return i; + } + } + } +#endif + again: asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid); if (asid > max_asid) { @@ -252,6 +284,16 @@ static int sev_asid_new(struct kvm_sev_info *sev, unsigned long vm_type) __set_bit(asid, sev_asid_bitmap); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For Hygon CPU, initialize the new userid */ + if (is_x86_vendor_hygon() && userid && userid_len && + !WARN_ON_ONCE(!csv_asid_userid_array)) { + memcpy(csv_asid_userid_array[asid].userid, userid, userid_len); + csv_asid_userid_array[asid].userid_len = userid_len; + csv_asid_userid_array[asid].refcnt = 1; + } +#endif + mutex_unlock(&sev_bitmap_lock); sev->asid = asid; @@ -275,7 +317,25 @@ static void sev_asid_free(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For 
Hygon CPU, decrease the reference count if userid exists */ + if (!is_x86_vendor_hygon() || !csv_asid_userid_array || + !csv_asid_userid_array[sev->asid].userid_len) { + __set_bit(sev->asid, sev_reclaim_asid_bitmap); + } else { + /* If we reach here, the reference count should be larger than 0. */ + WARN_ON(csv_asid_userid_array[sev->asid].refcnt <= 0); + + if (--csv_asid_userid_array[sev->asid].refcnt == 0) { + __set_bit(sev->asid, sev_reclaim_asid_bitmap); + + memset(&csv_asid_userid_array[sev->asid], 0, + sizeof(struct csv_asid_userid)); + } + } +#else __set_bit(sev->asid, sev_reclaim_asid_bitmap); +#endif for_each_possible_cpu(cpu) { sd = per_cpu_ptr(&svm_data, cpu); @@ -484,7 +544,44 @@ static int __sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp, if (snp_active) sev->vmsa_features |= SVM_SEV_FEAT_SNP_ACTIVE; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* Try reuse ASID iff userid array is available for HYGON CSV guests */ + if (is_x86_vendor_hygon() && csv_asid_userid_array) { + struct kvm_csv_init params; + void *csv_blob = NULL; + + memset(&params, 0, sizeof(params)); + + if (argp->data && + copy_from_user(&params, + (void __user *)(uintptr_t)argp->data, sizeof(params))) + return -EFAULT; + + if (params.userid_addr) { + if (params.len >= ASID_USERID_LENGTH) { + pr_err("Invalid length of userid %d > %d\n", + params.len, ASID_USERID_LENGTH); + return -EINVAL; + } + + csv_blob = psp_copy_user_blob(params.userid_addr, params.len); + if (IS_ERR(csv_blob)) { + pr_err("Copy userid failed, %llx (%u)\n", + params.userid_addr, params.len); + return PTR_ERR(csv_blob); + } + } + + ret = sev_asid_new(sev, vm_type, (const char *)csv_blob, params.len); + + /* The buffer @csv_blob is no longer used, free it. 
*/ + kfree(csv_blob); + } else { + ret = sev_asid_new(sev, vm_type, NULL, 0); + } +#else ret = sev_asid_new(sev, vm_type); +#endif if (ret) goto e_no_asid; @@ -3207,13 +3304,19 @@ void __init sev_hardware_setup(void) */ sev_install_hooks(); - /* - * Allocate a memory pool to speed up live migration of - * the CSV/CSV2 guests. If the allocation fails, no - * acceleration is performed at live migration. - */ - if (sev_enabled) + if (sev_enabled) { + /* + * Allocate a memory pool to speed up live migration of + * the CSV/CSV2 guests. If the allocation fails, no + * acceleration is performed at live migration. + */ csv_alloc_trans_mempool(); + /* + * Allocate a buffer to support ASID reuse; ASID reuse + * will not work if the allocation fails. + */ + csv_alloc_asid_userid_array(nr_asids); + } } #endif } @@ -3223,9 +3326,11 @@ void sev_hardware_unsetup(void) if (!sev_enabled) return; - /* Free the memory pool that allocated in sev_hardware_setup(). */ + /* Free the memory that was allocated in sev_hardware_setup(). */ + if (is_x86_vendor_hygon()) { csv_free_trans_mempool(); + csv_free_asid_userid_array(); + } /* No need to take sev_bitmap_lock, all VMs have been destroyed. 
*/ sev_flush_asids(1, max_sev_asid); @@ -3653,6 +3758,13 @@ int pre_sev_run(struct vcpu_svm *svm, int cpu) /* Assign the asid allocated with this SEV guest */ svm->asid = asid; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* If ASID is shared with other guests, then flush TLB before VMRUN */ + if (is_x86_vendor_hygon() && csv_asid_userid_array && + csv_asid_userid_array[asid].userid_len) + svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; +#endif + /* * Flush guest TLB: * diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index d5793da178c2..7240d64cb657 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1653,6 +1653,11 @@ struct kvm_csv_receive_update_vmsa { __u32 trans_len; }; +struct kvm_csv_init { + __u64 userid_addr; + __u32 len; +}; + /* ioctls for control vm during system reset, currently only for CSV */ #define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) #define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) From 27ee9824acceb559ad78e5c25b1b3a45be0a2627 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 10:36:36 +0800 Subject: [PATCH 70/99] crypto: ccp: Define CSV3 key management command id hygon inclusion category: feature CVE: NA --------------------------- Define Hygon CSV3 key management command id and structure. CSV3 is the technology for Hygon secure virtualization to improve security of guest with secure isolated memory technology in hardware. The command definition is available in CSV3 spec. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/375 (cherry picked from commit 5657473d46df4da226c8df9a6bcd4b8eb365922f) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 9 ++ include/linux/psp-hygon.h | 178 +++++++++++++++++++++++++++++ 2 files changed, 187 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index c0a9a17fe951..392040835bd8 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -46,6 +46,15 @@ int csv_cmd_buffer_len(int cmd) switch (cmd) { case CSV_CMD_HGSC_CERT_IMPORT: return sizeof(struct csv_data_hgsc_cert_import); case CSV_CMD_RING_BUFFER: return sizeof(struct csv_data_ring_buffer); + case CSV3_CMD_LAUNCH_ENCRYPT_DATA: return sizeof(struct csv3_data_launch_encrypt_data); + case CSV3_CMD_LAUNCH_ENCRYPT_VMCB: return sizeof(struct csv3_data_launch_encrypt_vmcb); + case CSV3_CMD_UPDATE_NPT: return sizeof(struct csv3_data_update_npt); + case CSV3_CMD_SET_SMR: return sizeof(struct csv3_data_set_smr); + case CSV3_CMD_SET_SMCR: return sizeof(struct csv3_data_set_smcr); + case CSV3_CMD_SET_GUEST_PRIVATE_MEMORY: + return sizeof(struct csv3_data_set_guest_private_memory); + case CSV3_CMD_DBG_READ_VMSA: return sizeof(struct csv3_data_dbg_read_vmsa); + case CSV3_CMD_DBG_READ_MEM: return sizeof(struct csv3_data_dbg_read_mem); default: return 0; } } diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index b87ad363e91a..efb618a0eb6e 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -28,6 +28,34 @@ enum csv_cmd { CSV_CMD_MAX, }; +/** + * Guest/platform management commands for CSV3 + */ +enum csv3_cmd { + /* Guest launch commands */ + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY = 0x200, + CSV3_CMD_LAUNCH_ENCRYPT_DATA = 0x201, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB = 0x202, + /* Guest NPT(Nested Page Table) management commands */ + CSV3_CMD_UPDATE_NPT = 0x203, + + /* Guest migration 
commands */ + CSV3_CMD_SEND_ENCRYPT_DATA = 0x210, + CSV3_CMD_SEND_ENCRYPT_CONTEXT = 0x211, + CSV3_CMD_RECEIVE_ENCRYPT_DATA = 0x212, + CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT = 0x213, + + /* Guest debug commands */ + CSV3_CMD_DBG_READ_VMSA = 0x220, + CSV3_CMD_DBG_READ_MEM = 0x221, + + /* Platform secure memory management commands */ + CSV3_CMD_SET_SMR = 0x230, + CSV3_CMD_SET_SMCR = 0x231, + + CSV3_CMD_MAX, +}; + /** * CSV communication state */ @@ -127,6 +155,156 @@ struct csv_data_ring_buffer { u16 int_on_empty; /* In */ } __packed; +/** + * struct csv3_data_launch_encrypt_data - CSV3_CMD_LAUNCH_ENCRYPT_DATA command + * + * @handle: handle of the VM to update + * @gpa: guest address where data is copied + * @length: len of memory to be encrypted + * @data_blocks: memory regions to hold data page address + */ +struct csv3_data_launch_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u32 length; /* In */ + u32 reserved1; /* In */ + u64 data_blocks[8]; /* In */ +} __packed; + +/** + * struct csv3_data_launch_encrypt_vmcb - CSV3_CMD_LAUNCH_ENCRYPT_VMCB command + * + * @handle: handle of the VM + * @vcpu_id: id of vcpu per vmsa/vmcb + * @vmsa_addr: memory address of initial vmsa data + * @vmsa_len: len of initial vmsa data + * @shadow_vmcb_addr: memory address of shadow vmcb data + * @shadow_vmcb_len: len of shadow vmcb data + * @secure_vmcb_addr: memory address of secure vmcb data + * @secure_vmcb_len: len of secure vmcb data + */ +struct csv3_data_launch_encrypt_vmcb { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 vcpu_id; /* In */ + u32 reserved1; /* In */ + u64 vmsa_addr; /* In */ + u32 vmsa_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_addr; /* In */ + u32 shadow_vmcb_len; /* In */ + u32 reserved3; /* In */ + u64 secure_vmcb_addr; /* Out */ + u32 secure_vmcb_len; /* Out */ +} __packed; + +/** + * struct csv3_data_update_npt - CSV3_CMD_UPDATE_NPT command + * + * @handle: handle assigned to the VM + * @error_code: 
nested page fault error code + * @gpa: guest page address where npf happens + * @spa: physical address which maps to gpa in host page table + * @level: page level which can be mapped in nested page table + * @page_attr: page attribute for gpa + * @page_attr_mask: which page attribute bit should be set + * @npages: number of pages from gpa is handled. + */ +struct csv3_data_update_npt { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 error_code; /* In */ + u32 reserved1; /* In */ + u64 gpa; /* In */ + u64 spa; /* In */ + u64 level; /* In */ + u64 page_attr; /* In */ + u64 page_attr_mask; /* In */ + u32 npages; /* In/Out */ +} __packed; + +/** + * struct csv3_data_mem_region - define a memory region + * + * @base_address: base address of a memory region + * @size: size of memory region + */ +struct csv3_data_memory_region { + u64 base_address; /* In */ + u64 size; /* In */ +} __packed; + +/** + * struct csv3_data_set_guest_private_memory - CSV3_CMD_SET_GUEST_PRIVATE_MEMORY + * command parameters + * + * @handle: handle assigned to the VM + * @nregions: number of memory regions + * @regions_paddr: address of memory containing multiple memory regions + */ +struct csv3_data_set_guest_private_memory { + u32 handle; /* In */ + u32 nregions; /* In */ + u64 regions_paddr; /* In */ +} __packed; + +/** + * struct csv3_data_set_smr - CSV3_CMD_SET_SMR command parameters + * + * @smr_entry_size: size of SMR entry + * @nregions: number of memory regions + * @regions_paddr: address of memory containing multiple memory regions + */ +struct csv3_data_set_smr { + u32 smr_entry_size; /* In */ + u32 nregions; /* In */ + u64 regions_paddr; /* In */ +} __packed; + +/** + * struct csv3_data_set_smcr - CSV3_CMD_SET_SMCR command parameters + * + * @base_address: start address of SMCR memory + * @size: size of SMCR memory + */ +struct csv3_data_set_smcr { + u64 base_address; /* In */ + u64 size; /* In */ +} __packed; + +/** + * struct csv3_data_dbg_read_vmsa - CSV3_CMD_DBG_READ_VMSA 
command parameters + * + * @handle: handle assigned to the VM + * @spa: system physical address of memory to get vmsa of the specific vcpu + * @size: size of the host memory + * @vcpu_id: the specific vcpu + */ +struct csv3_data_dbg_read_vmsa { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 spa; /* In */ + u32 size; /* In */ + u32 vcpu_id; /* In */ +} __packed; + +/** + * struct csv3_data_dbg_read_mem - CSV3_CMD_DBG_READ_MEM command parameters + * + * @handle: handle assigned to the VM + * @gpa: guest physical address of the memory to access + * @spa: system physical address of memory to get data from gpa + * @size: size of guest memory to access + */ +struct csv3_data_dbg_read_mem { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u64 spa; /* In */ + u32 size; /* In */ +} __packed; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); From 2b9f34b3241c7ed6ba04679bbe67dd7f27d47754 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 10:52:56 +0800 Subject: [PATCH 71/99] x86/mm: Manage CSV3 guest's private memory by CMA hygon inclusion category: feature CVE: NA --------------------------- The private memory of a CSV3 guest is isolated from VMM and has to be physically contiguous. CMA (Contiguous Memory Allocator) is a memory allocator within the kernel for contiguous physical memory. Use the CMA for the CSV3 private memory management. In order to support CSV3, select MMU and CMA when CONIFG_HYGON_CSV is configured. 
Signed-off-by: Xin Jiang Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/375 (cherry picked from commit 16d15f77d644bf78756d6b3cf1304c6c527f6ede) Signed-off-by: Wentao Guan Conflicts: arch/x86/kernel/setup.c include/linux/cma.h mm/cma.c mm/cma.h --- arch/x86/Kconfig | 2 + arch/x86/include/asm/csv.h | 53 +++++ arch/x86/kernel/setup.c | 5 + arch/x86/mm/mem_encrypt_hygon.c | 373 ++++++++++++++++++++++++++++++++ include/linux/cma.h | 1 + mm/cma.c | 30 ++- mm/cma.h | 2 +- 7 files changed, 461 insertions(+), 5 deletions(-) create mode 100644 arch/x86/include/asm/csv.h diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 54bca1f6c3d1..8ac2a950ccef 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1979,6 +1979,8 @@ config HYGON_CSV bool "Hygon secure virtualization CSV support" default y depends on CPU_SUP_HYGON && AMD_MEM_ENCRYPT + select MMU + select CMA help Hygon CSV integrates secure processor, memory encryption and memory isolation to provide the ability to protect guest's private diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h new file mode 100644 index 000000000000..fc575d2f00cf --- /dev/null +++ b/arch/x86/include/asm/csv.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon China Secure Virtualization (CSV) + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ * + * Author: Jiang Xin + */ + +#ifndef __ASM_X86_CSV_H__ +#define __ASM_X86_CSV_H__ + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_HYGON_CSV + +struct csv_mem { + uint64_t start; + uint64_t size; +}; + +#define CSV_MR_ALIGN_BITS (28) + +extern struct csv_mem *csv_smr; +extern unsigned int csv_smr_num; + +void __init early_csv_reserve_mem(void); + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align); +void csv_release_to_contiguous(phys_addr_t pa, size_t size); + +uint32_t csv_get_smr_entry_shift(void); + +#else /* !CONFIG_HYGON_CSV */ + +#define csv_smr NULL +#define csv_smr_num 0U + +static inline void __init early_csv_reserve_mem(void) { } + +static inline phys_addr_t +csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) { return 0; } +static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { } + +static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } + +#endif /* CONFIG_HYGON_CSV */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_X86_CSV_H__ */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 1b2edd07a3e1..7c25ffc595ef 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -56,6 +56,7 @@ #include #include #include +#include /* * max_low_pfn_mapped: highest directly mapped pfn < 4 GB @@ -1187,6 +1188,10 @@ void __init setup_arch(char **cmdline_p) x86_flattree_get_config(); initmem_init(); + + /* Try to reserve contiguous memory to support CSV3 */ + early_csv_reserve_mem(); + dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); if (boot_cpu_has(X86_FEATURE_GBPAGES)) { diff --git a/arch/x86/mm/mem_encrypt_hygon.c b/arch/x86/mm/mem_encrypt_hygon.c index 4c8a7f24aa41..adcd970de30a 100644 --- a/arch/x86/mm/mem_encrypt_hygon.c +++ b/arch/x86/mm/mem_encrypt_hygon.c @@ -16,6 +16,15 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include void 
print_hygon_cc_feature_info(void) { @@ -37,3 +46,367 @@ void print_hygon_cc_feature_info(void) if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) pr_info(" HYGON CSV2"); } + +/* + * Check whether host supports CSV3 in hygon platform. + * Called in the guest, it always returns false. + */ +static bool __init __maybe_unused csv3_check_cpu_support(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long me_mask; + u64 msr; + bool csv3_enabled; + + if (!is_x86_vendor_hygon()) + return false; + + if (sev_status) + return false; + + /* Check for the SME/CSV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return false; + +#define HYGON_SME_BIT BIT(0) +#define HYGON_CSV3_BIT BIT(30) + /* + * Check for the CSV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - SME support + * - Bit 1 - CSV support + * - Bit 3 - CSV2 support + * - Bit 30 - CSV3 support + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (!(eax & HYGON_SME_BIT)) + return false; + + csv3_enabled = !!(eax & HYGON_CSV3_BIT); + + me_mask = 1UL << (ebx & 0x3f); + + /* No SME if Hypervisor bit is set */ + eax = 1; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (ecx & BIT(31)) + return false; + + /* For SME, check the SYSCFG MSR */ + msr = __rdmsr(MSR_AMD64_SYSCFG); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + return false; + + return !!me_mask && csv3_enabled; +} + +/******************************************************************************/ +/**************************** CSV3 CMA interfaces *****************************/ +/******************************************************************************/ + +/* 0 percent of total memory by default*/ +static unsigned char csv_mem_percentage; +static unsigned long csv_mem_size; + +static int __init cmdline_parse_csv_mem_size(char *str) +{ + unsigned long size; + char *endp; + + if (str) { + size = memparse(str, &endp); + csv_mem_size = size; + if (!csv_mem_size) 
+ csv_mem_percentage = 0; + } + + return 0; +} +early_param("csv_mem_size", cmdline_parse_csv_mem_size); + +static int __init cmdline_parse_csv_mem_percentage(char *str) +{ + unsigned char percentage; + int ret; + + if (!str) + return 0; + + ret = kstrtou8(str, 10, &percentage); + if (!ret) { + csv_mem_percentage = min_t(unsigned char, percentage, 80); + if (csv_mem_percentage != percentage) + pr_warn("csv_mem_percentage is limited to 80.\n"); + } else { + /* Disable CSV CMA. */ + csv_mem_percentage = 0; + pr_err("csv_mem_percentage is invalid. (0 - 80) is expected.\n"); + } + + return ret; +} +early_param("csv_mem_percentage", cmdline_parse_csv_mem_percentage); + +#define NUM_SMR_ENTRIES (8 * 1024) +#define CSV_CMA_SHIFT PUD_SHIFT +#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) +#define MIN_SMR_ENTRY_SHIFT 23 +#define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) + +struct csv_mem *csv_smr; +EXPORT_SYMBOL_GPL(csv_smr); + +unsigned int csv_smr_num; +EXPORT_SYMBOL_GPL(csv_smr_num); + +struct csv_cma { + int fast; + struct cma *cma; +}; + +struct cma_array { + unsigned long count; + struct csv_cma csv_cma[]; +}; + +static unsigned int smr_entry_shift; +static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES]; + +static void csv_set_smr_entry_shift(unsigned int shift) +{ + smr_entry_shift = max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT); + pr_info("CSV-CMA: SMR entry size is 0x%x\n", 1 << smr_entry_shift); +} + +unsigned int csv_get_smr_entry_shift(void) +{ + return smr_entry_shift; +} +EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift); + +static unsigned long __init present_pages_in_node(int nid) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long nr_present = 0; + int i; + + for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL) + nr_present += range_end_pfn - range_start_pfn; + + return nr_present; +} + +static phys_addr_t __init csv_early_percent_memory_on_node(int nid) +{ + return (present_pages_in_node(nid) * 
csv_mem_percentage / 100) << PAGE_SHIFT; +} + +static void __init csv_cma_reserve_mem(void) +{ + int node, i; + unsigned long size; + int idx = 0; + int count; + int cma_array_size; + unsigned long max_spanned_size = 0; + + csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!csv_smr) { + pr_err("CSV-CMA: Fail to allocate csv_smr\n"); + return; + } + + for_each_node_state(node, N_ONLINE) { + int ret; + char name[CMA_MAX_NAME]; + struct cma_array *array; + unsigned long spanned_size; + unsigned long start = 0, end = 0; + struct csv_cma *csv_cma; + + size = csv_early_percent_memory_on_node(node); + count = DIV_ROUND_UP(size, 1 << CSV_CMA_SHIFT); + if (!count) + continue; + + cma_array_size = count * sizeof(*csv_cma) + sizeof(*array); + array = memblock_alloc_node(cma_array_size, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!array) { + pr_err("CSV-CMA: Fail to allocate cma_array\n"); + continue; + } + + array->count = 0; + csv_contiguous_pernuma_area[node] = array; + + for (i = 0; i < count; i++) { + csv_cma = &array->csv_cma[i]; + csv_cma->fast = 1; + snprintf(name, sizeof(name), "csv-n%dc%d", node, i); + ret = cma_declare_contiguous_nid(0, CSV_CMA_SIZE, 0, + 1 << CSV_MR_ALIGN_BITS, PMD_SHIFT - PAGE_SHIFT, + false, name, &(csv_cma->cma), node); + if (ret) { + pr_warn("CSV-CMA: Fail to reserve memory size 0x%x node %d\n", + 1 << CSV_CMA_SHIFT, node); + break; + } + + if (start > cma_get_base(csv_cma->cma) || !start) + start = cma_get_base(csv_cma->cma); + + if (end < cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma)) + end = cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma); + } + + if (!i) + continue; + + array->count = i; + spanned_size = end - start; + if (spanned_size > max_spanned_size) + max_spanned_size = spanned_size; + + csv_smr[idx].start = start; + csv_smr[idx].size = end - start; + idx++; + + pr_info("CSV-CMA: Node %d - reserve size 0x%016lx, (expected size 0x%016lx)\n", + node, (unsigned long)i * CSV_CMA_SIZE, 
size); + } + + csv_smr_num = idx; + WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1); + if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1)) + csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); +} + +#define CSV_CMA_AREAS 2458 + +void __init early_csv_reserve_mem(void) +{ + unsigned long total_pages; + + /* Only reserve memory on the host that enabled CSV3 feature */ + if (!csv3_check_cpu_support()) + return; + + if (cma_alloc_areas(CSV_CMA_AREAS)) + return; + + total_pages = PHYS_PFN(memblock_phys_mem_size()); + if (csv_mem_size) { + if (csv_mem_size < (total_pages << PAGE_SHIFT)) { + csv_mem_percentage = csv_mem_size * 100 / (total_pages << PAGE_SHIFT); + if (csv_mem_percentage > 80) + csv_mem_percentage = 80; /* Maximum percentage */ + } else + csv_mem_percentage = 80; /* Maximum percentage */ + } + + if (!csv_mem_percentage) { + pr_warn("CSV-CMA: Don't reserve any memory\n"); + return; + } + + csv_cma_reserve_mem(); +} + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) +{ + int nid; + int nr_nodes; + struct page *page = NULL; + phys_addr_t phys_addr; + int count; + struct csv_cma *csv_cma; + int fast = 1; + + if (!nodes_allowed || size > CSV_CMA_SIZE) { + pr_err("CSV-CMA: Invalid params, size = 0x%lx, nodes_allowed = %p\n", + size, nodes_allowed); + return 0; + } + + align = min_t(unsigned int, align, get_order(CSV_CMA_SIZE)); +retry: + nr_nodes = nodes_weight(*nodes_allowed); + + /* Traverse from current node */ + nid = numa_node_id(); + if (!node_isset(nid, *nodes_allowed)) + nid = next_node_in(nid, *nodes_allowed); + + for (; nr_nodes > 0; nid = next_node_in(nid, *nodes_allowed), nr_nodes--) { + struct cma_array *array = csv_contiguous_pernuma_area[nid]; + + if (!array) + continue; + + count = array->count; + while (count) { + csv_cma = &array->csv_cma[count - 1]; + + /* + * The value check of csv_cma->fast is lockless, but + * that's ok as this don't affect functional 
correntness + * whatever the value of csv_cma->fast. + */ + if (fast && !csv_cma->fast) { + count--; + continue; + } + page = cma_alloc(csv_cma->cma, PAGE_ALIGN(size) >> PAGE_SHIFT, + align, true); + if (page) { + page->private = (unsigned long)csv_cma; + if (!csv_cma->fast) + csv_cma->fast = 1; + goto success; + } else + csv_cma->fast = 0; + + count--; + } + } + + if (fast) { + fast = 0; + goto retry; + } else { + pr_err("CSV-CMA: Fail to alloc secure memory(size = 0x%lx)\n", size); + return 0; + } + +success: + phys_addr = page_to_phys(page); + clflush_cache_range(__va(phys_addr), size); + + return phys_addr; +} +EXPORT_SYMBOL_GPL(csv_alloc_from_contiguous); + +void csv_release_to_contiguous(phys_addr_t pa, size_t size) +{ + struct csv_cma *csv_cma; + struct page *page = pfn_to_page(pa >> PAGE_SHIFT); + + WARN_ON(!page); + if (likely(page)) { + csv_cma = (struct csv_cma *)page->private; + WARN_ON(!csv_cma); + if (likely(csv_cma)) { + page->private = 0; + csv_cma->fast = 1; + cma_release(csv_cma->cma, page, PAGE_ALIGN(size) >> PAGE_SHIFT); + } + } +} +EXPORT_SYMBOL_GPL(csv_release_to_contiguous); diff --git a/include/linux/cma.h b/include/linux/cma.h index 62d9c1cf6326..0274da142066 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -77,4 +77,5 @@ static inline bool cma_validate_zones(struct cma *cma) } #endif +extern int __init cma_alloc_areas(unsigned int max_cma_size); #endif diff --git a/mm/cma.c b/mm/cma.c index 813e6dc7b095..dedf5ef78eac 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -33,8 +33,11 @@ #include "internal.h" #include "cma.h" -struct cma cma_areas[MAX_CMA_AREAS]; -unsigned int cma_area_count; +static struct cma cma_areas_data[MAX_CMA_AREAS]; +static unsigned int cma_areas_size = MAX_CMA_AREAS; +struct cma *cma_areas = cma_areas_data; + +unsigned cma_area_count; phys_addr_t cma_get_base(const struct cma *cma) { @@ -220,7 +223,7 @@ static int __init cma_new_area(const char *name, phys_addr_t size, { struct cma *cma; - if (cma_area_count == 
ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } @@ -251,6 +254,25 @@ static void __init cma_drop_area(struct cma *cma) cma_area_count--; } +int __init cma_alloc_areas(unsigned int max_cma_size) +{ + struct cma *data; + + if (max_cma_size <= MAX_CMA_AREAS) + return 0; + + if (cma_area_count || cma_areas != cma_areas_data) + return -EPERM; + + data = memblock_alloc(max_cma_size * sizeof(*cma_areas), SMP_CACHE_BYTES); + if (!data) + return -ENOMEM; + + cma_areas = data; + cma_areas_size = max_cma_size; + return 0; +} + /** * cma_init_reserved_mem() - create custom contiguous area from reserved memory * @base: Base address of the reserved area @@ -439,7 +461,7 @@ static int __init __cma_declare_contiguous_nid(phys_addr_t *basep, pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", __func__, &size, &base, &limit, &alignment); - if (cma_area_count == ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } diff --git a/mm/cma.h b/mm/cma.h index c70180c36559..2e5a235ca1a4 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -71,7 +71,7 @@ enum cma_flags { CMA_ACTIVATED, }; -extern struct cma cma_areas[MAX_CMA_AREAS]; +extern struct cma *cma_areas; extern unsigned int cma_area_count; static inline unsigned long cma_bitmap_maxno(struct cma *cma, From b7c23c4acf8effd84cfd8456bb3215ad86be40f2 Mon Sep 17 00:00:00 2001 From: Xin Jiang Date: Mon, 11 Mar 2024 11:03:41 +0800 Subject: [PATCH 72/99] crypto: ccp: Add SET_SMR/SET_SMCR commands for CSV3 hygon inclusion category: feature CVE: NA --------------------------- Set guest memory regions in hygon hardware with SET_SMR command. Secure memory control region(SMCR) is a special memory region which is dedicated for CSV3 guest's meta data. SET_SMCR command is used to set SMCR memory in hygon hardware. 
Both SET_SMR and SET_SMCR should be issued early during platform initialization. Signed-off-by: Xin Jiang Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/375 (cherry picked from commit 5da4c8417333ea787e34d60097cf43739926deb7) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/sev-dev.c --- drivers/crypto/ccp/hygon/csv-dev.c | 100 +++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/csv-dev.h | 1 + drivers/crypto/ccp/hygon/psp-dev.h | 1 + drivers/crypto/ccp/sev-dev.c | 6 ++ 4 files changed, 108 insertions(+) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 392040835bd8..2bf6d1801650 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -15,6 +15,8 @@ #include #include +#include + #include "psp-dev.h" #include "csv-dev.h" #include "ring-buffer.h" @@ -567,3 +569,101 @@ int csv_check_stat_queue_status(int *psp_ret) return 0; } EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); + +#ifdef CONFIG_HYGON_CSV + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) +{ + int ret = 0; + unsigned int i = 0; + struct csv3_data_set_smr *cmd_set_smr; + struct csv3_data_set_smcr *cmd_set_smcr; + struct csv3_data_memory_region *smr_regions; + + if (!hygon_psp_hooks.sev_dev_hooks_installed) { + ret = -ENODEV; + goto l_end; + } + + if (!csv_smr || !csv_smr_num) { + ret = -EINVAL; + goto l_end; + } + + cmd_set_smr = kzalloc(sizeof(*cmd_set_smr), GFP_KERNEL); + if (!cmd_set_smr) { + ret = -ENOMEM; + goto l_end; + } + + smr_regions = kcalloc(csv_smr_num, sizeof(*smr_regions), GFP_KERNEL); + if (!smr_regions) { + ret = -ENOMEM; + goto e_free_cmd_set_smr; + } + + for (i = 0; i < csv_smr_num; i++) { + smr_regions[i].base_address = csv_smr[i].start; + smr_regions[i].size = csv_smr[i].size; + } + cmd_set_smr->smr_entry_size = 1 << csv_get_smr_entry_shift(); + cmd_set_smr->regions_paddr = __psp_pa(smr_regions); + cmd_set_smr->nregions = csv_smr_num; + 
ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMR, cmd_set_smr, error); + if (ret) { + pr_err("Fail to set SMR, ret %#x, error %#x\n", ret, *error); + goto e_free_smr_area; + } + + cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); + if (!cmd_set_smcr) { + ret = -ENOMEM; + goto e_free_smr_area; + } + + cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, + &node_online_map, + get_order(1 << CSV_MR_ALIGN_BITS)); + if (!cmd_set_smcr->base_address) { + pr_err("Fail to alloc SMCR memory\n"); + ret = -ENOMEM; + goto e_free_cmd_set_smcr; + } + + cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; + ret = hygon_psp_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); + if (ret) { + if (*error == SEV_RET_INVALID_COMMAND) + ret = 0; + else + pr_err("set smcr ret %#x, error %#x\n", ret, *error); + + csv_release_to_contiguous(cmd_set_smcr->base_address, + 1UL << CSV_MR_ALIGN_BITS); + } + +e_free_cmd_set_smcr: + kfree((void *)cmd_set_smcr); +e_free_smr_area: + kfree((void *)smr_regions); +e_free_cmd_set_smr: + kfree((void *)cmd_set_smr); + +l_end: + if (ret) + dev_warn(sev->dev, + "CSV3: fail to set secure memory region, CSV3 support unavailable\n"); + + return ret; +} + +#else /* !CONFIG_HYGON_CSV */ + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) +{ + dev_warn(sev->dev, + "CSV3: needs CONFIG_HYGON_CSV, CSV3 support unavailable\n"); + return -EFAULT; +} + +#endif /* CONFIG_HYGON_CSV */ diff --git a/drivers/crypto/ccp/hygon/csv-dev.h b/drivers/crypto/ccp/hygon/csv-dev.h index 92df6b723b59..416221cd7e8d 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.h +++ b/drivers/crypto/ccp/hygon/csv-dev.h @@ -36,6 +36,7 @@ extern const struct file_operations csv_fops; void csv_update_api_version(struct sev_user_data_status *status); int csv_cmd_buffer_len(int cmd); void csv_restore_mailbox_mode_postprocess(void); +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error); static inline bool 
csv_version_greater_or_equal(u32 build) { diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index f5679c1559a9..480b3c36a002 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -39,6 +39,7 @@ extern struct hygon_psp_hooks_table { int (*__sev_platform_shutdown_locked)(int *error); int (*sev_wait_cmd_ioc)(struct sev_device *sev, unsigned int *reg, unsigned int timeout); + int (*sev_do_cmd)(int cmd, void *data, int *psp_ret); long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 7ef7cbc23ef9..8c1cbce2ed74 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -2722,6 +2722,7 @@ static void sev_dev_install_hooks(void) hygon_psp_hooks.__sev_do_cmd_locked = __sev_do_cmd_locked; hygon_psp_hooks.__sev_platform_init_locked = __sev_platform_init_locked; hygon_psp_hooks.__sev_platform_shutdown_locked = __sev_platform_shutdown_locked; + hygon_psp_hooks.sev_do_cmd = sev_do_cmd; hygon_psp_hooks.sev_wait_cmd_ioc = sev_wait_cmd_ioc; hygon_psp_hooks.sev_ioctl = sev_ioctl; @@ -2900,6 +2901,7 @@ void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; u8 api_major, api_minor, build; + int error; if (!sev) return; @@ -2922,6 +2924,10 @@ void sev_pci_init(void) api_major, api_minor, build, sev->api_major, sev->api_minor, sev->build); + /* Set SMR for HYGON CSV3 */ + if (is_vendor_hygon() && boot_cpu_has(X86_FEATURE_CSV3)) + csv_platform_cmd_set_secure_memory_region(sev, &error); + return; err: From c0742c7c8d38e152e917bb17a9cd8855ad6bef17 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 18:13:46 +0800 Subject: [PATCH 73/99] crypto: ccp: Support SM2 algorithm for hygon ccp. 
hygon inclusion category: feature -------------------------------- In order to add SM2 driver for hygon ccp, relating to SM2_sign, SM2_verify, SM2_encrypt and SM2_decrypt. Signed-off-by: Yabin Li Signed-off-by: yangdepei (cherry picked from commit 7deb74f7891735bb87a900170ae3a1745ef0b612) Signed-off-by: Wentao Guan --- arch/x86/configs/deepin_x86_desktop_defconfig | 1 + drivers/crypto/ccp/Kconfig | 7 + drivers/crypto/ccp/Makefile | 3 + drivers/crypto/ccp/ccp-crypto-main.c | 15 + drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 1054 +++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h | 39 + drivers/crypto/ccp/ccp-dev-v5.c | 45 + drivers/crypto/ccp/ccp-dev.h | 8 + drivers/crypto/ccp/ccp-ops.c | 94 ++ include/linux/ccp.h | 49 + 10 files changed, 1315 insertions(+) create mode 100644 drivers/crypto/ccp/ccp-crypto-sm2-hygon.c diff --git a/arch/x86/configs/deepin_x86_desktop_defconfig b/arch/x86/configs/deepin_x86_desktop_defconfig index 12769841cd3b..0f56eb38e597 100644 --- a/arch/x86/configs/deepin_x86_desktop_defconfig +++ b/arch/x86/configs/deepin_x86_desktop_defconfig @@ -5295,6 +5295,7 @@ CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CRYPTO_DEV_SAFEXCEL=m CONFIG_CRYPTO_DEV_AMLOGIC_GXL=m CONFIG_CRYPTO_DEV_TSSE=m +CONFIG_HYGON_GM=y CONFIG_PKCS8_PRIVATE_KEY_PARSER=m CONFIG_SIGNED_PE_FILE_VERIFICATION=y CONFIG_SYSTEM_EXTRA_CERTIFICATE=y diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 9d5e7432441f..12c5c729e264 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -46,6 +46,13 @@ config CRYPTO_DEV_SP_PSP along with software-based Trusted Execution Environment (TEE) to enable third-party trusted applications. 
+config HYGON_GM + bool "Hygon GM (sm2/sm3/sm4) Interface" + default y + depends on CRYPTO_DEV_CCP_CRYPTO && X86_64 + help + Hygon GM ccp driver + config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 09099b0a419b..cf3a4d9ab272 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -31,4 +31,7 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o + obj-$(CONFIG_TDM_KERNEL_GUARD) += hygon/tdm-kernel-guard.o + +ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index bc90aba5162a..0c3023608d17 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -42,6 +42,10 @@ static unsigned int rsa_disable; module_param(rsa_disable, uint, 0444); MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value"); +static unsigned int sm_disable; +module_param(sm_disable, uint, 0444); +MODULE_PARM_DESC(sm_disable, "Disable use of SM2/SM3/SM4 - any non-zero value"); + /* List heads for the supported algorithms */ static LIST_HEAD(hash_algs); static LIST_HEAD(skcipher_algs); @@ -325,6 +329,17 @@ static int ccp_register_algs(void) { int ret; +#ifdef CONFIG_HYGON_GM + if (!sm_disable && boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + ret = ccp_register_sm2_hygon_algs(&akcipher_algs); + if (ret) + return ret; + + /* Return on hygon platform */ + return 0; + } +#endif + if (!aes_disable) { ret = ccp_register_aes_algs(&skcipher_algs); if (ret) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c new file mode 100644 index 000000000000..b1662953f541 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -0,0 +1,1054 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon Cryptographic Coprocessor (CCP) SM2 crypto API support 
+ * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +static const u8 sm2_ecc_p[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +}; + +static const u8 sm2_ecc_a[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, +}; + +static const u8 sm2_ecc_b[CCP_SM2_OPERAND_LEN] = { + 0x28, 0xE9, 0xFA, 0x9E, 0x9D, 0x9F, 0x5E, 0x34, + 0x4D, 0x5A, 0x9E, 0x4B, 0xCF, 0x65, 0x09, 0xA7, + 0xF3, 0x97, 0x89, 0xF5, 0x15, 0xAB, 0x8F, 0x92, + 0xDD, 0xBC, 0xBD, 0x41, 0x4D, 0x94, 0x0E, 0x93, +}; + +static const u8 sm2_ecc_n_sub_1[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x72, 0x03, 0xDF, 0x6B, 0x21, 0xC6, 0x05, 0x2B, + 0x53, 0xBB, 0xF4, 0x09, 0x39, 0xD5, 0x41, 0x22, +}; + +static const u8 sm2_ecc_gx[CCP_SM2_OPERAND_LEN] = { + 0x32, 0xC4, 0xAE, 0x2C, 0x1F, 0x19, 0x81, 0x19, + 0x5F, 0x99, 0x04, 0x46, 0x6A, 0x39, 0xC9, 0x94, + 0x8F, 0xE3, 0x0B, 0xBF, 0xF2, 0x66, 0x0B, 0xE1, + 0x71, 0x5A, 0x45, 0x89, 0x33, 0x4C, 0x74, 0xC7, +}; + +static const u8 sm2_ecc_gy[CCP_SM2_OPERAND_LEN] = { + 0xBC, 0x37, 0x36, 0xA2, 0xF4, 0xF6, 0x77, 0x9C, + 0x59, 0xBD, 0xCE, 0xE3, 0x6B, 0x69, 0x21, 0x53, + 0xD0, 0xA9, 0x87, 0x7C, 0xC6, 0x2A, 0x47, 0x40, + 0x02, 0xDF, 0x32, 0xE5, 0x21, 0x39, 0xF0, 0xA0, +}; + +struct ccp_sm2_verify_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* 
compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* input data r */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* input data s */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_lp_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_kg_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_sign_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* private key */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_mmul_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* mulplicand */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* mulplicator */ +}; + +struct ccp_sm2_dst { + union { + u8 result[CCP_SM2_OPERAND_LEN]; + u32 status; + } u; + u8 result_r[CCP_SM2_OPERAND_LEN]; + u8 result_s[CCP_SM2_OPERAND_LEN]; + u8 result_t[CCP_SM2_OPERAND_LEN]; +}; + +static bool ccp_sm2_is_zero(const u64 *data, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++) { + if (data[i]) + return false; + } + + return true; +} + +/* Return: + * 1: a > b + * -1: a < b + * 0: a = b + */ +static int ccp_sm2_fp_cmp(const u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu; + u32 i; + + for (i = 0; i < count; i++) { + a_cpu = be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + if (a_cpu > b_cpu) + return 1; + else if (a_cpu < b_cpu) + return -1; + } + + return 0; +} + +/* a = a + b */ +static void ccp_sm2_fp_add(u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu, c_cpu, d_cpu; + u32 carry = 0; + s32 i; + + for (i = count - 1; i >= 0; i--) { + a_cpu = be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + c_cpu = a_cpu + b_cpu; + d_cpu = c_cpu + carry; + a[i] = cpu_to_be64(d_cpu); + + if (c_cpu < a_cpu) + carry = 1; + else if (carry && !d_cpu) + carry = 
1; + else + carry = 0; + } +} + +/* a = -a */ +static void ccp_sm2_fp_neg(u64 *a, u32 count) +{ + u64 a_cpu, c_cpu; + s32 i; + + for (i = 0; i <= count - 1; i++) + a[i] = ~a[i]; + + for (i = count - 1; i >= 0; i--) { + a_cpu = be64_to_cpu(a[i]); + c_cpu = a_cpu + 1; + a[i] = cpu_to_be64(c_cpu); + + if (a_cpu < c_cpu) + break; + } +} + +/* a = a - b */ +static void ccp_sm2_fp_sub(u64 *a, u64 *b, u32 count) +{ + ccp_sm2_fp_neg(b, count); + ccp_sm2_fp_add(a, b, count); +} + +/* a and tmp must be 64B, b and c must be 32B + * a = b * c + */ +static void ccp_sm2_fp_mmul32(u8 *a, const u32 *b, const u32 *c, u8 *tmp) +{ + u64 b_cpu, c_cpu, m_cpu; + u32 rem_cpu; + u32 *base, *m_cur; + int i, j, iter; + + memset(a, 0, CCP_SM2_MMUL_LEN); + + iter = 7; + base = (u32 *)(tmp + CCP_SM2_MMUL_LEN - sizeof(u32)); + for (i = iter; i >= 0; i--) { + b_cpu = be32_to_cpu(b[i]); + memset(tmp, 0, CCP_SM2_MMUL_LEN); + + rem_cpu = 0; + m_cur = base; + for (j = iter; j >= 0; j--) { + c_cpu = be32_to_cpu(c[j]); + + m_cpu = b_cpu * c_cpu + rem_cpu; + rem_cpu = (u32)(m_cpu >> 32); + *m_cur = cpu_to_be32((u32)(m_cpu)); + m_cur--; + } + *m_cur = cpu_to_be32(rem_cpu); + ccp_sm2_fp_add((u64 *)a, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + + base--; + } +} + +/* mmul, dst, tmp must be 64B, remainder in mmul[32-63] + * high:low mod p + * = high*2^256+low mod p + * = high*(p+h)+low mod p + * = high*h+low mod p + * = high*(2^224+2^96-2^64+1)+low mod p + * iterating 8 times + */ +static void ccp_sm2_fast_mod_p(u8 *mmul, u8 *dst, u8 *tmp) +{ + u8 *mmul_high, *mmul_low; + u32 count; + int i, iter, ret; + + mmul_high = mmul; + mmul_low = mmul + CCP_SM2_OPERAND_LEN; + count = CCP_SM2_MMUL_LEN / sizeof(u64); + + iter = 8; + for (i = 0; i < iter; i++) { + /* dst = high * 2^224 */ + memset(dst, 0, CCP_SM2_MMUL_LEN); + memcpy(dst + 4, mmul_high, CCP_SM2_OPERAND_LEN); + + /* dst += high * 2^96 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 20, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 
*)dst, (u64 *)tmp, count); + + /* dst += high * 2^64 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 24, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_sub((u64 *)dst, (u64 *)tmp, count); + + /* dst += high * 1 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* dst += low */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_low, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* copy dst to mmul */ + memcpy(mmul, dst, CCP_SM2_MMUL_LEN); + } + + do { + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + ret = ccp_sm2_fp_cmp( + (u64 *)mmul, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + if (ret < 0) + break; + + ccp_sm2_fp_sub((u64 *)mmul, (u64 *)tmp, count); + } while (1); +} + +static int ccp_sm2_is_privkey_valid(const u8 *priv_key) +{ + u64 last, last_cpu; + bool zero; + int ret; + + /* private key is satisfied with(1, n-1) */ + zero = ccp_sm2_is_zero((const u64 *)priv_key, + CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64) - 1); + if (zero) { + last = *(const u64 *) + (priv_key + CCP_SM2_PRIVATE_KEY_LEN - sizeof(u64)); + last_cpu = be64_to_cpu(last); + if (last_cpu <= 1) + return -EINVAL; + } + + ret = ccp_sm2_fp_cmp((const u64 *)priv_key, + (const u64 *)sm2_ecc_n_sub_1, + CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_setprivkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + int ret; + + if (!key || keylen != CCP_SM2_PRIVATE_KEY_LEN) + return -EINVAL; + + ret = ccp_sm2_is_privkey_valid(key); + if (ret < 0) + return ret; + + memcpy(sm2->pri_key, key, CCP_SM2_PRIVATE_KEY_LEN); + sm2->pri_key_len = CCP_SM2_PRIVATE_KEY_LEN; + + return 0; +} + +static int ccp_sm2_post_cmd(struct ccp_sm2_req_ctx *rctx, + u32 src_size, enum 
ccp_sm2_mode mode, u32 rand) +{ + struct akcipher_request *req = rctx->req; + struct ccp_sm2_engine *sm2 = NULL; + int ret; + + sg_init_one(&rctx->src_sg, rctx->src, src_size); + memset(rctx->dst, 0, CCP_SM2_DST_SIZE); + sg_init_one(&rctx->dst_sg, rctx->dst, CCP_SM2_DST_SIZE); + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM2; + + sm2 = &rctx->cmd.u.sm2; + sm2->mode = mode; + sm2->rand = rand; /* whether read operand_k from trng */ + sm2->src = &rctx->src_sg; + sm2->src_len = src_size; + sm2->dst = &rctx->dst_sg; + sm2->dst_len = CCP_SM2_DST_SIZE; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm2_pubkey_strict_valid(const u8 *px, const u8 *py) +{ + u64 buf[CCP_SM2_OPERAND_LEN / sizeof(u64)]; + int ret1, ret2; + + /* private key is 1, corresponding public key is invalid */ + ret1 = memcmp(px, sm2_ecc_gx, CCP_SM2_OPERAND_LEN); + ret2 = memcmp(py, sm2_ecc_gy, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + /* private key is n - 1, corresponding public key is invalid */ + memcpy(buf, py, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add(buf, (const u64 *)sm2_ecc_gy, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + ret2 = memcmp(buf, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_is_pubkey_valid(struct ccp_sm2_req_ctx *rctx, bool strict) +{ + const u8 *px, *py; + u8 *tmp; + bool zero; + int ret; + + px = rctx->src + CCP_SM2_LP_SRC_SIZE; + py = px + CCP_SM2_OPERAND_LEN; + + zero = ccp_sm2_is_zero((u64 *)px, CCP_SM2_PUBLIC_KEY_LEN / sizeof(u64)); + if (zero) + return -EINVAL; + + /* x < p */ + ret = ccp_sm2_fp_cmp((u64 *)px, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + /* y < p */ + ret = ccp_sm2_fp_cmp((u64 *)py, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + if (strict) { + ret = 
ccp_sm2_pubkey_strict_valid(px, py); + if (ret < 0) + return ret; + } + + /* check whether y^2 = x^3 + ax + b */ + tmp = rctx->dst + CCP_SM2_MMUL_LEN; + /* y * y */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)py, (u32 *)py, tmp); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* x * x + a */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)px, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_a, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src, rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* (x * x + a) * x + b */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)rctx->src, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_b, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + + ret = memcmp(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + if (ret) + return -EINVAL; + + /* Because the cofactor of the ECC group is 1, + * the checking that [n]P=O is not required. 
+ */ + + return 0; +} + +static int ccp_sm2_setpubkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + struct ccp_sm2_req_ctx *rctx = NULL; + int ret; + + if (!key || keylen != CCP_SM2_PUBLIC_KEY_LEN) + return -EINVAL; + + /* check whether public key is valid */ + rctx = kmalloc(sizeof(*rctx), GFP_KERNEL); + if (!rctx) + return -ENOMEM; + + memcpy(rctx->src + CCP_SM2_LP_SRC_SIZE, key, CCP_SM2_PUBLIC_KEY_LEN); + ret = ccp_sm2_is_pubkey_valid(rctx, true); + kfree(rctx); + if (ret < 0) + return ret; + + /* public key is valid */ + memcpy(sm2->pub_key, key, CCP_SM2_PUBLIC_KEY_LEN); + sm2->pub_key_len = CCP_SM2_PUBLIC_KEY_LEN; + + return 0; +} + +static unsigned int ccp_sm2_maxsize(struct crypto_akcipher *tfm) +{ + return CCP_SM2_DST_SIZE; +} + +static int ccp_sm2_compute_c3(struct crypto_shash *shash, + struct scatterlist *sg, u32 mlen, + u8 *c3, const u8 *x2, const u8 *y2) +{ + unsigned int len, remain; + int ret; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret < 0) + return ret; + + /* update X2 */ + ret = crypto_shash_update(sdesc, x2, CCP_SM2_OPERAND_LEN); + if (ret < 0) + return ret; + + /* update M */ + remain = mlen; + while (sg) { + len = sg->length; + if (len > remain) + len = remain; + ret = crypto_shash_update(sdesc, (u8 *)sg_virt(sg), len); + if (ret < 0) + return ret; + + remain -= len; + if (!remain) + break; + + sg = sg_next(sg); + } + + /* ccp_sm2_encrypt should have checked length */ + if (unlikely(!sg)) + return -EINVAL; + + /* update Y2 */ + ret = crypto_shash_finup(sdesc, y2, CCP_SM2_OPERAND_LEN, c3); + + return ret; +} + +static bool ccp_sm2_msg_xor_t(u8 *msg, const u8 *t, u32 len) +{ + u64 *msg_cur, *msg_last, *t_cur; + u32 zero_cnt = 0; + u32 rem; + int i; + + msg_cur = (u64 *)msg; + t_cur = (u64 *)t; + msg_last = msg_cur + (len / sizeof(u64)); + while (msg_cur != msg_last) { + 
if (likely(*t_cur)) + *msg_cur = *msg_cur ^ *t_cur; + else + zero_cnt += sizeof(u64); + + msg_cur++; + t_cur++; + } + + msg = (u8 *)msg_cur; + t = (const u8 *)t_cur; + rem = len % sizeof(u64); + for (i = 0; i < rem; i++) { + if (likely(t[i])) + msg[i] = msg[i] ^ t[i]; + else + zero_cnt++; + } + + return zero_cnt == len; +} + +static int ccp_sm2_kdf_xor(struct crypto_shash *shash, + struct scatterlist *src, u32 src_offset, u32 src_len, + struct scatterlist *dst, u32 dst_offset, + u8 *x2_y2_ct, bool *all_zero, struct ccp_sm2_req_ctx *rctx) +{ + u32 *be_ct = NULL; + u32 ct, len, remain; + bool zero; + int ret = 0; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + *all_zero = true; + ct = 1; + be_ct = (u32 *)(x2_y2_ct + CCP_SM2_PUBLIC_KEY_LEN); + remain = src_len; + while (remain) { + len = SM3_DIGEST_SIZE; + if (len > remain) + len = remain; + *be_ct = cpu_to_be32(ct); + ret = crypto_shash_digest(sdesc, x2_y2_ct, + CCP_SM2_PUBLIC_KEY_LEN + sizeof(*be_ct), rctx->src); + if (ret < 0) + break; + + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, src, + src_offset, len, 0); + zero = ccp_sm2_msg_xor_t(rctx->src + SM3_DIGEST_SIZE, + rctx->src, len); + if (zero == false) + *all_zero = false; + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, dst, + dst_offset, len, 1); + + remain -= len; + src_offset += len; + dst_offset += len; + ct++; + } + + return ret; +} + +static void ccp_sm2_enc_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + scatterwalk_map_and_copy(rctx->src, req->src, 0, req->src_len, 0); + + /* C2 = M ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, 0, req->src_len, + 
req->dst, CCP_SM2_ENCRYPT_EXT_LEN, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (unlikely(all_zero)) { + ret = -EAGAIN; + goto e_hash; + } + + /* C3 */ + ret = ccp_sm2_compute_c3(shash, req->src, req->src_len, rctx->src, + dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* save C3 */ + scatterwalk_map_and_copy(rctx->src, req->dst, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 1); + +e_hash: + crypto_free_shash(shash); + +e_complete: + req->base.complete(&req->base, ret); +} + +static void ccp_sm2_enc_lp(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int ret; + + /* save C1 */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_PUBLIC_KEY_LEN, 1); + /* operand_k used by kg is placed in dst->result_t */ + memcpy(src->operand_k, dst->result_t, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + rctx->phase = CCP_SM2_ENC_PH_LP; + + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + if (ret != -EBUSY && ret != -EINPROGRESS) + req->base.complete(&req->base, ret); +} + +static int ccp_sm2_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + int nents; + int ret; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (!req->src_len || + req->dst_len < CCP_SM2_ENCRYPT_EXT_LEN + req->src_len) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if 
(nents < 0) + return -EINVAL; + + rctx->req = req; + rctx->phase = CCP_SM2_ENC_PH_KG; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_KG_SRC_SIZE, CCP_SM2_MODE_KG, 1); + + return ret; +} + +static void ccp_sm2_dec_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + /* M' = C2 ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, CCP_SM2_ENCRYPT_EXT_LEN, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, req->dst, 0, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (all_zero) { + ret = -EBADMSG; + goto e_hash; + } + + /* u */ + ret = ccp_sm2_compute_c3(shash, req->dst, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, + rctx->src, dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* load and compare C3 */ + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, req->src, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 0); + ret = memcmp(rctx->src, rctx->src + SM3_DIGEST_SIZE, SM3_DIGEST_SIZE); + if (ret) + ret = -EBADMSG; + +e_hash: + crypto_free_shash(shash); + +e_complete: + /* clear private key, plain, and dC1 */ + memset(rctx->src, 0, CCP_SM2_OPERAND_LEN * 2); + memset(dst, 0, CCP_SM2_DST_SIZE); + req->base.complete(&req->base, ret); +} + +static int ccp_sm2_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len <= (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE)) + return -EINVAL; + + if 
(req->dst_len < req->src_len - CCP_SM2_ENCRYPT_EXT_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + /* load C1 */ + scatterwalk_map_and_copy(rctx->src + CCP_SM2_LP_SRC_SIZE, + req->src, 0, CCP_SM2_PUBLIC_KEY_LEN, 0); + ret = ccp_sm2_is_pubkey_valid(rctx, false); + if (ret < 0) + return -EBADMSG; + + /* do kP */ + memcpy(src->operand_k, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + memcpy(src->operand_px, rctx->src + CCP_SM2_LP_SRC_SIZE, + CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, rctx->src + CCP_SM2_LP_SRC_SIZE + + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + rctx->req = req; + rctx->phase = CCP_SM2_DEC_PH_LP; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + + return ret; +} + +static int ccp_sm2_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN, 0); + memcpy(src->operand_d, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_SIGN_PH_SIGN; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_SIGN_SRC_SIZE, + CCP_SM2_MODE_SIGN, 1); + + return ret; +} + +static int ccp_sm2_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pub_key_len) + 
return -ENOKEY; + + if (req->src_len != CCP_SM2_OPERAND_LEN * 3) + return -EINVAL; + + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN * 3, 0); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + + return ret; +} + +static int ccp_sm2_verify_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (dst->u.status) + return -EBADMSG; + + return 0; +} + +static int ccp_sm2_sign_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + struct akcipher_request *req = rctx->req; + + if (unlikely(dst->u.status)) + return -EAGAIN; + + /* save signature */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_OPERAND_LEN * 2, 1); + /* clear private key */ + memset(src->operand_d, 0, CCP_SM2_PRIVATE_KEY_LEN); + + return 0; +} + +static int ccp_sm2_enc_kg_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + /* random operand_k is not satisfied with[1, n-1], try again */ + if (unlikely(dst->u.status)) + return -EAGAIN; + + INIT_WORK(&rctx->work, ccp_sm2_enc_lp); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_enc_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_enc_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_dec_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct 
ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_dec_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_complete(struct crypto_async_request *async_req, int ret) +{ + struct akcipher_request *req = + container_of(async_req, struct akcipher_request, base); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + + if (ret) + return ret; + + switch (rctx->phase) { + case CCP_SM2_SIGN_PH_SIGN: + ret = ccp_sm2_sign_handle(rctx); + break; + case CCP_SM2_VERIFY_PH_VERIFY: + ret = ccp_sm2_verify_handle(rctx); + break; + case CCP_SM2_ENC_PH_KG: + ret = ccp_sm2_enc_kg_handle(rctx); + break; + case CCP_SM2_ENC_PH_LP: + ret = ccp_sm2_enc_lp_handle(rctx); + break; + case CCP_SM2_DEC_PH_LP: + ret = ccp_sm2_dec_lp_handle(rctx); + break; + } + + return ret; +} + +static int ccp_sm2_init_tfm(struct crypto_akcipher *tfm) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + + akcipher_set_reqsize(tfm, sizeof(struct ccp_sm2_req_ctx)); + ctx->complete = ccp_sm2_complete; + + return 0; +} + +static void ccp_sm2_exit_tfm(struct crypto_akcipher *tfm) +{ +} + +static struct akcipher_alg ccp_sm2_defaults = { + .sign = ccp_sm2_sign, + .verify = ccp_sm2_verify, + .encrypt = ccp_sm2_encrypt, + .decrypt = ccp_sm2_decrypt, + .set_pub_key = ccp_sm2_setpubkey, + .set_priv_key = ccp_sm2_setprivkey, + .max_size = ccp_sm2_maxsize, + .init = ccp_sm2_init_tfm, + .exit = ccp_sm2_exit_tfm, + .base = { + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_priority = CCP_CRA_PRIORITY, + .cra_module = THIS_MODULE, + }, +}; + +struct ccp_sm2_def { + unsigned int version; + const char *name; + const char *driver_name; + struct akcipher_alg *alg_defaults; +}; + +static struct ccp_sm2_def sm2_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm2", + .driver_name = "sm2-ccp", + .alg_defaults = &ccp_sm2_defaults, + } 
+}; + +static int ccp_register_sm2_hygon_alg(struct list_head *head, + const struct ccp_sm2_def *def) +{ + struct ccp_crypto_akcipher_alg *ccp_alg; + struct akcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + + ret = crypto_register_akcipher(alg); + if (ret) { + pr_err("%s akcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm2_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm2_algs); i++) { + if (sm2_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm2_hygon_alg(head, &sm2_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index e42450d07168..5133b921a5f5 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -258,6 +258,43 @@ struct ccp_rsa_req_ctx { #define CCP_RSA_MAXMOD (4 * 1024 / 8) #define CCP5_RSA_MAXMOD (16 * 1024 / 8) +/***** SM2 related defines *****/ +#define CCP_SM2_OPERAND_LEN 32 +#define CCP_SM2_PRIVATE_KEY_LEN CCP_SM2_OPERAND_LEN +#define CCP_SM2_PUBLIC_KEY_LEN (CCP_SM2_OPERAND_LEN * 2) +#define CCP_SM2_ENCRYPT_EXT_LEN (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE) +#define CCP_SM2_MMUL_LEN (CCP_SM2_OPERAND_LEN * 2) + +struct ccp_sm2_ctx { + u32 pri_key_len; + u32 pub_key_len; + u8 pri_key[CCP_SM2_PRIVATE_KEY_LEN]; + u8 pub_key[CCP_SM2_PUBLIC_KEY_LEN]; +}; + +enum ccp_sm2_op_phase { + CCP_SM2_SIGN_PH_SIGN, + CCP_SM2_VERIFY_PH_VERIFY, + CCP_SM2_ENC_PH_KG, + CCP_SM2_ENC_PH_LP, + CCP_SM2_DEC_PH_LP +}; + +struct 
ccp_sm2_req_ctx { + enum ccp_sm2_op_phase phase; + struct akcipher_request *req; + + u8 src[CCP_SM2_VERIFY_SRC_SIZE]; + u8 dst[CCP_SM2_DST_SIZE]; + + struct scatterlist src_sg; + struct scatterlist dst_sg; + + struct work_struct work; + + struct ccp_cmd cmd; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -267,6 +304,7 @@ struct ccp_ctx { struct ccp_rsa_ctx rsa; struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; + struct ccp_sm2_ctx sm2; } u; }; @@ -282,5 +320,6 @@ int ccp_register_aes_aeads(struct list_head *head); int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); +int ccp_register_sm2_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 7b73332d6aa1..2c144fa64e88 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -131,6 +131,11 @@ union ccp_function { u16 type:2; u16 mode:3; } ecc; + struct { + u16 rand:1; + u16 rsvd:11; + u16 mode:3; + } sm2; u16 raw; }; @@ -151,6 +156,8 @@ union ccp_function { #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) #define CCP_ECC_MODE(p) ((p)->ecc.mode) #define CCP_ECC_AFFINE(p) ((p)->ecc.one) +#define CCP_SM2_RAND(p) ((p)->sm2.rand) +#define CCP_SM2_MODE(p) ((p)->sm2.mode) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -584,6 +591,43 @@ static int ccp5_perform_ecc(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm2(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + + op->cmd_q->total_sm2_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 1; + CCP5_CMD_EOM(&desc) = 1; + 
CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM2_RAND(&function) = op->u.sm2.rand; + CCP_SM2_MODE(&function) = op->u.sm2.mode; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data must match with mode */ + CCP5_CMD_LEN(&desc) = saddr->length; + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1103,6 +1147,7 @@ static const struct ccp_actions ccp5_actions = { .rsa = ccp5_perform_rsa, .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, + .sm2 = ccp5_perform_sm2, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 83350e2d9821..2b45309b78fa 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -334,6 +334,7 @@ struct ccp_cmd_queue { unsigned long total_rsa_ops; unsigned long total_pt_ops; unsigned long total_ecc_ops; + unsigned long total_sm2_ops; } ____cacheline_aligned; struct ccp_device { @@ -528,6 +529,11 @@ struct ccp_ecc_op { enum ccp_ecc_function function; }; +struct ccp_sm2_op { + u32 rand; + enum ccp_sm2_mode mode; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -551,6 +557,7 @@ struct ccp_op { struct ccp_rsa_op rsa; struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; + struct ccp_sm2_op sm2; } u; }; @@ -657,6 +664,7 @@ struct ccp_actions { int (*rsa)(struct ccp_op *); int (*passthru)(struct ccp_op *); int (*ecc)(struct ccp_op *); + int (*sm2)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int 
(*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index d78865d9d5f0..a53d895f639e 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2473,6 +2473,97 @@ ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) } } +static int ccp_run_sm2_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm2_engine *sm2 = &cmd->u.sm2; + struct ccp_data src, dst; + struct ccp_op op; + int ret; + + if (!sm2->src || !sm2->dst) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.init = 1; + op.eom = 1; + op.u.sm2.rand = sm2->rand & 0x1; + op.u.sm2.mode = sm2->mode; + + memset(&src, 0, sizeof(src)); + ret = ccp_init_sg_workarea(&src.sg_wa, cmd_q->ccp->dev, + sm2->src, sm2->src_len, DMA_TO_DEVICE); + if (ret) + return ret; + + /* if src isn't contiguous, should copy to a contiguous buffer */ + if (src.sg_wa.dma_count == 1) { + op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); + } else { + ccp_sg_free(&src.sg_wa); + ret = ccp_init_dm_workarea(&src.dm_wa, cmd_q, sm2->src_len, + DMA_TO_DEVICE); + if (ret) + goto e_src; + + ccp_set_dm_area(&src.dm_wa, 0, sm2->src, 0, sm2->src_len); + op.src.u.dma.address = src.dm_wa.dma.address; + } + + op.src.type = CCP_MEMTYPE_SYSTEM; + op.src.u.dma.offset = 0; + op.src.u.dma.length = sm2->src_len; + op.src.u.dma.dir = DMA_TO_DEVICE; + + memset(&dst, 0, sizeof(dst)); + ret = ccp_init_sg_workarea(&dst.sg_wa, cmd_q->ccp->dev, + sm2->dst, sm2->dst_len, DMA_FROM_DEVICE); + if (ret) + goto e_src; + + /* if dst isn't contiguous, should copy to a contiguous buffer */ + if (dst.sg_wa.dma_count == 1) { + op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); + } else { + ccp_sg_free(&dst.sg_wa); + ret = ccp_init_dm_workarea(&dst.dm_wa, cmd_q, sm2->dst_len, + DMA_FROM_DEVICE); + if (ret) + goto e_dst; + + op.dst.u.dma.address = dst.dm_wa.dma.address; + } + + 
op.dst.type = CCP_MEMTYPE_SYSTEM; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = sm2->dst_len; + op.dst.u.dma.dir = DMA_FROM_DEVICE; + + ret = cmd_q->ccp->vdata->perform->sm2(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + if (dst.dm_wa.address) { + ccp_get_dm_area(&dst.dm_wa, 0, sm2->dst, 0, sm2->dst_len); + memset(dst.dm_wa.address, 0, sm2->dst_len); + } + +e_dst: + ccp_free_data(&dst, cmd_q); + +e_src: + if (src.dm_wa.address) + memset(src.dm_wa.address, 0, sm2->src_len); + + ccp_free_data(&src, cmd_q); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2517,6 +2608,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_ECC: ret = ccp_run_ecc_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM2: + ret = ccp_run_sm2_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 868924dec5a1..bd947cb8d41f 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -17,6 +17,7 @@ #include #include #include +#include struct ccp_device; struct ccp_cmd; @@ -587,6 +588,51 @@ struct ccp_ecc_engine { u16 ecc_result; }; +/***** SM2 engine *****/ +#define CCP_SM2_VERIFY_SRC_SIZE 160 +#define CCP_SM2_LP_SRC_SIZE 96 +#define CCP_SM2_KG_SRC_SIZE 32 +#define CCP_SM2_SIGN_SRC_SIZE 96 +#define CCP_SM2_MMUL_SRC_SIZE 64 +#define CCP_SM2_DST_SIZE 128 + +/** + * ccp_sm2_mode - SM2 operation mode + * + * @CCP_SM2_MODE_VERIFY: Verify mode + * @CCP_SM2_MODE_LP: LP mode + * @CCP_SM2_MODE_KG: KG mode + * @CCP_SM2_MODE_SIGN: SIGN mode + * @CCP_SM2_MODE_MMUL: MMUL mode + */ +enum ccp_sm2_mode { + CCP_SM2_MODE_VERIFY, + CCP_SM2_MODE_LP, + CCP_SM2_MODE_KG, + CCP_SM2_MODE_SIGN, + CCP_SM2_MODE_MMUL, + CCP_SM2_MODE__LAST, +}; + +/** + * struct ccp_sm2_engine - CCP SM2 operation + * @mode: SM2 operation mode + * @rand: indicateing that operand_k is from TRNG or not + * @src: data to be used for this operation + * @dst: data produced 
by this operation + * @src_len: length in bytes of data used for this operation + * @dst_len: length in bytes of data produced by this operation + */ +struct ccp_sm2_engine { + enum ccp_sm2_mode mode; + u32 rand; + + struct scatterlist *src; + u32 src_len; + + struct scatterlist *dst; + u32 dst_len; +}; /** * ccp_engine - CCP operation identifiers @@ -599,6 +645,7 @@ struct ccp_ecc_engine { * @CCP_ENGINE_PASSTHRU: pass-through operation * @CCP_ENGINE_ZLIB_DECOMPRESS: unused * @CCP_ENGINE_ECC: ECC operation + * @CCP_ENGINE_SM2: SM2 operation */ enum ccp_engine { CCP_ENGINE_AES = 0, @@ -609,6 +656,7 @@ enum ccp_engine { CCP_ENGINE_PASSTHRU, CCP_ENGINE_ZLIB_DECOMPRESS, CCP_ENGINE_ECC, + CCP_ENGINE_SM2 = 8, /* fixed value */ CCP_ENGINE__LAST, }; @@ -657,6 +705,7 @@ struct ccp_cmd { struct ccp_passthru_engine passthru; struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; + struct ccp_sm2_engine sm2; } u; /* Completion callback support */ From 3408ae42ae63a0eb58feb8c4fdd604967074a644 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 18:20:16 +0800 Subject: [PATCH 74/99] crypto: ccp: Support SM3 algorithm for hygon ccp. hygon inclusion category: feature -------------------------------- In order to add SM3 driver for hygon ccp, include sm3-hmac. 
Signed-off-by: Yabin Li Signed-off-by: yangdepei (cherry picked from commit 3d136acda00cde4662b2dadc689218bbc0fb551b) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/ccp-crypto-main.c | 4 + drivers/crypto/ccp/ccp-crypto-sm3-hygon.c | 489 ++++++++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h | 49 +++ drivers/crypto/ccp/ccp-dev-v5.c | 45 ++ drivers/crypto/ccp/ccp-dev.h | 10 + drivers/crypto/ccp/ccp-ops.c | 157 +++++++ include/linux/ccp.h | 44 ++ 8 files changed, 800 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/ccp-crypto-sm3-hygon.c diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index cf3a4d9ab272..13231ba2da5a 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -34,4 +34,5 @@ ccp-crypto-objs := ccp-crypto-main.o \ obj-$(CONFIG_TDM_KERNEL_GUARD) += hygon/tdm-kernel-guard.o -ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o +ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ + ccp-crypto-sm3-hygon.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 0c3023608d17..938231fa295c 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -335,6 +335,10 @@ static int ccp_register_algs(void) if (ret) return ret; + ret = ccp_register_sm3_hygon_algs(&hash_algs); + if (ret) + return ret; + /* Return on hygon platform */ return 0; } diff --git a/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c new file mode 100644 index 000000000000..9eca6a96cf14 --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c @@ -0,0 +1,489 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon Cryptographic Coprocessor (CCP) SM3 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +static int ccp_sm3_complete(struct crypto_async_request *async_req, int ret) +{ + struct ahash_request *req = ahash_request_cast(async_req); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + + if (ret) + goto e_free; + + rctx->msg_bits += (rctx->hash_cnt << 3); + if (rctx->hash_rem) { + /* save remaining data to buffer */ + unsigned int offset = rctx->nbytes - rctx->hash_rem; + + scatterwalk_map_and_copy(rctx->buf, rctx->src, + offset, rctx->hash_rem, 0); + rctx->buf_count = rctx->hash_rem; + } else { + rctx->buf_count = 0; + } + + if (rctx->final) { + if (req->result) + memcpy(req->result, rctx->ctx, SM3_DIGEST_SIZE); + + memset(rctx->ctx, 0, SM3_DIGEST_SIZE); + } + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_do_sm3_update(struct ahash_request *req, unsigned int nbytes, + unsigned int final) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct scatterlist *sg = req->src; + struct ccp_sm3_engine *sm3 = NULL; + unsigned int sg_count; + gfp_t gfp; + u64 len, msg_bits = 0; + int nents; + int ret; + + /* must check length of src, + * otherwise will result in NullPointer exception in ccp_sm3_complete + */ + if (nbytes) { + nents = sg_nents_for_len(req->src, nbytes); + if (nents < 0) + return -EINVAL; + } + + len = (u64)rctx->buf_count + (u64)nbytes; + if (len <= SM3_BLOCK_SIZE) { + scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src, + 0, nbytes, 0); + rctx->buf_count += nbytes; + if (!final) + return 0; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = 
&rctx->buf_sg; + } else { + gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? + GFP_KERNEL : GFP_ATOMIC; + + if (rctx->buf_count) { + /* build the scatterlist table: (buffer and input data) */ + sg_count = sg_nents(req->src) + 1; + ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); + if (ret) + return ret; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = ccp_crypto_sg_table_add( + &rctx->data_sg, &rctx->buf_sg); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg = ccp_crypto_sg_table_add(&rctx->data_sg, + req->src); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg_mark_end(sg); + + sg = rctx->data_sg.sgl; + } else { + sg = req->src; + } + } + + rctx->final = final; + if (final) { + rctx->hash_rem = 0; + rctx->hash_cnt = len; + msg_bits = rctx->msg_bits + (len << 3); + } else { + rctx->hash_rem = len & (SM3_BLOCK_SIZE - 1); + rctx->hash_cnt = len - rctx->hash_rem; + rctx->src = req->src; + rctx->nbytes = nbytes; + } + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM3; + + sm3 = &rctx->cmd.u.sm3; + sm3->type = CCP_SM3_TYPE_256; + sm3->ctx = &rctx->ctx_sg; + sm3->ctx_len = SM3_DIGEST_SIZE; + sm3->src = sg; + sm3->src_len = rctx->hash_cnt; + sm3->first = rctx->msg_bits ? 
0 : 1; + sm3->final = final; + sm3->msg_bits = msg_bits; + if (final && ctx->u.sm3.key_len) { + sm3->opad = &ctx->u.sm3.opad_sg; + sm3->opad_len = SM3_BLOCK_SIZE; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_sm3_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + + if ((crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) && + (!ctx->u.sm3.key_len)) + return -ENOKEY; + + memset(rctx, 0, sizeof(*rctx)); + if (ctx->u.sm3.key_len) { + /* buffer the HMAC key for first update */ + memcpy(rctx->buf, ctx->u.sm3.ipad, SM3_BLOCK_SIZE); + rctx->buf_count = SM3_BLOCK_SIZE; + } + + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + + return 0; +} + +static int ccp_sm3_update(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 0); +} + +static int ccp_sm3_final(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, 0, 1); +} + +static int ccp_sm3_finup(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 1); +} + +static int ccp_sm3_digest(struct ahash_request *req) +{ + int ret; + + ret = ccp_sm3_init(req); + if (unlikely(ret)) + return ret; + + return ccp_sm3_finup(req); +} + +static int ccp_sm3_export(struct ahash_request *req, void *out) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!out) + return -EINVAL; + + /* don't let anything leak to 'out' */ + memset(&state, 0, sizeof(state)); + + state.msg_bits = rctx->msg_bits; + memcpy(state.ctx, rctx->ctx, SM3_DIGEST_SIZE); + state.buf_count = rctx->buf_count; + memcpy(state.buf, rctx->buf, SM3_BLOCK_SIZE); + + /* 'out' may not be aligned so memcpy from local variable */ + memcpy(out, &state, sizeof(state)); + memset(&state, 0, sizeof(state)); + + return 0; +} + 
+static int ccp_sm3_import(struct ahash_request *req, const void *in) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!in) + return -EINVAL; + + /* 'in' may not be aligned so memcpy to local variable */ + memcpy(&state, in, sizeof(state)); + + memset(rctx, 0, sizeof(*rctx)); + rctx->msg_bits = state.msg_bits; + memcpy(rctx->ctx, state.ctx, SM3_DIGEST_SIZE); + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + rctx->buf_count = state.buf_count; + memcpy(rctx->buf, state.buf, SM3_BLOCK_SIZE); + + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct crypto_shash *shash = ctx->u.sm3.hmac_tfm; + + SHASH_DESC_ON_STACK(sdesc, shash); + + int i, ret; + + /* set to zero until complete */ + ctx->u.sm3.key_len = 0; + if (!key) + return -EINVAL; + + if (!key_len) { + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + /* clear key area to provide zero padding for keys smaller + * than the block size + */ + memset(ctx->u.sm3.key, 0, SM3_BLOCK_SIZE); + + if (key_len > SM3_BLOCK_SIZE) { + /* must hash the input key */ + sdesc->tfm = shash; + ret = crypto_shash_digest(sdesc, key, key_len, + ctx->u.sm3.key); + if (ret) { + crypto_ahash_set_flags( + tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + key_len = SM3_DIGEST_SIZE; + } else { + memcpy(ctx->u.sm3.key, key, key_len); + } + + for (i = 0; i < SM3_BLOCK_SIZE; i++) { + ctx->u.sm3.ipad[i] = ctx->u.sm3.key[i] ^ HMAC_IPAD_VALUE; + ctx->u.sm3.opad[i] = ctx->u.sm3.key[i] ^ HMAC_OPAD_VALUE; + } + + sg_init_one(&ctx->u.sm3.opad_sg, ctx->u.sm3.opad, SM3_BLOCK_SIZE); + + ctx->u.sm3.key_len = key_len; + + return 0; +} + +static int ccp_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + + 
ctx->complete = ccp_sm3_complete; + crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sm3_req_ctx)); + + return 0; +} + +static void ccp_sm3_cra_exit(struct crypto_tfm *tfm) +{ +} + +static int ccp_hmac_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); + struct crypto_shash *hmac_tfm; + + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); + if (IS_ERR(hmac_tfm)) { + pr_warn("could not load driver %s need for HMAC support\n", + alg->child_alg); + return PTR_ERR(hmac_tfm); + } + + ctx->u.sm3.hmac_tfm = hmac_tfm; + + return ccp_sm3_cra_init(tfm); +} + +static void ccp_hmac_sm3_cra_exit(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->u.sm3.hmac_tfm) + crypto_free_shash(ctx->u.sm3.hmac_tfm); + + ccp_sm3_cra_exit(tfm); +} + +struct ccp_sm3_def { + unsigned int version; + const char *name; + const char *drv_name; + enum ccp_sm3_type type; + u32 digest_size; + u32 block_size; +}; + +static struct ccp_sm3_def sm3_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm3", + .drv_name = "sm3-ccp", + .type = CCP_SM3_TYPE_256, + .digest_size = SM3_DIGEST_SIZE, + .block_size = SM3_BLOCK_SIZE, + }, +}; + +static int ccp_register_hmac_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def, + const struct ccp_crypto_ahash_alg *base_alg) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + /* copy the base algorithm and only change what's necessary */ + *ccp_alg = *base_alg; + INIT_LIST_HEAD(&ccp_alg->entry); + + strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + + alg = &ccp_alg->alg; + alg->setkey = ccp_sm3_setkey; + + base = &alg->halg.base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, 
"hmac-%s", + def->drv_name); + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + base->cra_init = ccp_hmac_sm3_cra_init; + base->cra_exit = ccp_hmac_sm3_cra_exit; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return ret; +} + +static int ccp_register_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct hash_alg_common *halg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->type = def->type; + + alg = &ccp_alg->alg; + alg->init = ccp_sm3_init; + alg->update = ccp_sm3_update; + alg->final = ccp_sm3_final; + alg->finup = ccp_sm3_finup; + alg->digest = ccp_sm3_digest; + alg->export = ccp_sm3_export; + alg->import = ccp_sm3_import; + + halg = &alg->halg; + halg->digestsize = def->digest_size; + halg->statesize = sizeof(struct ccp_sm3_exp_ctx); + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + base->cra_blocksize = def->block_size; + base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_priority = CCP_CRA_PRIORITY; + base->cra_init = ccp_sm3_cra_init; + base->cra_exit = ccp_sm3_cra_exit; + base->cra_module = THIS_MODULE; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + ret = ccp_register_hmac_sm3_hygon_alg(head, def, ccp_alg); + + return ret; +} + +int ccp_register_sm3_hygon_algs(struct list_head *head) +{ + int i, ret; + 
unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm3_algs); i++) { + if (sm3_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm3_hygon_alg(head, &sm3_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 5133b921a5f5..33e54fcbca53 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -295,6 +295,53 @@ struct ccp_sm2_req_ctx { struct ccp_cmd cmd; }; +/***** SM3 related defines *****/ +struct ccp_sm3_ctx { + u32 key_len; + u8 key[SM3_BLOCK_SIZE]; + + u8 ipad[SM3_BLOCK_SIZE]; + + u8 opad[SM3_BLOCK_SIZE]; + struct scatterlist opad_sg; + + struct crypto_shash *hmac_tfm; +}; + +struct ccp_sm3_req_ctx { + u64 msg_bits; + + unsigned int first; + unsigned int final; + + struct scatterlist *src; + u32 nbytes; + + u64 hash_cnt; + u32 hash_rem; + + struct sg_table data_sg; + struct scatterlist *src_sg; + + struct scatterlist ctx_sg; + u8 ctx[SM3_DIGEST_SIZE]; + + struct scatterlist buf_sg; + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + +struct ccp_sm3_exp_ctx { + u64 msg_bits; + + u8 ctx[SM3_DIGEST_SIZE]; + + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -305,6 +352,7 @@ struct ccp_ctx { struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; struct ccp_sm2_ctx sm2; + struct ccp_sm3_ctx sm3; } u; }; @@ -321,5 +369,6 @@ int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); int ccp_register_sm2_hygon_algs(struct list_head *head); +int ccp_register_sm3_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 2c144fa64e88..7038be74bbb6 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ 
-136,6 +136,11 @@ union ccp_function { u16 rsvd:11; u16 mode:3; } sm2; + struct { + u16 rsvd:10; + u16 type:4; + u16 rsvd2:1; + } sm3; u16 raw; }; @@ -158,6 +163,7 @@ union ccp_function { #define CCP_ECC_AFFINE(p) ((p)->ecc.one) #define CCP_SM2_RAND(p) ((p)->sm2.rand) #define CCP_SM2_MODE(p) ((p)->sm2.mode) +#define CCP_SM3_TYPE(p) ((p)->sm3.type) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -193,6 +199,8 @@ union ccp_function { #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) +#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) +#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) /* Word 6/7 */ #define CCP5_CMD_DW6(p) ((p)->key_lo) @@ -628,6 +636,42 @@ static int ccp5_perform_sm2(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm3(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + op->cmd_q->total_sm3_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM3_TYPE(&function) = op->u.sm3.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); + CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1148,6 +1192,7 @@ static const struct ccp_actions ccp5_actions = { .passthru = ccp5_perform_passthru, .ecc 
= ccp5_perform_ecc, .sm2 = ccp5_perform_sm2, + .sm3 = ccp5_perform_sm3, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 2b45309b78fa..2d6c4c404539 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -335,6 +335,7 @@ struct ccp_cmd_queue { unsigned long total_pt_ops; unsigned long total_ecc_ops; unsigned long total_sm2_ops; + unsigned long total_sm3_ops; } ____cacheline_aligned; struct ccp_device { @@ -534,6 +535,11 @@ struct ccp_sm2_op { enum ccp_sm2_mode mode; }; +struct ccp_sm3_op { + enum ccp_sm3_type type; + u64 msg_bits; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -558,6 +564,7 @@ struct ccp_op { struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; struct ccp_sm2_op sm2; + struct ccp_sm3_op sm3; } u; }; @@ -606,6 +613,7 @@ struct dword3 { union dword4 { u32 dst_lo; /* NON-SHA */ u32 sha_len_lo; /* SHA */ + __le32 sm3_len_lo; /* SM3 */ }; union dword5 { @@ -616,6 +624,7 @@ union dword5 { unsigned int fixed:1; } fields; u32 sha_len_hi; + __le32 sm3_len_hi; }; struct dword7 { @@ -665,6 +674,7 @@ struct ccp_actions { int (*passthru)(struct ccp_op *); int (*ecc)(struct ccp_op *); int (*sm2)(struct ccp_op *op); + int (*sm3)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index a53d895f639e..57da09d6cd3c 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2564,6 +2564,160 @@ static int ccp_run_sm2_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return ret; } +static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm3_engine *sm3 = &cmd->u.sm3; + struct ccp_dm_workarea ctx; + struct ccp_data src; + struct ccp_op op; + int ret; + + u8 
sm3_zero_message_hash[SM3_DIGEST_SIZE] = { + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B, + }; + + if ((sm3->ctx == NULL) || (sm3->ctx_len != SM3_DIGEST_SIZE)) + return -EINVAL; + + if (sg_nents_for_len(sm3->ctx, SM3_DIGEST_SIZE) < 0) + return -EINVAL; + + if (sm3->final && sm3->first) { + if (!sm3->src_len) { + scatterwalk_map_and_copy( + (void *)sm3_zero_message_hash, + sm3->ctx, 0, SM3_DIGEST_SIZE, 1); + return 0; + } + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.init = sm3->first & 0x1; + op.u.sm3.type = sm3->type; + op.u.sm3.msg_bits = sm3->msg_bits; + + memset(&ctx, 0, sizeof(ctx)); + ret = ccp_init_dm_workarea(&ctx, cmd_q, SM3_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (ret) + return ret; + + if (!sm3->first) { + /* load iv */ + ccp_set_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + } + + ret = ccp_init_data(&src, cmd_q, sm3->src, sm3->src_len, + SM3_BLOCK_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + /* send data to the CCP SM3 engine */ + if (sm3->src_len) { + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, NULL, &op, SM3_BLOCK_SIZE, + false); + if (!src.sg_wa.bytes_left && sm3->final) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + ccp_process_data(&src, NULL, &op); + } + } else { + /* do sm3 padding */ + src.dm_wa.address[0] = 0x80; + *(__be64 *)&src.dm_wa.address[56] = cpu_to_be64(sm3->msg_bits); + + op.soc = 0; + op.ioc = 1; + op.eom = 0; + op.src.u.dma.address = src.dm_wa.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = SM3_BLOCK_SIZE; + 
+ ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + + ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + if (sm3->final && sm3->opad) { + /* HMAC operation, recursively perform final SM3 */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u8 *hmac_buf = NULL; + + hmac_buf = kmalloc( + SM3_BLOCK_SIZE + SM3_DIGEST_SIZE, GFP_KERNEL); + if (!hmac_buf) { + ret = -ENOMEM; + goto e_data; + } + scatterwalk_map_and_copy(hmac_buf, sm3->opad, + 0, SM3_BLOCK_SIZE, 0); + memcpy(hmac_buf + SM3_BLOCK_SIZE, ctx.address, + SM3_DIGEST_SIZE); + sg_init_one(&sg, hmac_buf, SM3_BLOCK_SIZE + SM3_DIGEST_SIZE); + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); + hmac_cmd.engine = CCP_ENGINE_SM3; + hmac_cmd.u.sm3.type = sm3->type; + hmac_cmd.u.sm3.ctx = sm3->ctx; + hmac_cmd.u.sm3.ctx_len = sm3->ctx_len; + hmac_cmd.u.sm3.src = &sg; + hmac_cmd.u.sm3.src_len = SM3_BLOCK_SIZE + SM3_DIGEST_SIZE; + hmac_cmd.u.sm3.opad = NULL; + hmac_cmd.u.sm3.opad_len = 0; + hmac_cmd.u.sm3.first = 1; + hmac_cmd.u.sm3.final = 1; + hmac_cmd.u.sm3.msg_bits = + (SM3_BLOCK_SIZE + SM3_DIGEST_SIZE) << 3; + + ret = ccp_run_sm3_cmd(cmd_q, &hmac_cmd); + if (ret) + cmd->engine_error = hmac_cmd.engine_error; + + kfree(hmac_buf); + } else { + ccp_get_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + } + +e_data: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2611,6 +2765,9 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_SM2: ret = ccp_run_sm2_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM3: + ret = ccp_run_sm3_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index bd947cb8d41f..cda875cf3c71 100644 --- a/include/linux/ccp.h +++ 
b/include/linux/ccp.h @@ -634,6 +634,47 @@ struct ccp_sm2_engine { u32 dst_len; }; +/***** SM3 engine *****/ +/** + * ccp_sm3_type - type of SM3 operation + * + * @CCP_SM3_TYPE_256: SM3 operation + */ +enum ccp_sm3_type { + CCP_SM3_TYPE_256 = 2, + CCP_SM3_TYPE__LAST, +}; + +/** + * struct ccp_sm3_engine - CCP SM3 operation + * @type: Type of SM3 operation + * @ctx: current hash value + * @ctx_len: length in bytes of hash value + * @src: data to be used for this operation + * @src_len: length in bytes of data used for this operation + * @opad: data to be used for final HMAC operation + * @opad_len: length in bytes of data used for final HMAC operation + * @first: indicates first SM3 operation + * @final: indicates final SM3 operation + * @msg_bits: total length of the message in bits used in final SM3 operation + */ +struct ccp_sm3_engine { + enum ccp_sm3_type type; + + struct scatterlist *ctx; + u32 ctx_len; + + struct scatterlist *src; + u64 src_len; + + struct scatterlist *opad; + u32 opad_len; + + u32 first; + u32 final; + u64 msg_bits; +}; + /** * ccp_engine - CCP operation identifiers * @@ -646,6 +687,7 @@ struct ccp_sm2_engine { * @CCP_ENGINE_ZLIB_DECOMPRESS: unused * @CCP_ENGINE_ECC: ECC operation * @CCP_ENGINE_SM2: SM2 operation + * @CCP_ENGINE_SM3: SM3 operation */ enum ccp_engine { CCP_ENGINE_AES = 0, @@ -657,6 +699,7 @@ enum ccp_engine { CCP_ENGINE_ZLIB_DECOMPRESS, CCP_ENGINE_ECC, CCP_ENGINE_SM2 = 8, /* fixed value */ + CCP_ENGINE_SM3, CCP_ENGINE__LAST, }; @@ -706,6 +749,7 @@ struct ccp_cmd { struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; struct ccp_sm2_engine sm2; + struct ccp_sm3_engine sm3; } u; /* Completion callback support */ From d888a27f78088b6e6859fd80e907a41953421911 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 18:25:26 +0800 Subject: [PATCH 75/99] crypto: ccp: Support SM4 algorithm for hygon ccp. 
hygon inclusion category: feature -------------------------------- In order to add SM4 driver for hygon ccp, relating to sm4 mode of ecb/ecb_hs, cbc/cbc_hs, cfb, ofb and ctr Signed-off-by: Yabin Li Signed-off-by: yangdepei [ add crypto/internal/skcipher.h to ccp-crypto-sm4-hygon.c include] (cherry picked from commit 88ae1ee43818bbd460e6bbbc9e1b52fd71bbba77) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/ccp-crypto-main.c | 4 + drivers/crypto/ccp/ccp-crypto-sm4-hygon.c | 326 ++++++++++++++++++++++ drivers/crypto/ccp/ccp-crypto.h | 17 ++ drivers/crypto/ccp/ccp-dev-v5.c | 127 ++++++++- drivers/crypto/ccp/ccp-dev.h | 18 ++ drivers/crypto/ccp/ccp-ops.c | 230 +++++++++++++++ include/linux/ccp.h | 114 ++++++++ 8 files changed, 837 insertions(+), 2 deletions(-) create mode 100644 drivers/crypto/ccp/ccp-crypto-sm4-hygon.c diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 13231ba2da5a..7b803028ceaf 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -35,4 +35,5 @@ ccp-crypto-objs := ccp-crypto-main.o \ obj-$(CONFIG_TDM_KERNEL_GUARD) += hygon/tdm-kernel-guard.o ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ - ccp-crypto-sm3-hygon.o + ccp-crypto-sm3-hygon.o \ + ccp-crypto-sm4-hygon.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index 938231fa295c..f3a6ce7a9754 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -339,6 +339,10 @@ static int ccp_register_algs(void) if (ret) return ret; + ret = ccp_register_sm4_hygon_algs(&skcipher_algs); + if (ret) + return ret; + /* Return on hygon platform */ return 0; } diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c new file mode 100644 index 000000000000..1c2a31c8d3bf --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon 
Cryptographic Coprocessor (CCP) SM4 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" + +enum ccp_sm4_alg_mode { + CCP_SM4_ALG_MODE_ECB = CCP_SM4_MODE_ECB, + CCP_SM4_ALG_MODE_CBC = CCP_SM4_MODE_CBC, + CCP_SM4_ALG_MODE_OFB = CCP_SM4_MODE_OFB, + CCP_SM4_ALG_MODE_CFB = CCP_SM4_MODE_CFB, + CCP_SM4_ALG_MODE_CTR = CCP_SM4_MODE_CTR, + CCP_SM4_ALG_MODE_ECB_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_ECB, + CCP_SM4_ALG_MODE_CBC_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_CBC, + CCP_SM4_ALG_MODE__LAST, +}; + +static int ccp_sm4_complete(struct crypto_async_request *async_req, int ret) +{ + struct skcipher_request *req = skcipher_request_cast(async_req); + struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req); + + if (ret) + return ret; + + if ((ctx->u.sm4.mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) { + memcpy(req->iv, rctx->iv, SM4_BLOCK_SIZE); + memset(rctx->iv, 0, SM4_BLOCK_SIZE); + } + + return 0; +} + +static int ccp_sm4_setkey(struct crypto_skcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + + /* key_len is checked by crypto_ablkcipher_type, + * but key isn't checked + */ + if (!key) + return -EINVAL; + + memcpy(ctx->u.sm4.key, key, SM4_KEY_SIZE); + sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, SM4_KEY_SIZE); + + ctx->u.sm4.key_len = SM4_KEY_SIZE; + + return 0; +} + +static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req); + struct scatterlist 
*iv_sg = NULL; + struct ccp_cmd *cmd = NULL; + enum ccp_sm4_alg_mode mode; + enum ccp_sm4_action action; + int ret; + + if (!ctx->u.sm4.key_len) + return -ENOKEY; + + mode = ctx->u.sm4.mode; + if ((mode != CCP_SM4_ALG_MODE_CTR) && + (mode != CCP_SM4_ALG_MODE_OFB) && + (mode != CCP_SM4_ALG_MODE_CFB) && + (req->cryptlen & (SM4_BLOCK_SIZE - 1))) + return -EINVAL; + + if ((mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) { + if (!req->iv) + return -EINVAL; + + memcpy(rctx->iv, req->iv, SM4_BLOCK_SIZE); + iv_sg = &rctx->iv_sg; + sg_init_one(iv_sg, rctx->iv, SM4_BLOCK_SIZE); + } + + cmd = &rctx->cmd; + memset(cmd, 0, sizeof(*cmd)); + INIT_LIST_HEAD(&cmd->entry); + action = encrypt ? CCP_SM4_ACTION_ENCRYPT : CCP_SM4_ACTION_DECRYPT; + if (mode == CCP_SM4_ALG_MODE_CTR) { + cmd->engine = CCP_ENGINE_SM4_CTR; + cmd->u.sm4_ctr.action = action; + cmd->u.sm4_ctr.size = 63; + cmd->u.sm4_ctr.step = 1; + + cmd->u.sm4_ctr.key = &ctx->u.sm4.key_sg; + cmd->u.sm4_ctr.key_len = SM4_KEY_SIZE; + cmd->u.sm4_ctr.iv = iv_sg; + cmd->u.sm4_ctr.iv_len = SM4_BLOCK_SIZE; + + cmd->u.sm4_ctr.src = req->src; + cmd->u.sm4_ctr.dst = req->dst; + cmd->u.sm4_ctr.src_len = req->cryptlen; + + } else { + cmd->engine = CCP_ENGINE_SM4; + cmd->u.sm4.mode = mode & CCP_SM4_MODE_MASK; + cmd->u.sm4.action = action; + if (mode & CCP_SM4_MODE_HS_SEL) + cmd->u.sm4.select = 1; + + cmd->u.sm4.key = &ctx->u.sm4.key_sg; + cmd->u.sm4.key_len = SM4_KEY_SIZE; + cmd->u.sm4.iv = iv_sg; + cmd->u.sm4.iv_len = iv_sg ? 
SM4_BLOCK_SIZE : 0; + + cmd->u.sm4.src = req->src; + cmd->u.sm4.dst = req->dst; + cmd->u.sm4.src_len = req->cryptlen; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm4_encrypt(struct skcipher_request *req) +{ + return ccp_sm4_crypt(req, true); +} + +static int ccp_sm4_decrypt(struct skcipher_request *req) +{ + return ccp_sm4_crypt(req, false); +} + +static int ccp_sm4_init_tfm(struct crypto_skcipher *tfm) +{ + struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm); + struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm); + + ctx->complete = ccp_sm4_complete; + ctx->u.sm4.mode = alg->mode; + + crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_sm4_req_ctx)); + + return 0; +} + +static const struct skcipher_alg ccp_sm4_defaults = { + .setkey = ccp_sm4_setkey, + .encrypt = ccp_sm4_encrypt, + .decrypt = ccp_sm4_decrypt, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .init = ccp_sm4_init_tfm, + + .base.cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK, + .base.cra_blocksize = SM4_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct ccp_ctx), + .base.cra_priority = CCP_CRA_PRIORITY, + .base.cra_module = THIS_MODULE, +}; + +struct ccp_sm4_def { + enum ccp_sm4_alg_mode mode; + unsigned int version; + const char *name; + const char *driver_name; + unsigned int blocksize; + unsigned int ivsize; + const struct skcipher_alg *alg_defaults; +}; + +static struct ccp_sm4_def sm4_algs[] = { + { + .mode = CCP_SM4_ALG_MODE_ECB, + .version = CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_ECB_HS, + .version = CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_ECB_HS, + .version = 
CCP_VERSION(5, 0), + .name = "ecb(sm4)", + .driver_name = "ecb-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = 0, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC_HS, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CBC_HS, + .version = CCP_VERSION(5, 0), + .name = "cbc(sm4)", + .driver_name = "cbc-sm4-hs-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_OFB, + .version = CCP_VERSION(5, 0), + .name = "ofb(sm4)", + .driver_name = "ofb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CFB, + .version = CCP_VERSION(5, 0), + .name = "cfb(sm4)", + .driver_name = "cfb-sm4-ccp", + .blocksize = SM4_BLOCK_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, + { + .mode = CCP_SM4_ALG_MODE_CTR, + .version = CCP_VERSION(5, 0), + .name = "ctr(sm4)", + .driver_name = "ctr-sm4-ccp", + .blocksize = 1, + .ivsize = SM4_BLOCK_SIZE, + .alg_defaults = &ccp_sm4_defaults, + }, +}; + +static int ccp_register_sm4_hygon_alg(struct list_head *head, + const struct ccp_sm4_def *def) +{ + struct ccp_crypto_skcipher_alg *ccp_alg; + struct skcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->mode = def->mode; + + /* copy the defaults and override as necessary */ + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, 
CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + alg->base.cra_blocksize = def->blocksize; + alg->ivsize = def->ivsize; + + ret = crypto_register_skcipher(alg); + if (ret) { + pr_err("%s skcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm4_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (sm4_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm4_hygon_alg(head, &sm4_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index 33e54fcbca53..58b2950f9100 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -342,6 +342,21 @@ struct ccp_sm3_exp_ctx { u8 buf[SM3_BLOCK_SIZE]; }; +/***** SM4 related defines *****/ +struct ccp_sm4_ctx { + struct scatterlist key_sg; + u8 key[SM4_KEY_SIZE]; + u32 key_len; + u32 mode; +}; + +struct ccp_sm4_req_ctx { + struct scatterlist iv_sg; + u8 iv[SM4_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -353,6 +368,7 @@ struct ccp_ctx { struct ccp_des3_ctx des3; struct ccp_sm2_ctx sm2; struct ccp_sm3_ctx sm3; + struct ccp_sm4_ctx sm4; } u; }; @@ -370,5 +386,6 @@ int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); int ccp_register_sm2_hygon_algs(struct list_head *head); int ccp_register_sm3_hygon_algs(struct list_head *head); +int ccp_register_sm4_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 7038be74bbb6..08c8d72aaf79 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ 
b/drivers/crypto/ccp/ccp-dev-v5.c @@ -141,6 +141,18 @@ union ccp_function { u16 type:4; u16 rsvd2:1; } sm3; + struct { + u16 rsvd:7; + u16 encrypt:1; + u16 mode:4; + u16 select:1; + u16 rsvd2:2; + } sm4; + struct { + u16 size:7; + u16 encrypt:1; + u16 step:7; + } sm4_ctr; u16 raw; }; @@ -164,6 +176,12 @@ union ccp_function { #define CCP_SM2_RAND(p) ((p)->sm2.rand) #define CCP_SM2_MODE(p) ((p)->sm2.mode) #define CCP_SM3_TYPE(p) ((p)->sm3.type) +#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) +#define CCP_SM4_MODE(p) ((p)->sm4.mode) +#define CCP_SM4_SELECT(p) ((p)->sm4.select) +#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) +#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) +#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -672,6 +690,90 @@ static int ccp5_perform_sm3(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm4(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; + CCP_SM4_MODE(&function) = op->u.sm4.mode; + CCP_SM4_SELECT(&function) = op->u.sm4.select; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = 
lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4_ctr(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ctr_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; + CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; + CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -1150,6 +1252,26 @@ static void ccp5_destroy(struct ccp_device *ccp) } } +static int ccp5_get_trng_mask_param(void) +{ + /* According to spec description for SM4 high secure module, + * which need 64 bytes data, so the initialize times of writing + * mask register must be 16 or a multiple of 16. + * + * The AES algorithem need 48 bytes, so the initialize times will + * be 12 or a multiple of 12. 
+ */ + +#ifdef CONFIG_HYGON_GM + /* for sm4 HS */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return 16; +#endif + + /* for AES HS */ + return 12; +} + static void ccp5_config(struct ccp_device *ccp) { /* Public side */ @@ -1160,12 +1282,13 @@ static void ccp5other_config(struct ccp_device *ccp) { int i; u32 rnd; + int len = ccp5_get_trng_mask_param(); /* We own all of the queues on the NTB CCP */ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); - for (i = 0; i < 12; i++) { + for (i = 0; i < len; i++) { rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); } @@ -1193,6 +1316,8 @@ static const struct ccp_actions ccp5_actions = { .ecc = ccp5_perform_ecc, .sm2 = ccp5_perform_sm2, .sm3 = ccp5_perform_sm3, + .sm4 = ccp5_perform_sm4, + .sm4_ctr = ccp5_perform_sm4_ctr, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 2d6c4c404539..92b859dae7c6 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -336,6 +336,8 @@ struct ccp_cmd_queue { unsigned long total_ecc_ops; unsigned long total_sm2_ops; unsigned long total_sm3_ops; + unsigned long total_sm4_ops; + unsigned long total_sm4_ctr_ops; } ____cacheline_aligned; struct ccp_device { @@ -540,6 +542,18 @@ struct ccp_sm3_op { u64 msg_bits; }; +struct ccp_sm4_op { + enum ccp_sm4_action action; + enum ccp_sm4_mode mode; + u32 select; +}; + +struct ccp_sm4_ctr_op { + u32 size; + enum ccp_sm4_action action; + u32 step; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -565,6 +579,8 @@ struct ccp_op { struct ccp_ecc_op ecc; struct ccp_sm2_op sm2; struct ccp_sm3_op sm3; + struct ccp_sm4_op sm4; + struct ccp_sm4_ctr_op sm4_ctr; } u; }; @@ -675,6 +691,8 @@ struct ccp_actions { int (*ecc)(struct ccp_op *); int (*sm2)(struct ccp_op *op); int (*sm3)(struct ccp_op *op); + int (*sm4)(struct 
ccp_op *op); + int (*sm4_ctr)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 57da09d6cd3c..4babb5f89879 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2718,6 +2718,230 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) return ret; } +static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_engine *sm4 = &cmd->u.sm4; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4->src == NULL || sm4->dst == NULL) + return -EINVAL; + + if (sm4->key == NULL || sm4->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4->mode != CCP_SM4_MODE_ECB) { + if (sm4->iv == NULL || sm4->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4.action = sm4->action; + op.u.sm4.mode = sm4->mode; + op.u.sm4.select = sm4->select; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4->src) == sg_virt(sm4->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len, + SM4_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4->dst, sm4->src_len, + SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + if (sm4->mode != CCP_SM4_MODE_ECB) + ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4 engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sm4(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_process_data(&src, &dst, &op); + } + + if (sm4->mode != CCP_SM4_MODE_ECB) { + /* retrieve the SM4 iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + } + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + +static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_ctr_engine *sm4_ctr = &cmd->u.sm4_ctr; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4_ctr->src == NULL || sm4_ctr->dst == NULL) + return -EINVAL; + + if (sm4_ctr->key == NULL || sm4_ctr->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if 
(sg_nents_for_len(sm4_ctr->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4_ctr->iv == NULL || sm4_ctr->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4_ctr.size = sm4_ctr->size; + op.u.sm4_ctr.action = sm4_ctr->action; + op.u.sm4_ctr.step = sm4_ctr->step; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4_ctr->src) == sg_virt(sm4_ctr->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4_ctr->src, sm4_ctr->src_len, + SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4_ctr->dst, + sm4_ctr->src_len, SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + ccp_set_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4_ctr->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4_CTR engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, false); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_process_data(&src, &dst, &op); + } + + /* retrieve the SM4_CTR iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if 
(ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2768,6 +2992,12 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_SM3: ret = ccp_run_sm3_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM4: + ret = ccp_run_sm4_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM4_CTR: + ret = ccp_run_sm4_ctr_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/include/linux/ccp.h b/include/linux/ccp.h index cda875cf3c71..8e34f05bc6b1 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -675,6 +675,116 @@ struct ccp_sm3_engine { u64 msg_bits; }; +/***** SM4 engine *****/ +#define SM4_BLOCK_SIZE 16 +#define SM4_KEY_SIZE 16 +#define CCP_SM4_MODE_MASK 0x0F +#define CCP_SM4_MODE_HS_SEL 0x10 + +/** + * ccp_sm4_mode - SM4 operation mode + * + * @CCP_SM4_MODE_ECB: ECB mode + * @CCP_SM4_MODE_CBC: CBC mode + * @CCP_SM4_MODE_OFB: OFB mode + * @CCP_SM4_MODE_CFB: CFB mode + * @CCP_SM4_MODE_CTR: CTR mode + */ +enum ccp_sm4_mode { + CCP_SM4_MODE_ECB = 0, + CCP_SM4_MODE_CBC, + CCP_SM4_MODE_OFB, + CCP_SM4_MODE_CFB, + CCP_SM4_MODE_CTR, + CCP_SM4_MODE__LAST, +}; + +/** + * ccp_sm4_action - SM4 operation + * + * @CCP_SM4_ACTION_DECRYPT: SM4 decrypt operation + * @CCP_SM4_ACTION_ENCRYPT: SM4 encrypt operation + */ +enum ccp_sm4_action { + CCP_SM4_ACTION_DECRYPT = 0, + CCP_SM4_ACTION_ENCRYPT, + CCP_SM4_ACTION__LAST, +}; + +/** + * struct ccp_sm4_engine - CCP SM4 operation + * @mode: SM4 operation mode + * @action: SM4 operation (decrypt/encrypt) + * @select: Indicating that high-secure engine is selected + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of 
key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - mode, action, select, key, key_len, src, dst, src_len + * - iv, iv_len for any mode other than ECB + * - key_len and iv_len must be 16B + * - src_len must be multiple of 16B + * - high-secure engine only for ECB and CBC mode + * + * The iv variable is used as both input and output. On completion of the + * SM4 operation the new IV overwrites the old IV. + */ +struct ccp_sm4_engine { + enum ccp_sm4_mode mode; + enum ccp_sm4_action action; + u32 select; /* Indicating that high-secure engine is selected */ + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + +/***** SM4_CTR engine *****/ +/** + * struct ccp_sm4_ctr_engine - CCP SM4_CTR operation + * @action: SM4_CTR operation (decrypt/encrypt) + * @size: counter bit size + * @step: counter increase step + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - action, size, step, key, key_len, iv, iv_len, src, dst, src_len + * - key_len and iv_len must be 16B + * + * The iv variable is used as both input and output. On completion of the + * SM4_CTR operation the new IV overwrites the old IV. 
+ */ +struct ccp_sm4_ctr_engine { + enum ccp_sm4_action action; + u32 size; + u32 step; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + /** * ccp_engine - CCP operation identifiers * @@ -700,6 +810,8 @@ enum ccp_engine { CCP_ENGINE_ECC, CCP_ENGINE_SM2 = 8, /* fixed value */ CCP_ENGINE_SM3, + CCP_ENGINE_SM4, + CCP_ENGINE_SM4_CTR, CCP_ENGINE__LAST, }; @@ -750,6 +862,8 @@ struct ccp_cmd { struct ccp_ecc_engine ecc; struct ccp_sm2_engine sm2; struct ccp_sm3_engine sm3; + struct ccp_sm4_engine sm4; + struct ccp_sm4_ctr_engine sm4_ctr; } u; /* Completion callback support */ From 27eb8c02037cc05b97dceda799d474cf1458df37 Mon Sep 17 00:00:00 2001 From: yangdepei Date: Tue, 19 Mar 2024 20:13:22 +0800 Subject: [PATCH 76/99] crypto: ccp: fix sm2 not return due to wrong complete callback parameter hygon inclusion category: bugfix -------------------------------- the complete callback 'crypto_req_done' has changed its input parameter, we need update input in ccp-crypto implement. 
Fixes: acafe30ff58a ("crypto: ccp: Support SM2 algorithm for hygon ccp.") Signed-off-by: yangdepei (cherry picked from commit 9ced14936ea7f49c0bc907b627497ad10dcbab07) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index b1662953f541..52693c2a068b 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -646,7 +646,7 @@ static void ccp_sm2_enc_compute(struct work_struct *work) crypto_free_shash(shash); e_complete: - req->base.complete(&req->base, ret); + req->base.complete(req->base.data, ret); } static void ccp_sm2_enc_lp(struct work_struct *work) @@ -672,7 +672,7 @@ static void ccp_sm2_enc_lp(struct work_struct *work) ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); if (ret != -EBUSY && ret != -EINPROGRESS) - req->base.complete(&req->base, ret); + req->base.complete(req->base.data, ret); } static int ccp_sm2_encrypt(struct akcipher_request *req) @@ -749,7 +749,7 @@ static void ccp_sm2_dec_compute(struct work_struct *work) /* clear private key, plain, and dC1 */ memset(rctx->src, 0, CCP_SM2_OPERAND_LEN * 2); memset(dst, 0, CCP_SM2_DST_SIZE); - req->base.complete(&req->base, ret); + req->base.complete(req->base.data, ret); } static int ccp_sm2_decrypt(struct akcipher_request *req) From 9da05b591295126af4f158acf443cad17c17e947 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 20:02:26 +0800 Subject: [PATCH 77/99] crypto: ccp: It prompt ILLEGAL_MEM_ADDR when using PSPCCP. hygon inclusion category: bugfix -------------------------------- ccp_find_lsb_regions check from vq_1 but status value start from vq_0. 
Fixes: 4b394a232df7 ("crypto: ccp - Let a v5 CCP provide the same function as v3") Signed-off-by: Yabin Li Signed-off-by: yangdepei (cherry picked from commit 3eb66e32a898025426845435063d3409dda53a07) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-dev-v5.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 08c8d72aaf79..2fc4f08698df 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -783,6 +783,7 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) /* Build a bit mask to know which LSBs this queue has access to. * Don't bother with segment 0 as it has special privileges. */ + status >>= LSB_REGION_WIDTH; for (j = 1; j < MAX_LSB_CNT; j++) { if (status & q_mask) bitmap_set(cmd_q->lsbmask, j, 1); From 47bd25cbb300a2eb01c257a9b9c38f5127b90908 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sat, 7 May 2022 20:40:53 +0800 Subject: [PATCH 78/99] crypto: ccp: Only handle interrupts by completion. hygon inclusion category: bugfix -------------------------------- fix the repetitive interrupt (INT_COMPLETION & INT_EMPTY_QUEUE) in one cmd process. 
Fixes: 6263b51eb319 ("crypto: ccp - Change ISR handler method for a v5 CCP") Signed-off-by: Yabin Li Signed-off-by: yangdepei (cherry picked from commit e0c92f7dfa4d9c9ababd02d7020ca9b1b2b24ffe) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-dev-v5.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 2fc4f08698df..2179da3c9483 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -935,7 +935,7 @@ static void ccp5_irq_bh(unsigned long data) status = ioread32(cmd_q->reg_interrupt_status); - if (status) { + if (status & SUPPORTED_INTERRUPTS) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_status); cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); From 580f2b84bc67d48f1d3ecc8849006102171715b8 Mon Sep 17 00:00:00 2001 From: Xiangyu Xu Date: Mon, 22 Aug 2022 10:47:25 +0800 Subject: [PATCH 79/99] crypto: ccp: Fix a problem that vq thread may stuck when do multi process test. hygon inclusion category: bugfix -------------------------------- we shuld clear interrupt status before set int_revd flag, otherwise, it will cause vq thread stuck when process multi command. 
Fixes: 4b394a232df7 ("crypto: ccp - Let a v5 CCP provide the same function as v3") Signed-off-by: Xiangyu Xu Signed-off-by: Yabin Li Signed-off-by: yangdepei (cherry picked from commit bbc1b5733464a248d54a071ac9cee5c4393b6a8b) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-dev-v5.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 2179da3c9483..0a304b0ce99d 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -944,10 +944,9 @@ static void ccp5_irq_bh(unsigned long data) if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); - cmd_q->int_rcvd = 1; - /* Acknowledge the interrupt and wake the kthread */ iowrite32(status, cmd_q->reg_interrupt_status); + cmd_q->int_rcvd = 1; wake_up_interruptible(&cmd_q->int_queue); } } From 96fc3a58b0172070a706b8fdc0f3fe48b9b35419 Mon Sep 17 00:00:00 2001 From: yangdepei Date: Fri, 17 Nov 2023 16:21:57 +0800 Subject: [PATCH 80/99] crypto: ccp: fix sm2 test failed in testmgr because of missing DER coding hygon inclusion category: bugfix -------------------------------- Add DER coding support for ccp sm2 sign interface. 
Fixes: acafe30ff58a ("crypto: ccp: Support SM2 algorithm for hygon ccp.") Signed-off-by: liulanyi Signed-off-by: yangdepei (cherry picked from commit 10a3ac385b683f112e9a7d1a14543efed56f0524) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/Makefile | 6 +- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 120 ++++++++++++++++++++-- drivers/crypto/ccp/ccp_sm2_sign.asn1 | 4 + 3 files changed, 121 insertions(+), 9 deletions(-) create mode 100644 drivers/crypto/ccp/ccp_sm2_sign.asn1 diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 7b803028ceaf..9c3e29729ee0 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -34,6 +34,10 @@ ccp-crypto-objs := ccp-crypto-main.o \ obj-$(CONFIG_TDM_KERNEL_GUARD) += hygon/tdm-kernel-guard.o +$(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h +$(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h + ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ ccp-crypto-sm3-hygon.o \ - ccp-crypto-sm4-hygon.o + ccp-crypto-sm4-hygon.o \ + ccp_sm2_sign.asn1.o diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index 52693c2a068b..675f4c58eb7d 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -19,6 +19,7 @@ #include #include "ccp-crypto.h" +#include "ccp_sm2_sign.asn1.h" static const u8 sm2_ecc_p[CCP_SM2_OPERAND_LEN] = { 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, @@ -101,6 +102,47 @@ struct ccp_sm2_dst { u8 result_t[CCP_SM2_OPERAND_LEN]; }; +struct sm2_signature_ctx { + const u8 *sig_r; + const u8 *sig_s; + size_t r_len; + size_t s_len; +}; + +int ccp_sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_r = value; + sig->r_len = vlen; + + if (!sig->sig_r) + return -ENOMEM; + + return 0; +} + +int 
ccp_sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_s = value; + sig->s_len = vlen; + + if (!sig->sig_s) + return -ENOMEM; + + return 0; +} + static bool ccp_sm2_is_zero(const u64 *data, u32 count) { u32 i; @@ -450,11 +492,21 @@ static int ccp_sm2_setpubkey(struct crypto_akcipher *tfm, struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; struct ccp_sm2_req_ctx *rctx = NULL; + const unsigned char *cflag = (const unsigned char *)key; int ret; - if (!key || keylen != CCP_SM2_PUBLIC_KEY_LEN) + if (!key || keylen < CCP_SM2_PUBLIC_KEY_LEN) return -EINVAL; + /* When the length of sm2 public key is 65, + * content of key should be 04 || X || Y, from GM/T0009-2012. + */ + if (keylen > CCP_SM2_PUBLIC_KEY_LEN) { + if (*cflag != 0x04) + return -EINVAL; + key = key + 1; + } + /* check whether public key is valid */ rctx = kmalloc(sizeof(*rctx), GFP_KERNEL); if (!rctx) @@ -831,21 +883,71 @@ static int ccp_sm2_verify(struct akcipher_request *req) struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; + int siglen; int nents; int ret; + struct sm2_signature_ctx sig; + unsigned char *buffer; if (!ctx->u.sm2.pub_key_len) return -ENOKEY; - if (req->src_len != CCP_SM2_OPERAND_LEN * 3) - return -EINVAL; + if (req->src_len == CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with non-encoded signature from user space */ + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); + if (nents < 0) + return -EINVAL; - nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); - if (nents < 0) - return -EINVAL; + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN * 3, 0); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, 
ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + + return ret; + } else if (req->src_len < CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with usage like sm2 test of testmgr */ + siglen = req->src_len; + if (req->dst_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + } else { + /* deal with der encoding signature from user space */ + siglen = req->src_len - CCP_SM2_OPERAND_LEN; + } + + buffer = kmalloc(siglen + CCP_SM2_OPERAND_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sg_pcopy_to_buffer(req->src, + sg_nents_for_len(req->src, siglen + CCP_SM2_OPERAND_LEN), + buffer, siglen + CCP_SM2_OPERAND_LEN, 0); + + sig.sig_r = NULL; + sig.sig_s = NULL; + ret = asn1_ber_decoder(&ccp_sm2_sign_decoder, &sig, + buffer, siglen); + + if (ret) + goto error; + + memcpy(src->operand_e, buffer + siglen, CCP_SM2_OPERAND_LEN); + + if (sig.r_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_d, sig.sig_r + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_d, sig.sig_r, CCP_SM2_OPERAND_LEN); + + if (sig.s_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_k, sig.sig_s + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_k, sig.sig_s, CCP_SM2_OPERAND_LEN); - scatterwalk_map_and_copy(src->operand_e, req->src, 0, - CCP_SM2_OPERAND_LEN * 3, 0); memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); @@ -855,6 +957,8 @@ static int ccp_sm2_verify(struct akcipher_request *req) ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, CCP_SM2_MODE_VERIFY, 0); +error: + kfree(buffer); return ret; } diff --git a/drivers/crypto/ccp/ccp_sm2_sign.asn1 b/drivers/crypto/ccp/ccp_sm2_sign.asn1 new file mode 100644 index 000000000000..7e83e6799cb4 --- /dev/null +++ b/drivers/crypto/ccp/ccp_sm2_sign.asn1 @@ -0,0 +1,4 @@ +Sm2Signature ::= SEQUENCE 
{ + sig_r INTEGER ({ ccp_sm2_get_signature_r }), + sig_s INTEGER ({ ccp_sm2_get_signature_s }) +} From 794d6e5e448bcfbee16c422e98f30f4566565fef Mon Sep 17 00:00:00 2001 From: Wentao Guan Date: Tue, 23 Dec 2025 15:53:12 +0800 Subject: [PATCH 81/99] crypto: hygon - Drop sign/verify operations See this commit: commit 5b553e06b3215fa97d222ebddc2bc964f1824c5b Author: Lukas Wunner Date: Tue Sep 10 16:30:19 2024 +0200 crypto: virtio - Drop sign/verify operations The virtio crypto driver exposes akcipher sign/verify operations in a user space ABI. This blocks removal of sign/verify from akcipher_alg. Herbert opines: "I would say that this is something that we can break. Breaking it is no different to running virtio on a host that does not support these algorithms. After all, a software implementation must always be present. I deliberately left akcipher out of crypto_user because the API is still in flux. We should not let virtio constrain ourselves." https://lore.kernel.org/all/ZtqoNAgcnXnrYhZZ@gondor.apana.org.au/ "I would remove virtio akcipher support in its entirety. This API was never meant to be exposed outside of the kernel." https://lore.kernel.org/all/Ztqql_gqgZiMW8zz@gondor.apana.org.au/ Drop sign/verify support from virtio crypto. There's no strong reason to also remove encrypt/decrypt support, so keep it. A key selling point of virtio crypto is to allow guest access to crypto accelerators on the host. So far the only akcipher algorithm supported by virtio crypto is RSA. Dropping sign/verify merely means that the PKCS#1 padding is now always generated or verified inside the guest, but the actual signature generation/verification (which is an RSA decrypt/encrypt operation) may still use an accelerator on the host. Generating or verifying the PKCS#1 padding is cheap, so a hardware accelerator won't be of much help there. Which begs the question whether virtio crypto support for sign/verify makes sense at all. 
It would make sense for the sign operation if the host has a security chip to store asymmetric private keys. But the kernel doesn't even have an asymmetric_key_subtype yet for hardware-based private keys. There's at least one rudimentary driver for such chips (atmel-ecc.c for ATECC508A), but it doesn't implement the sign operation. The kernel would first have to grow support for a hardware asymmetric_key_subtype and at least one driver implementing the sign operation before exposure to guests via virtio makes sense. Signed-off-by: Lukas Wunner Signed-off-by: Herbert Xu Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 118 ---------------------- 1 file changed, 118 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index 675f4c58eb7d..efa9d46876c1 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -846,122 +846,6 @@ static int ccp_sm2_decrypt(struct akcipher_request *req) return ret; } -static int ccp_sm2_sign(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); - struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); - struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; - int nents; - int ret; - - if (!ctx->u.sm2.pri_key_len) - return -ENOKEY; - - if (req->src_len != CCP_SM2_OPERAND_LEN) - return -EINVAL; - - nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN); - if (nents < 0) - return -EINVAL; - - scatterwalk_map_and_copy(src->operand_e, req->src, 0, - CCP_SM2_OPERAND_LEN, 0); - memcpy(src->operand_d, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); - - rctx->req = req; - rctx->phase = CCP_SM2_SIGN_PH_SIGN; - ret = ccp_sm2_post_cmd(rctx, CCP_SM2_SIGN_SRC_SIZE, - CCP_SM2_MODE_SIGN, 1); - - return ret; -} - -static int ccp_sm2_verify(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = 
crypto_akcipher_reqtfm(req); - struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); - struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); - struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; - int siglen; - int nents; - int ret; - struct sm2_signature_ctx sig; - unsigned char *buffer; - - if (!ctx->u.sm2.pub_key_len) - return -ENOKEY; - - if (req->src_len == CCP_SM2_OPERAND_LEN * 3) { - /* Compatible with non-encoded signature from user space */ - nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); - if (nents < 0) - return -EINVAL; - - scatterwalk_map_and_copy(src->operand_e, req->src, 0, - CCP_SM2_OPERAND_LEN * 3, 0); - memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); - memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, - CCP_SM2_OPERAND_LEN); - - rctx->req = req; - rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; - ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, - CCP_SM2_MODE_VERIFY, 0); - - return ret; - } else if (req->src_len < CCP_SM2_OPERAND_LEN * 3) { - /* Compatible with usage like sm2 test of testmgr */ - siglen = req->src_len; - if (req->dst_len != CCP_SM2_OPERAND_LEN) - return -EINVAL; - } else { - /* deal with der encoding signature from user space */ - siglen = req->src_len - CCP_SM2_OPERAND_LEN; - } - - buffer = kmalloc(siglen + CCP_SM2_OPERAND_LEN, GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - sg_pcopy_to_buffer(req->src, - sg_nents_for_len(req->src, siglen + CCP_SM2_OPERAND_LEN), - buffer, siglen + CCP_SM2_OPERAND_LEN, 0); - - sig.sig_r = NULL; - sig.sig_s = NULL; - ret = asn1_ber_decoder(&ccp_sm2_sign_decoder, &sig, - buffer, siglen); - - if (ret) - goto error; - - memcpy(src->operand_e, buffer + siglen, CCP_SM2_OPERAND_LEN); - - if (sig.r_len > CCP_SM2_OPERAND_LEN) - memcpy(src->operand_d, sig.sig_r + 1, CCP_SM2_OPERAND_LEN); - else - memcpy(src->operand_d, sig.sig_r, CCP_SM2_OPERAND_LEN); - - if (sig.s_len > CCP_SM2_OPERAND_LEN) - memcpy(src->operand_k, sig.sig_s + 1, 
CCP_SM2_OPERAND_LEN); - else - memcpy(src->operand_k, sig.sig_s, CCP_SM2_OPERAND_LEN); - - memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); - memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, - CCP_SM2_OPERAND_LEN); - - rctx->req = req; - rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; - ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, - CCP_SM2_MODE_VERIFY, 0); - -error: - kfree(buffer); - return ret; -} - static int ccp_sm2_verify_handle(struct ccp_sm2_req_ctx *rctx) { struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; @@ -1075,8 +959,6 @@ static void ccp_sm2_exit_tfm(struct crypto_akcipher *tfm) } static struct akcipher_alg ccp_sm2_defaults = { - .sign = ccp_sm2_sign, - .verify = ccp_sm2_verify, .encrypt = ccp_sm2_encrypt, .decrypt = ccp_sm2_decrypt, .set_pub_key = ccp_sm2_setpubkey, From c876f3350abbd58b52d65e5849711d6947b99538 Mon Sep 17 00:00:00 2001 From: yangdepei Date: Mon, 18 Mar 2024 14:53:46 +0800 Subject: [PATCH 82/99] crypto: ccp: fix bug that SM2 encryption of long data causes kernel crash hygon inclusion category: bugfix -------------------------------- long data sm2 encryption may cause out of bounds memory access Fixes: acafe30ff58a ("crypto: ccp: Support SM2 algorithm for hygon ccp.") Signed-off-by: liulanyi Signed-off-by: yangdepei (cherry picked from commit 11d188b8bea43301f3877fd90264178c8de0a95a) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-crypto-sm2-hygon.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c index efa9d46876c1..d6dc5964e17e 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -671,8 +671,6 @@ static void ccp_sm2_enc_compute(struct work_struct *work) goto e_complete; } - scatterwalk_map_and_copy(rctx->src, req->src, 0, req->src_len, 0); - /* C2 = M ^ t */ ret = ccp_sm2_kdf_xor(shash, req->src, 0, req->src_len, req->dst, 
CCP_SM2_ENCRYPT_EXT_LEN, From 5325ab11246c4109c2499323d0e2f55ff37fa459 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sun, 8 May 2022 14:19:29 +0800 Subject: [PATCH 83/99] crypto: ccp: Modify value of COMMANDS_PER_QUEUE from 16 to 8192. hygon inclusion category: feature -------------------------------- change command queue size to 8192 to support multipule cmd in hygon ccp Signed-off-by: Yabin Li Signed-off-by: yangdepei (cherry picked from commit 79c6196b5ec40e4ce0fe749efb41d52747660d40) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-dev-v5.c | 32 ++++++++++++++++++++++++-------- drivers/crypto/ccp/ccp-dev.h | 11 +++++++---- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 0a304b0ce99d..62e07c9eb793 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -227,6 +227,17 @@ union ccp_function { #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) +static inline unsigned int command_per_queue(void) +{ +#ifdef CONFIG_HYGON_GM + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
+ HYGON_COMMANDS_PER_QUEUE : + COMMANDS_PER_QUEUE; +#else + return COMMANDS_PER_QUEUE; +#endif +} + static inline u32 low_address(unsigned long addr) { return (u64)addr & 0x0ffffffff; @@ -240,15 +251,16 @@ static inline u32 high_address(unsigned long addr) static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) { unsigned int head_idx, n; - u32 head_lo, queue_start; + u32 head_lo, queue_start, command_per_q; + command_per_q = command_per_queue(); queue_start = low_address(cmd_q->qdma_tail); head_lo = ioread32(cmd_q->reg_head_lo); head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); - n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; + n = head_idx + command_per_q - cmd_q->qidx - 1; - return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ + return n % command_per_q; /* Always one unused spot */ } static int ccp5_do_cmd(struct ccp5_desc *desc, @@ -256,10 +268,11 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, { __le32 *mP; u32 *dP; - u32 tail; + u32 tail, command_per_q; int i; int ret = 0; + command_per_q = command_per_queue(); cmd_q->total_ops++; if (CCP5_CMD_SOC(desc)) { @@ -273,7 +286,7 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; /* The data used by this command must be flushed to memory */ wmb(); @@ -974,7 +987,7 @@ static int ccp5_init(struct ccp_device *ccp) char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; unsigned int qmr, i; u64 status; - u32 status_lo, status_hi; + u32 status_lo, status_hi, command_per_q, queue_size_val; int ret; /* Find available queues */ @@ -991,6 +1004,9 @@ static int ccp5_init(struct ccp_device *ccp) return 1; } + command_per_q = command_per_queue(); + queue_size_val = QUEUE_SIZE_VAL(command_per_q); + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; @@ -1017,7 
+1033,7 @@ static int ccp5_init(struct ccp_device *ccp) /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); - cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); + cmd_q->qsize = Q_SIZE(command_per_q, Q_DESC_SIZE); cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma, GFP_KERNEL); @@ -1104,7 +1120,7 @@ static int ccp5_init(struct ccp_device *ccp) cmd_q = &ccp->cmd_q[i]; cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); - cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; + cmd_q->qcontrol |= queue_size_val << CMD5_Q_SHIFT; cmd_q->qdma_tail = cmd_q->qbase_dma; dma_addr_lo = low_address(cmd_q->qdma_tail); diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 92b859dae7c6..5dec502f3c5d 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -99,12 +99,15 @@ #define CMD5_Q_MEM_LOCATION 0x4 #define CMD5_Q_SIZE 0x1F #define CMD5_Q_SHIFT 3 + #define COMMANDS_PER_QUEUE 16 -#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ - CMD5_Q_SIZE) -#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) +#define HYGON_COMMANDS_PER_QUEUE 8192 + #define Q_DESC_SIZE sizeof(struct ccp5_desc) -#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) + +#define QUEUE_SIZE_VAL(c) ((ffs((c)) - 2) & CMD5_Q_SIZE) +#define Q_PTR_MASK(c) (2 << (QUEUE_SIZE_VAL((c)) + 5) - 1) +#define Q_SIZE(c, n) ((c)*(n)) #define INT_COMPLETION 0x1 #define INT_ERROR 0x2 From d78f86cda1ae0868c1483d217667fe08ef4c042e Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Sun, 8 May 2022 18:57:08 +0800 Subject: [PATCH 84/99] crypto: ccp: Process multiple VQ commands once for SM3 ccp. hygon inclusion category: feature -------------------------------- optimize sm3 processing performance, the physical page of each sg list corresponds to a CCP cmd, all cmd prepared, then start ccp. 
Signed-off-by: Yabin Li Signed-off-by: yangdepei (cherry picked from commit 7f18fe7cccf8a6b66a3fd4fcfc195c079fa59096) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-dev-v5.c | 73 ++++++++++++++++++++++++++++++++- drivers/crypto/ccp/ccp-dev.h | 1 + drivers/crypto/ccp/ccp-ops.c | 19 +++++++++ 3 files changed, 92 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 62e07c9eb793..b14d18162ebc 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -263,6 +263,76 @@ static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) return n % command_per_q; /* Always one unused spot */ } +static int ccp5_do_multi_cmds(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + u32 *mP; + __le32 *dP; + int i; + u32 command_per_q; + + command_per_q = command_per_queue(); + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + + mutex_lock(&cmd_q->q_mutex); + + mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; + dP = (__le32 *) desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; + + mutex_unlock(&cmd_q->q_mutex); + + return 0; +} + +static int ccp5_do_run_cmd(struct ccp_op *op) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + u32 tail; + int ret = 0; + + mutex_lock(&cmd_q->q_mutex); + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (op->ioc) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + 
/* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + static int ccp5_do_cmd(struct ccp5_desc *desc, struct ccp_cmd_queue *cmd_q) { @@ -700,7 +770,7 @@ static int ccp5_perform_sm3(struct ccp_op *op) CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); } - return ccp5_do_cmd(&desc, op->cmd_q); + return ccp5_do_multi_cmds(&desc, op->cmd_q); } static int ccp5_perform_sm4(struct ccp_op *op) @@ -1334,6 +1404,7 @@ static const struct ccp_actions ccp5_actions = { .sm3 = ccp5_perform_sm3, .sm4 = ccp5_perform_sm4, .sm4_ctr = ccp5_perform_sm4_ctr, + .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 5dec502f3c5d..e1aa68f4044c 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -696,6 +696,7 @@ struct ccp_actions { int (*sm3)(struct ccp_op *op); int (*sm4)(struct ccp_op *op); int (*sm4_ctr)(struct ccp_op *op); + int (*run_cmd)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index 4babb5f89879..462d4a7beb16 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2634,12 +2634,25 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (!src.sg_wa.bytes_left && sm3->final) op.eom = 1; + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + ret = cmd_q->ccp->vdata->perform->sm3(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; } + if (!src.sg_wa.bytes_left || op.soc) { + ret = 
cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + ccp_process_data(&src, NULL, &op); } } else { @@ -2659,6 +2672,12 @@ static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) cmd->engine_error = cmd_q->cmd_error; goto e_data; } + + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } } ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, From cea3dd33b0a3d94423c1edb13c106cfc7829d576 Mon Sep 17 00:00:00 2001 From: Yabin Li Date: Mon, 9 May 2022 07:02:32 +0800 Subject: [PATCH 85/99] crypto: ccp: Process multiple VQ commands once for SM4/SM4-CTR ccp. hygon inclusion category: feature -------------------------------- optimize sm4 processing performance by starting ccp only after all cmd has been prepared Signed-off-by: Yabin Li Signed-off-by: yangdepei (cherry picked from commit bca09bd8f79fde5d06a1484eb7f584831a0d7416) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-dev-v5.c | 4 ++-- drivers/crypto/ccp/ccp-ops.c | 26 ++++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index b14d18162ebc..e5c129c3e049 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -812,7 +812,7 @@ static int ccp5_perform_sm4(struct ccp_op *op) CCP5_CMD_KEY_HI(&desc) = 0; CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - return ccp5_do_cmd(&desc, op->cmd_q); + return ccp5_do_multi_cmds(&desc, op->cmd_q); } static int ccp5_perform_sm4_ctr(struct ccp_op *op) @@ -854,7 +854,7 @@ static int ccp5_perform_sm4_ctr(struct ccp_op *op) CCP5_CMD_KEY_HI(&desc) = 0; CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - return ccp5_do_cmd(&desc, op->cmd_q); + return ccp5_do_multi_cmds(&desc, op->cmd_q); } static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) diff --git a/drivers/crypto/ccp/ccp-ops.c 
b/drivers/crypto/ccp/ccp-ops.c index 462d4a7beb16..4f31e8e6a277 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2817,12 +2817,25 @@ static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (!src.sg_wa.bytes_left) op.eom = 1; + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + ret = cmd_q->ccp->vdata->perform->sm4(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_iv_key; } + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + ccp_process_data(&src, &dst, &op); } @@ -2928,12 +2941,25 @@ static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) if (!src.sg_wa.bytes_left) op.eom = 1; + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_iv_key; } + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + ccp_process_data(&src, &dst, &op); } From 987bffea24acbb79cbcfcd695099d12cb1ab45fb Mon Sep 17 00:00:00 2001 From: yangdepei Date: Mon, 8 Apr 2024 17:09:39 +0800 Subject: [PATCH 86/99] crypto: ccp: remove repeated sm4-hs mode hygon inclusion category: bugfix -------------------------------- remove the repeated sm4-hs mode definition, otherwise, it will caused ccp-crypto module load err in the following version of kernel-6.6, eg. 
6.6.20 Fixes: 474d2ff69261 ("crypto: ccp: Support SM4 algorithm for hygon ccp.") Signed-off-by: yangdepei (cherry picked from commit 1ebe003a2573c89bc9a6ef1c4567ebaf595d68ab) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/ccp-crypto-sm4-hygon.c | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c index 1c2a31c8d3bf..1c6be927f7b4 100644 --- a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -207,15 +207,6 @@ static struct ccp_sm4_def sm4_algs[] = { .ivsize = 0, .alg_defaults = &ccp_sm4_defaults, }, - { - .mode = CCP_SM4_ALG_MODE_ECB_HS, - .version = CCP_VERSION(5, 0), - .name = "ecb(sm4)", - .driver_name = "ecb-sm4-hs-ccp", - .blocksize = SM4_BLOCK_SIZE, - .ivsize = 0, - .alg_defaults = &ccp_sm4_defaults, - }, { .mode = CCP_SM4_ALG_MODE_CBC, .version = CCP_VERSION(5, 0), @@ -234,15 +225,6 @@ static struct ccp_sm4_def sm4_algs[] = { .ivsize = SM4_BLOCK_SIZE, .alg_defaults = &ccp_sm4_defaults, }, - { - .mode = CCP_SM4_ALG_MODE_CBC_HS, - .version = CCP_VERSION(5, 0), - .name = "cbc(sm4)", - .driver_name = "cbc-sm4-hs-ccp", - .blocksize = SM4_BLOCK_SIZE, - .ivsize = SM4_BLOCK_SIZE, - .alg_defaults = &ccp_sm4_defaults, - }, { .mode = CCP_SM4_ALG_MODE_OFB, .version = CCP_VERSION(5, 0), From 8224eed249913666c5610a4a9ec825e9efd32fbb Mon Sep 17 00:00:00 2001 From: yangdepei Date: Tue, 16 Apr 2024 20:15:13 +0800 Subject: [PATCH 87/99] crypto: ccp: support sm2 on Hygon generation 4th CPU hygon inclusion category: feature -------------------------------- 1. support sm2 on 4th cpu 2. create new ccp-dev-v5.c file for hygon ccp only 3. 
restore original ccp-dev-v5.c file Signed-off-by: yangdepei (cherry picked from commit 42d2b65e2c4eddcff4ed58bcb1dc28556f65b3dd) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/Kconfig | 1 + drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/ccp-dev-v5.c | 326 +------ drivers/crypto/ccp/ccp-dev.h | 14 +- drivers/crypto/ccp/hygon/ccp-dev-v5.c | 1236 +++++++++++++++++++++++++ drivers/crypto/ccp/hygon/sp-pci.c | 6 +- 6 files changed, 1263 insertions(+), 323 deletions(-) create mode 100644 drivers/crypto/ccp/hygon/ccp-dev-v5.c diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig index 12c5c729e264..6dcf89f9fb9a 100644 --- a/drivers/crypto/ccp/Kconfig +++ b/drivers/crypto/ccp/Kconfig @@ -50,6 +50,7 @@ config HYGON_GM bool "Hygon GM (sm2/sm3/sm4) Interface" default y depends on CRYPTO_DEV_CCP_CRYPTO && X86_64 + select CRYPTO_SM3_GENERIC help Hygon GM ccp driver diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 9c3e29729ee0..c8ae9e64a444 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -5,7 +5,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-ops.o \ ccp-dev-v3.o \ ccp-dev-v5.o \ - ccp-dmaengine.o + ccp-dmaengine.o \ + hygon/ccp-dev-v5.o ccp-$(CONFIG_CRYPTO_DEV_CCP_DEBUGFS) += ccp-debugfs.o ccp-$(CONFIG_PCI) += sp-pci.o \ hygon/sp-pci.o diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index e5c129c3e049..7b73332d6aa1 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -131,28 +131,6 @@ union ccp_function { u16 type:2; u16 mode:3; } ecc; - struct { - u16 rand:1; - u16 rsvd:11; - u16 mode:3; - } sm2; - struct { - u16 rsvd:10; - u16 type:4; - u16 rsvd2:1; - } sm3; - struct { - u16 rsvd:7; - u16 encrypt:1; - u16 mode:4; - u16 select:1; - u16 rsvd2:2; - } sm4; - struct { - u16 size:7; - u16 encrypt:1; - u16 step:7; - } sm4_ctr; u16 raw; }; @@ -173,15 +151,6 @@ union ccp_function { #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) #define 
CCP_ECC_MODE(p) ((p)->ecc.mode) #define CCP_ECC_AFFINE(p) ((p)->ecc.one) -#define CCP_SM2_RAND(p) ((p)->sm2.rand) -#define CCP_SM2_MODE(p) ((p)->sm2.mode) -#define CCP_SM3_TYPE(p) ((p)->sm3.type) -#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) -#define CCP_SM4_MODE(p) ((p)->sm4.mode) -#define CCP_SM4_SELECT(p) ((p)->sm4.select) -#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) -#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) -#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -217,8 +186,6 @@ union ccp_function { #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) -#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) -#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) /* Word 6/7 */ #define CCP5_CMD_DW6(p) ((p)->key_lo) @@ -227,17 +194,6 @@ union ccp_function { #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) -static inline unsigned int command_per_queue(void) -{ -#ifdef CONFIG_HYGON_GM - return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
- HYGON_COMMANDS_PER_QUEUE : - COMMANDS_PER_QUEUE; -#else - return COMMANDS_PER_QUEUE; -#endif -} - static inline u32 low_address(unsigned long addr) { return (u64)addr & 0x0ffffffff; @@ -251,86 +207,15 @@ static inline u32 high_address(unsigned long addr) static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) { unsigned int head_idx, n; - u32 head_lo, queue_start, command_per_q; + u32 head_lo, queue_start; - command_per_q = command_per_queue(); queue_start = low_address(cmd_q->qdma_tail); head_lo = ioread32(cmd_q->reg_head_lo); head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); - n = head_idx + command_per_q - cmd_q->qidx - 1; + n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; - return n % command_per_q; /* Always one unused spot */ -} - -static int ccp5_do_multi_cmds(struct ccp5_desc *desc, - struct ccp_cmd_queue *cmd_q) -{ - u32 *mP; - __le32 *dP; - int i; - u32 command_per_q; - - command_per_q = command_per_queue(); - - cmd_q->total_ops++; - - if (CCP5_CMD_SOC(desc)) { - CCP5_CMD_IOC(desc) = 1; - CCP5_CMD_SOC(desc) = 0; - } - - mutex_lock(&cmd_q->q_mutex); - - mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; - dP = (__le32 *) desc; - for (i = 0; i < 8; i++) - mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - - cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; - - mutex_unlock(&cmd_q->q_mutex); - - return 0; -} - -static int ccp5_do_run_cmd(struct ccp_op *op) -{ - struct ccp_cmd_queue *cmd_q = op->cmd_q; - u32 tail; - int ret = 0; - - mutex_lock(&cmd_q->q_mutex); - - /* The data used by this command must be flushed to memory */ - wmb(); - - /* Write the new tail address back to the queue register */ - tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); - iowrite32(tail, cmd_q->reg_tail_lo); - - /* Turn the queue back on using our cached control register */ - iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); - mutex_unlock(&cmd_q->q_mutex); - - if (op->ioc) { - /* Wait for the job to complete */ - ret = 
wait_event_interruptible(cmd_q->int_queue, - cmd_q->int_rcvd); - if (ret || cmd_q->cmd_error) { - /* Log the error and flush the queue by - * moving the head pointer - */ - if (cmd_q->cmd_error) - ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); - iowrite32(tail, cmd_q->reg_head_lo); - if (!ret) - ret = -EIO; - } - cmd_q->int_rcvd = 0; - } - - return ret; + return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ } static int ccp5_do_cmd(struct ccp5_desc *desc, @@ -338,11 +223,10 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, { __le32 *mP; u32 *dP; - u32 tail, command_per_q; + u32 tail; int i; int ret = 0; - command_per_q = command_per_queue(); cmd_q->total_ops++; if (CCP5_CMD_SOC(desc)) { @@ -356,7 +240,7 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; + cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; /* The data used by this command must be flushed to memory */ wmb(); @@ -700,163 +584,6 @@ static int ccp5_perform_ecc(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } -static int ccp5_perform_sm2(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - struct ccp_dma_info *saddr = &op->src.u.dma; - struct ccp_dma_info *daddr = &op->dst.u.dma; - - op->cmd_q->total_sm2_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; - - CCP5_CMD_SOC(&desc) = 0; - CCP5_CMD_IOC(&desc) = 1; - CCP5_CMD_INIT(&desc) = 1; - CCP5_CMD_EOM(&desc) = 1; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM2_RAND(&function) = op->u.sm2.rand; - CCP_SM2_MODE(&function) = op->u.sm2.mode; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - /* Length of source data must match with mode */ - CCP5_CMD_LEN(&desc) = saddr->length; - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - CCP5_CMD_DST_LO(&desc) = 
ccp_addr_lo(daddr); - CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); - CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - return ccp5_do_cmd(&desc, op->cmd_q); -} - -static int ccp5_perform_sm3(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - - op->cmd_q->total_sm3_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; - - CCP5_CMD_SOC(&desc) = op->soc; - CCP5_CMD_IOC(&desc) = op->ioc; - CCP5_CMD_INIT(&desc) = op->init; - CCP5_CMD_EOM(&desc) = op->eom; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM3_TYPE(&function) = op->u.sm3.type; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - CCP5_CMD_LEN(&desc) = op->src.u.dma.length; - - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; - - if (op->eom) { - CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); - CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); - } - - return ccp5_do_multi_cmds(&desc, op->cmd_q); -} - -static int ccp5_perform_sm4(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; - - op->cmd_q->total_sm4_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; - - CCP5_CMD_SOC(&desc) = op->soc; - CCP5_CMD_IOC(&desc) = op->ioc; - CCP5_CMD_INIT(&desc) = op->init; - CCP5_CMD_EOM(&desc) = op->eom; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; - CCP_SM4_MODE(&function) = op->u.sm4.mode; - CCP_SM4_SELECT(&function) = op->u.sm4.select; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - CCP5_CMD_LEN(&desc) = op->src.u.dma.length; - - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - CCP5_CMD_LSB_ID(&desc) = 
op->sb_ctx; - - CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); - CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); - CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); - CCP5_CMD_KEY_HI(&desc) = 0; - CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - - return ccp5_do_multi_cmds(&desc, op->cmd_q); -} - -static int ccp5_perform_sm4_ctr(struct ccp_op *op) -{ - struct ccp5_desc desc; - union ccp_function function; - u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; - - op->cmd_q->total_sm4_ctr_ops++; - - memset(&desc, 0, Q_DESC_SIZE); - - CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; - - CCP5_CMD_SOC(&desc) = op->soc; - CCP5_CMD_IOC(&desc) = op->ioc; - CCP5_CMD_INIT(&desc) = op->init; - CCP5_CMD_EOM(&desc) = op->eom; - CCP5_CMD_PROT(&desc) = 0; - - function.raw = 0; - CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; - CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; - CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; - CCP5_CMD_FUNCTION(&desc) = function.raw; - - CCP5_CMD_LEN(&desc) = op->src.u.dma.length; - - CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); - CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); - CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; - - CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); - CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); - CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; - - CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); - CCP5_CMD_KEY_HI(&desc) = 0; - CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; - - return ccp5_do_multi_cmds(&desc, op->cmd_q); -} - static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -866,7 +593,6 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) /* Build a bit mask to know which LSBs this queue has access to. * Don't bother with segment 0 as it has special privileges. 
*/ - status >>= LSB_REGION_WIDTH; for (j = 1; j < MAX_LSB_CNT; j++) { if (status & q_mask) bitmap_set(cmd_q->lsbmask, j, 1); @@ -1018,7 +744,7 @@ static void ccp5_irq_bh(unsigned long data) status = ioread32(cmd_q->reg_interrupt_status); - if (status & SUPPORTED_INTERRUPTS) { + if (status) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_status); cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); @@ -1027,9 +753,10 @@ static void ccp5_irq_bh(unsigned long data) if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); + cmd_q->int_rcvd = 1; + /* Acknowledge the interrupt and wake the kthread */ iowrite32(status, cmd_q->reg_interrupt_status); - cmd_q->int_rcvd = 1; wake_up_interruptible(&cmd_q->int_queue); } } @@ -1057,7 +784,7 @@ static int ccp5_init(struct ccp_device *ccp) char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; unsigned int qmr, i; u64 status; - u32 status_lo, status_hi, command_per_q, queue_size_val; + u32 status_lo, status_hi; int ret; /* Find available queues */ @@ -1074,9 +801,6 @@ static int ccp5_init(struct ccp_device *ccp) return 1; } - command_per_q = command_per_queue(); - queue_size_val = QUEUE_SIZE_VAL(command_per_q); - for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; @@ -1103,7 +827,7 @@ static int ccp5_init(struct ccp_device *ccp) /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); - cmd_q->qsize = Q_SIZE(command_per_q, Q_DESC_SIZE); + cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma, GFP_KERNEL); @@ -1190,7 +914,7 @@ static int ccp5_init(struct ccp_device *ccp) cmd_q = &ccp->cmd_q[i]; cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); - cmd_q->qcontrol |= queue_size_val << CMD5_Q_SHIFT; + cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; cmd_q->qdma_tail = cmd_q->qbase_dma; dma_addr_lo = low_address(cmd_q->qdma_tail); @@ 
-1338,26 +1062,6 @@ static void ccp5_destroy(struct ccp_device *ccp) } } -static int ccp5_get_trng_mask_param(void) -{ - /* According to spec description for SM4 high secure module, - * which need 64 bytes data, so the initialize times of writing - * mask register must be 16 or a multiple of 16. - * - * The AES algorithem need 48 bytes, so the initialize times will - * be 12 or a multiple of 12. - */ - -#ifdef CONFIG_HYGON_GM - /* for sm4 HS */ - if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) - return 16; -#endif - - /* for AES HS */ - return 12; -} - static void ccp5_config(struct ccp_device *ccp) { /* Public side */ @@ -1368,13 +1072,12 @@ static void ccp5other_config(struct ccp_device *ccp) { int i; u32 rnd; - int len = ccp5_get_trng_mask_param(); /* We own all of the queues on the NTB CCP */ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); - for (i = 0; i < len; i++) { + for (i = 0; i < 12; i++) { rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); } @@ -1400,11 +1103,6 @@ static const struct ccp_actions ccp5_actions = { .rsa = ccp5_perform_rsa, .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, - .sm2 = ccp5_perform_sm2, - .sm3 = ccp5_perform_sm3, - .sm4 = ccp5_perform_sm4, - .sm4_ctr = ccp5_perform_sm4_ctr, - .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index e1aa68f4044c..46518c80f8ca 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -101,13 +101,12 @@ #define CMD5_Q_SHIFT 3 #define COMMANDS_PER_QUEUE 16 -#define HYGON_COMMANDS_PER_QUEUE 8192 +#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ + CMD5_Q_SIZE) +#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) #define Q_DESC_SIZE sizeof(struct ccp5_desc) - -#define QUEUE_SIZE_VAL(c) ((ffs((c)) - 2) & CMD5_Q_SIZE) -#define 
Q_PTR_MASK(c) (2 << (QUEUE_SIZE_VAL((c)) + 5) - 1) -#define Q_SIZE(c, n) ((c)*(n)) +#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) #define INT_COMPLETION 0x1 #define INT_ERROR 0x2 @@ -362,6 +361,9 @@ struct ccp_device { bool use_tasklet; struct tasklet_struct irq_tasklet; + /* This flag mark if the ccp support both sm2 and ecc function */ + uint32_t support_sm2_ecc; + /* I/O area used for device communication. The register mapping * starts at an offset into the mapped bar. * The CMD_REQx registers and the Delete_Cmd_Queue_Job register @@ -709,5 +711,7 @@ extern const struct ccp_vdata ccpv3_platform; extern const struct ccp_vdata ccpv3; extern const struct ccp_vdata ccpv5a; extern const struct ccp_vdata ccpv5b; +extern const struct ccp_vdata ccpv5a_hygon; +extern const struct ccp_vdata ccpv5b_hygon; #endif diff --git a/drivers/crypto/ccp/hygon/ccp-dev-v5.c b/drivers/crypto/ccp/hygon/ccp-dev-v5.c new file mode 100644 index 000000000000..35e9fc5135d0 --- /dev/null +++ b/drivers/crypto/ccp/hygon/ccp-dev-v5.c @@ -0,0 +1,1236 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Secure Processor interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Depei Yang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include + +#include "../ccp-dev.h" + +/* Allocate the requested number of contiguous LSB slots + * from the LSB bitmap. Look in the private range for this + * queue first; failing that, check the public area. + * If no space is available, wait around. 
+ * Return: first slot number + */ +static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) +{ + struct ccp_device *ccp; + int start; + + /* First look at the map for the queue */ + if (cmd_q->lsb >= 0) { + start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, + LSB_SIZE, + 0, count, 0); + if (start < LSB_SIZE) { + bitmap_set(cmd_q->lsbmap, start, count); + return start + cmd_q->lsb * LSB_SIZE; + } + } + + /* No joy; try to get an entry from the shared blocks */ + ccp = cmd_q->ccp; + for (;;) { + mutex_lock(&ccp->sb_mutex); + + start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, + MAX_LSB_CNT * LSB_SIZE, + 0, + count, 0); + if (start <= MAX_LSB_CNT * LSB_SIZE) { + bitmap_set(ccp->lsbmap, start, count); + + mutex_unlock(&ccp->sb_mutex); + return start; + } + + ccp->sb_avail = 0; + + mutex_unlock(&ccp->sb_mutex); + + /* Wait for KSB entries to become available */ + if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) + return 0; + } +} + + +/* Free a number of LSB slots from the bitmap, starting at + * the indicated starting slot number. 
+ */ +static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, + unsigned int count) +{ + if (!start) + return; + + if (cmd_q->lsb == start) { + /* An entry from the private LSB */ + bitmap_clear(cmd_q->lsbmap, start, count); + } else { + /* From the shared LSBs */ + struct ccp_device *ccp = cmd_q->ccp; + + mutex_lock(&ccp->sb_mutex); + bitmap_clear(ccp->lsbmap, start, count); + ccp->sb_avail = 1; + mutex_unlock(&ccp->sb_mutex); + wake_up_interruptible_all(&ccp->sb_queue); + } +} + +/* Hygon CCP version 5: Union to define the function field (cmd_reg1/dword0) */ +union ccp_function { + struct { + u16 byteswap:2; + u16 bitwise:3; + u16 reflect:2; + u16 rsvd:8; + } pt; + struct { + u16 rand:1; + u16 rsvd:10; + u16 mode:3; + u16 ecc_mode:1; + } sm2_ecc; + struct { + u16 rand:1; + u16 rsvd:11; + u16 mode:3; + } sm2; + struct { + u16 rsvd:10; + u16 type:4; + u16 rsvd2:1; + } sm3; + struct { + u16 rsvd:7; + u16 encrypt:1; + u16 mode:4; + u16 select:1; + u16 rsvd2:2; + } sm4; + struct { + u16 size:7; + u16 encrypt:1; + u16 step:7; + } sm4_ctr; + u16 raw; +}; + +#define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) +#define CCP_PT_BITWISE(p) ((p)->pt.bitwise) + +#define CCP_SM2_RAND(p) ((p)->sm2.rand) +#define CCP_SM2_MODE(p) ((p)->sm2.mode) + +/* For ccp support both sm2 and ecc */ +#define CCP_SM2_ECC_RAND(p) ((p)->sm2_ecc.rand) +#define CCP_SM2_ECC_MODE(p) ((p)->sm2_ecc.mode) +#define CCP_SM2_ECC_ECC_MODE(p) ((p)->sm2_ecc.ecc_mode) + +#define CCP_SM3_TYPE(p) ((p)->sm3.type) +#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) +#define CCP_SM4_MODE(p) ((p)->sm4.mode) +#define CCP_SM4_SELECT(p) ((p)->sm4.select) +#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) +#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) +#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) + +/* Word 0 */ +#define CCP5_CMD_DW0(p) ((p)->dw0) +#define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc) +#define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc) +#define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init) +#define 
CCP5_CMD_EOM(p) (CCP5_CMD_DW0(p).eom) +#define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function) +#define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine) +#define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot) + +/* Word 1 */ +#define CCP5_CMD_DW1(p) ((p)->length) +#define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p)) + +/* Word 2 */ +#define CCP5_CMD_DW2(p) ((p)->src_lo) +#define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p)) + +/* Word 3 */ +#define CCP5_CMD_DW3(p) ((p)->dw3) +#define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem) +#define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi) +#define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) +#define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed) + +/* Words 4/5 */ +#define CCP5_CMD_DW4(p) ((p)->dw4) +#define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo) +#define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi) +#define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p)) +#define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) +#define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) +#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) +#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) + +/* Word 6/7 */ +#define CCP5_CMD_DW6(p) ((p)->key_lo) +#define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p)) +#define CCP5_CMD_DW7(p) ((p)->dw7) +#define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) +#define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) + +#define CCP5_COMMANDS_PER_QUEUE 8192 +#define CCP5_QUEUE_SIZE_VAL ((ffs(CCP5_COMMANDS_PER_QUEUE) - 2) & \ + CMD5_Q_SIZE) +#define CCP5_Q_PTR_MASK (2 << (CCP5_QUEUE_SIZE_VAL + 5) - 1) +#define CCP5_Q_SIZE(n) (CCP5_COMMANDS_PER_QUEUE * (n)) + +/* indicates whether there is ECC engine for Hygon CCP */ +#define RI_ECC_PRESENT 0x0400 + +/** + * Hygon CCP from 4th generation support both sm2 & ecc, + * but its input content is different from previous version. + * the previous requries only one src buffer which include + * hash + key. Now, hash and key should passed separately. 
To + * compatible with previous driver, we parse hash and key + * from src buffer which same as previous input + */ +#define SM2_ECC_OPERAND_LEN 32 +#define SM2_ECC_KG_SRC_SIZE 32 +#define SM2_ECC_LP_SRC_SIZE 32 +#define SM2_ECC_SIGN_SRC_SIZE 64 +#define SM2_ECC_VERIFY_SRC_SIZE 96 + +static inline int ccp5_get_keyinfo(struct ccp_op *op, dma_addr_t *kaddr, u32 *slen) +{ + struct ccp_dma_info *sinfo = &op->src.u.dma; + dma_addr_t saddr = sinfo->address + sinfo->offset; + int ret = 0; + + switch (op->u.sm2.mode) { + case CCP_SM2_MODE_SIGN: + *kaddr = saddr + SM2_ECC_OPERAND_LEN; + *slen = SM2_ECC_SIGN_SRC_SIZE; + break; + case CCP_SM2_MODE_VERIFY: + *kaddr = saddr + SM2_ECC_VERIFY_SRC_SIZE; + *slen = SM2_ECC_VERIFY_SRC_SIZE; + break; + case CCP_SM2_MODE_KG: + *kaddr = 0; /* unused for KG */ + *slen = SM2_ECC_KG_SRC_SIZE; + break; + case CCP_SM2_MODE_LP: + *kaddr = saddr + SM2_ECC_OPERAND_LEN; + *slen = SM2_ECC_LP_SRC_SIZE; + break; + default: + pr_err("Invalid sm2 operation, mode = %d\n", op->u.sm2.mode); + ret = -EINVAL; + break; + } + + return ret; +} + +static inline u32 low_address(unsigned long addr) +{ + return (u64)addr & 0x0ffffffff; +} + +static inline u32 high_address(unsigned long addr) +{ + return ((u64)addr >> 32) & 0x00000ffff; +} + +static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) +{ + unsigned int head_idx, n; + u32 head_lo, queue_start; + + queue_start = low_address(cmd_q->qdma_tail); + head_lo = ioread32(cmd_q->reg_head_lo); + head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); + + n = head_idx + CCP5_COMMANDS_PER_QUEUE - cmd_q->qidx - 1; + + return n % CCP5_COMMANDS_PER_QUEUE; /* Always one unused spot */ +} + +static int ccp5_do_multi_cmds(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + u32 *mP; + __le32 *dP; + int i; + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + + mutex_lock(&cmd_q->q_mutex); + + mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; + 
dP = (__le32 *) desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % CCP5_COMMANDS_PER_QUEUE; + + mutex_unlock(&cmd_q->q_mutex); + + return 0; +} + +static int ccp5_do_run_cmd(struct ccp_op *op) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + u32 tail; + int ret = 0; + + mutex_lock(&cmd_q->q_mutex); + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (op->ioc) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp5_do_cmd(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + __le32 *mP; + u32 *dP; + u32 tail; + int i; + int ret = 0; + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + mutex_lock(&cmd_q->q_mutex); + + mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx]; + dP = (u32 *)desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % CCP5_COMMANDS_PER_QUEUE; + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control 
register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (CCP5_CMD_IOC(desc)) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, + cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; +} + +static int ccp5_perform_sm2(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + dma_addr_t kaddr; + unsigned int slen = saddr->length; + int ret = 0; + + op->cmd_q->total_sm2_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 1; + CCP5_CMD_EOM(&desc) = 1; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + + /* + * ccp support both sm2 and ecc, the rand,mode filed are different + * with previous, and run on ecc or sm2 also should be indicated + */ + if (op->cmd_q->ccp->support_sm2_ecc) { + ret = ccp5_get_keyinfo(op, &kaddr, &slen); + if (ret) + return ret; + + CCP_SM2_ECC_RAND(&function) = op->u.sm2.rand; + CCP_SM2_ECC_MODE(&function) = op->u.sm2.mode; + CCP_SM2_ECC_ECC_MODE(&function) = 0; /* 0: SM2 1: ECC */ + } else { + CCP_SM2_RAND(&function) = op->u.sm2.rand; + CCP_SM2_MODE(&function) = op->u.sm2.mode; + } + + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data must match with mode */ + CCP5_CMD_LEN(&desc) = slen; + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); + CCP5_CMD_DST_MEM(&desc) 
= CCP_MEMTYPE_SYSTEM; + + if (op->cmd_q->ccp->support_sm2_ecc && + op->u.sm2.mode != CCP_SM2_MODE_KG) { + CCP5_CMD_KEY_LO(&desc) = low_address(kaddr); + CCP5_CMD_KEY_HI(&desc) = high_address(kaddr); + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sm3(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + op->cmd_q->total_sm3_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM3_TYPE(&function) = op->u.sm3.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); + CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); + } + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; + CCP_SM4_MODE(&function) = op->u.sm4.mode; + CCP_SM4_SELECT(&function) = op->u.sm4.select; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + 
CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4_ctr(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ctr_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; + CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; + CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_passthru(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + + + op->cmd_q->total_pt_ops++; 
+ + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 0; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap; + CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data is always 256 bytes */ + if (op->src.type == CCP_MEMTYPE_SYSTEM) + CCP5_CMD_LEN(&desc) = saddr->length; + else + CCP5_CMD_LEN(&desc) = daddr->length; + + if (op->src.type == CCP_MEMTYPE_SYSTEM) { + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) + CCP5_CMD_LSB_ID(&desc) = op->sb_key; + } else { + u32 key_addr = op->src.u.sb * CCP_SB_BYTES; + + CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_SRC_HI(&desc) = 0; + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB; + } + + if (op->dst.type == CCP_MEMTYPE_SYSTEM) { + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + } else { + u32 key_addr = op->dst.u.sb * CCP_SB_BYTES; + + CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_DST_HI(&desc) = 0; + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB; + } + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_aes(struct ccp_op *op) +{ + pr_err("AES function not implement!"); + return -EPERM; +} + +static int ccp5_perform_xts_aes(struct ccp_op *op) +{ + pr_err("XTS-AES function not implement!"); + return -EPERM; +} + +static int ccp5_perform_sha(struct ccp_op *op) +{ + pr_err("SHA function not implement!"); + return -EPERM; +} + +static int ccp5_perform_des3(struct ccp_op *op) +{ + pr_err("DES3 function not implement!"); + return -EPERM; +} + +static int 
ccp5_perform_rsa(struct ccp_op *op) +{ + pr_err("RSA function not implement!"); + return -EPERM; +} + +static int ccp5_perform_ecc(struct ccp_op *op) +{ + pr_err("ECC function not implement!"); + return -EPERM; +} + +static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) +{ + int q_mask = 1 << cmd_q->id; + int queues = 0; + int j; + + /* Build a bit mask to know which LSBs this queue has access to. + * Don't bother with segment 0 as it has special privileges. + */ + status >>= LSB_REGION_WIDTH; + for (j = 1; j < MAX_LSB_CNT; j++) { + if (status & q_mask) + bitmap_set(cmd_q->lsbmask, j, 1); + status >>= LSB_REGION_WIDTH; + } + queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", + cmd_q->id, queues); + + return queues ? 0 : -EINVAL; +} + +static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, + int lsb_cnt, int n_lsbs, + unsigned long *lsb_pub) +{ + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int bitno; + int qlsb_wgt; + int i; + + /* For each queue: + * If the count of potential LSBs available to a queue matches the + * ordinal given to us in lsb_cnt: + * Copy the mask of possible LSBs for this queue into "qlsb"; + * For each bit in qlsb, see if the corresponding bit in the + * aggregation mask is set; if so, we have a match. + * If we have a match, clear the bit in the aggregation to + * mark it as no longer available. + * If there is no match, clear the bit in qlsb and keep looking. 
+ */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; + + qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); + + if (qlsb_wgt == lsb_cnt) { + bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT); + + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + if (test_bit(bitno, lsb_pub)) { + /* We found an available LSB + * that this queue can access + */ + cmd_q->lsb = bitno; + bitmap_clear(lsb_pub, bitno, 1); + dev_dbg(ccp->dev, + "Queue %d gets LSB %d\n", + i, bitno); + break; + } + bitmap_clear(qlsb, bitno, 1); + bitno = find_first_bit(qlsb, MAX_LSB_CNT); + } + if (bitno >= MAX_LSB_CNT) + return -EINVAL; + n_lsbs--; + } + } + return n_lsbs; +} + +/* For each queue, from the most- to least-constrained: + * find an LSB that can be assigned to the queue. If there are N queues that + * can only use M LSBs, where N > M, fail; otherwise, every queue will get a + * dedicated LSB. Remaining LSB regions become a shared resource. + * If we have fewer LSBs than queues, all LSB regions become shared resources. + */ +static int ccp_assign_lsbs(struct ccp_device *ccp) +{ + DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT); + DECLARE_BITMAP(qlsb, MAX_LSB_CNT); + int n_lsbs = 0; + int bitno; + int i, lsb_cnt; + int rc = 0; + + bitmap_zero(lsb_pub, MAX_LSB_CNT); + + /* Create an aggregate bitmap to get a total count of available LSBs */ + for (i = 0; i < ccp->cmd_q_count; i++) + bitmap_or(lsb_pub, + lsb_pub, ccp->cmd_q[i].lsbmask, + MAX_LSB_CNT); + + n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT); + + if (n_lsbs >= ccp->cmd_q_count) { + /* We have enough LSBS to give every queue a private LSB. + * Brute force search to start with the queues that are more + * constrained in LSB choice. When an LSB is privately + * assigned, it is removed from the public mask. + * This is an ugly N squared algorithm with some optimization. 
+ */ + for (lsb_cnt = 1; + n_lsbs && (lsb_cnt <= MAX_LSB_CNT); + lsb_cnt++) { + rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, + lsb_pub); + if (rc < 0) + return -EINVAL; + n_lsbs = rc; + } + } + + rc = 0; + /* What's left of the LSBs, according to the public mask, now become + * shared. Any zero bits in the lsb_pub mask represent an LSB region + * that can't be used as a shared resource, so mark the LSB slots for + * them as "in use". + */ + bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT); + + bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); + while (bitno < MAX_LSB_CNT) { + bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); + bitmap_set(qlsb, bitno, 1); + bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); + } + + return rc; +} + +static void ccp5_disable_queue_interrupts(struct ccp_device *ccp) +{ + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) + iowrite32(0x0, ccp->cmd_q[i].reg_int_enable); +} + +static void ccp5_enable_queue_interrupts(struct ccp_device *ccp) +{ + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) + iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable); +} + +static void ccp5_irq_bh(unsigned long data) +{ + struct ccp_device *ccp = (struct ccp_device *)data; + u32 status; + unsigned int i; + + for (i = 0; i < ccp->cmd_q_count; i++) { + struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; + + status = ioread32(cmd_q->reg_interrupt_status); + + if (status & SUPPORTED_INTERRUPTS) { + cmd_q->int_status = status; + cmd_q->q_status = ioread32(cmd_q->reg_status); + cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); + + /* On error, only save the first error value */ + if ((status & INT_ERROR) && !cmd_q->cmd_error) + cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); + + /* Acknowledge the interrupt and wake the kthread */ + iowrite32(status, cmd_q->reg_interrupt_status); + cmd_q->int_rcvd = 1; + wake_up_interruptible(&cmd_q->int_queue); + } + } + ccp5_enable_queue_interrupts(ccp); +} + +static irqreturn_t ccp5_irq_handler(int 
irq, void *data) +{ + struct ccp_device *ccp = (struct ccp_device *)data; + + ccp5_disable_queue_interrupts(ccp); + ccp->total_interrupts++; + if (ccp->use_tasklet) + tasklet_schedule(&ccp->irq_tasklet); + else + ccp5_irq_bh((unsigned long)ccp); + return IRQ_HANDLED; +} + +static int ccp5_init(struct ccp_device *ccp) +{ + struct device *dev = ccp->dev; + struct ccp_cmd_queue *cmd_q; + struct dma_pool *dma_pool; + char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; + unsigned int qmr, i; + u64 status; + u32 status_lo, status_hi; + int ret; + + /* Find available queues */ + qmr = ioread32(ccp->io_regs + Q_MASK_REG); + /* + * Check for a access to the registers. If this read returns + * 0xffffffff, it's likely that the system is running a broken + * BIOS which disallows access to the device. Stop here and fail + * the initialization (but not the load, as the PSP could get + * properly initialized). + */ + if (qmr == 0xffffffff) { + dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n"); + return 1; + } + + /* check if ccp support both sm2 and ecc. 
*/ + ccp->support_sm2_ecc = !!(ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION) + & RI_ECC_PRESENT); + + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { + if (!(qmr & (1 << i))) + continue; + + /* Allocate a dma pool for this queue */ + snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", + ccp->name, i); + dma_pool = dma_pool_create(dma_pool_name, dev, + CCP_DMAPOOL_MAX_SIZE, + CCP_DMAPOOL_ALIGN, 0); + if (!dma_pool) { + dev_err(dev, "unable to allocate dma pool\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; + ccp->cmd_q_count++; + + cmd_q->ccp = ccp; + cmd_q->id = i; + cmd_q->dma_pool = dma_pool; + mutex_init(&cmd_q->q_mutex); + + /* Page alignment satisfies our needs for N <= 128 */ + BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); + cmd_q->qsize = CCP5_Q_SIZE(Q_DESC_SIZE); + cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, + &cmd_q->qbase_dma, + GFP_KERNEL); + if (!cmd_q->qbase) { + dev_err(dev, "unable to allocate command queue\n"); + ret = -ENOMEM; + goto e_pool; + } + + cmd_q->qidx = 0; + /* Preset some register values and masks that are queue + * number dependent + */ + cmd_q->reg_control = ccp->io_regs + + CMD5_Q_STATUS_INCR * (i + 1); + cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE; + cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE; + cmd_q->reg_int_enable = cmd_q->reg_control + + CMD5_Q_INT_ENABLE_BASE; + cmd_q->reg_interrupt_status = cmd_q->reg_control + + CMD5_Q_INTERRUPT_STATUS_BASE; + cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE; + cmd_q->reg_int_status = cmd_q->reg_control + + CMD5_Q_INT_STATUS_BASE; + cmd_q->reg_dma_status = cmd_q->reg_control + + CMD5_Q_DMA_STATUS_BASE; + cmd_q->reg_dma_read_status = cmd_q->reg_control + + CMD5_Q_DMA_READ_STATUS_BASE; + cmd_q->reg_dma_write_status = cmd_q->reg_control + + CMD5_Q_DMA_WRITE_STATUS_BASE; + + init_waitqueue_head(&cmd_q->int_queue); + + dev_dbg(dev, "queue #%u available\n", i); + } + + if 
(ccp->cmd_q_count == 0) { + dev_notice(dev, "no command queues available\n"); + ret = 1; + goto e_pool; + } + + /* Turn off the queues and disable interrupts until ready */ + ccp5_disable_queue_interrupts(ccp); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + cmd_q->qcontrol = 0; /* Start with nothing */ + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + + /* Clear the interrupt status */ + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); + } + + dev_dbg(dev, "Requesting an IRQ...\n"); + /* Request an irq */ + ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); + if (ret) { + dev_err(dev, "unable to allocate an IRQ\n"); + goto e_pool; + } + /* Initialize the ISR tasklet */ + if (ccp->use_tasklet) + tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh, + (unsigned long)ccp); + + dev_dbg(dev, "Loading LSB map...\n"); + /* Copy the private LSB mask to the public registers */ + status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); + status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); + iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); + iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); + status = ((u64)status_hi<<30) | (u64)status_lo; + + dev_dbg(dev, "Configuring virtual queues...\n"); + /* Configure size of each virtual queue accessible to host */ + for (i = 0; i < ccp->cmd_q_count; i++) { + u32 dma_addr_lo; + u32 dma_addr_hi; + + cmd_q = &ccp->cmd_q[i]; + + cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); + cmd_q->qcontrol |= CCP5_QUEUE_SIZE_VAL << CMD5_Q_SHIFT; + + cmd_q->qdma_tail = cmd_q->qbase_dma; + dma_addr_lo = low_address(cmd_q->qdma_tail); + iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo); + iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo); + + dma_addr_hi = high_address(cmd_q->qdma_tail); + cmd_q->qcontrol |= (dma_addr_hi << 16); + iowrite32(cmd_q->qcontrol, cmd_q->reg_control); + + /* Find 
the LSB regions accessible to the queue */ + ccp_find_lsb_regions(cmd_q, status); + cmd_q->lsb = -1; /* Unassigned value */ + } + + dev_dbg(dev, "Assigning LSBs...\n"); + ret = ccp_assign_lsbs(ccp); + if (ret) { + dev_err(dev, "Unable to assign LSBs (%d)\n", ret); + goto e_irq; + } + + /* Optimization: pre-allocate LSB slots for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); + } + + dev_dbg(dev, "Starting threads...\n"); + /* Create a kthread for each queue */ + for (i = 0; i < ccp->cmd_q_count; i++) { + struct task_struct *kthread; + + cmd_q = &ccp->cmd_q[i]; + + kthread = kthread_run(ccp_cmd_queue_thread, cmd_q, + "%s-q%u", ccp->name, cmd_q->id); + if (IS_ERR(kthread)) { + dev_err(dev, "error creating queue thread (%ld)\n", + PTR_ERR(kthread)); + ret = PTR_ERR(kthread); + goto e_kthread; + } + + cmd_q->kthread = kthread; + } + + dev_dbg(dev, "Enabling interrupts...\n"); + ccp5_enable_queue_interrupts(ccp); + + dev_dbg(dev, "Registering device...\n"); + /* Put this on the unit list to make it available */ + ccp_add_device(ccp); + + ret = ccp_register_rng(ccp); + if (ret) + goto e_kthread; + + /* Register the DMA engine support */ + ret = ccp_dmaengine_register(ccp); + if (ret) + goto e_hwrng; + +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS + /* Set up debugfs entries */ + ccp5_debugfs_setup(ccp); +#endif + + return 0; + +e_hwrng: + ccp_unregister_rng(ccp); + +e_kthread: + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + +e_irq: + sp_free_ccp_irq(ccp->sp, ccp); + +e_pool: + for (i = 0; i < ccp->cmd_q_count; i++) + dma_pool_destroy(ccp->cmd_q[i].dma_pool); + + return ret; +} + +static void ccp5_destroy(struct ccp_device *ccp) +{ + struct ccp_cmd_queue *cmd_q; + struct ccp_cmd *cmd; + unsigned int i; + + /* Unregister the DMA engine */ + ccp_dmaengine_unregister(ccp); + + /* Unregister the 
RNG */ + ccp_unregister_rng(ccp); + + /* Remove this device from the list of available units first */ + ccp_del_device(ccp); + +#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS + /* We're in the process of tearing down the entire driver; + * when all the devices are gone clean up debugfs + */ + if (ccp_present()) + ccp5_debugfs_destroy(); +#endif + + /* Disable and clear interrupts */ + ccp5_disable_queue_interrupts(ccp); + for (i = 0; i < ccp->cmd_q_count; i++) { + cmd_q = &ccp->cmd_q[i]; + + /* Turn off the run bit */ + iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); + + /* Clear the interrupt status */ + iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); + ioread32(cmd_q->reg_int_status); + ioread32(cmd_q->reg_status); + } + + /* Stop the queue kthreads */ + for (i = 0; i < ccp->cmd_q_count; i++) + if (ccp->cmd_q[i].kthread) + kthread_stop(ccp->cmd_q[i].kthread); + + sp_free_ccp_irq(ccp->sp, ccp); + + /* Flush the cmd and backlog queue */ + while (!list_empty(&ccp->cmd)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } + while (!list_empty(&ccp->backlog)) { + /* Invoke the callback directly with an error code */ + cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); + list_del(&cmd->entry); + cmd->callback(cmd->data, -ENODEV); + } +} + +static void ccp5_config(struct ccp_device *ccp) +{ + /* Public side */ + iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); +} + +static void ccp5other_config(struct ccp_device *ccp) +{ + int i; + u32 rnd; + + /* We own all of the queues on the NTB CCP */ + + iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); + iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); + + /* According to spec description for SM4 high secure module, + * which need 64 bytes data, so the initialize times of writing + * mask register must be 16 or a multiple of 16. 
+ * + * The AES algorithem need 48 bytes, so the initialize times will + * be 12 or a multiple of 12. + */ + for (i = 0; i < 16; i++) { + rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); + iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); + } + + iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET); + iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET); + iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET); + + iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); + iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); + + iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET); + + ccp5_config(ccp); +} + +/* Version 5 adds some function, but is essentially the same as v5 */ +static const struct ccp_actions ccp5_actions = { + .aes = ccp5_perform_aes, + .xts_aes = ccp5_perform_xts_aes, + .sha = ccp5_perform_sha, + .des3 = ccp5_perform_des3, + .rsa = ccp5_perform_rsa, + .passthru = ccp5_perform_passthru, + .ecc = ccp5_perform_ecc, + .sm2 = ccp5_perform_sm2, + .sm3 = ccp5_perform_sm3, + .sm4 = ccp5_perform_sm4, + .sm4_ctr = ccp5_perform_sm4_ctr, + .run_cmd = ccp5_do_run_cmd, + .sballoc = ccp_lsb_alloc, + .sbfree = ccp_lsb_free, + .init = ccp5_init, + .destroy = ccp5_destroy, + .get_free_slots = ccp5_get_free_slots, +}; + +const struct ccp_vdata ccpv5a_hygon = { + .version = CCP_VERSION(5, 1), + .setup = ccp5_config, + .perform = &ccp5_actions, + .offset = 0x0, + .rsamax = CCP5_RSA_MAX_WIDTH, +}; + +const struct ccp_vdata ccpv5b_hygon = { + .version = CCP_VERSION(5, 1), + .dma_chan_attr = DMA_PRIVATE, + .setup = ccp5other_config, + .perform = &ccp5_actions, + .offset = 0x0, + .rsamax = CCP5_RSA_MAX_WIDTH, +}; diff --git a/drivers/crypto/ccp/hygon/sp-pci.c b/drivers/crypto/ccp/hygon/sp-pci.c index 6779f9ef0188..691127a0007b 100644 --- a/drivers/crypto/ccp/hygon/sp-pci.c +++ b/drivers/crypto/ccp/hygon/sp-pci.c @@ -50,7 +50,7 @@ const struct sp_dev_vdata hygon_dev_vdata[] = { { /* 0 */ .bar = 2, #ifdef 
CONFIG_CRYPTO_DEV_SP_CCP - .ccp_vdata = &ccpv5a, + .ccp_vdata = &ccpv5a_hygon, #endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv1, @@ -59,13 +59,13 @@ const struct sp_dev_vdata hygon_dev_vdata[] = { { /* 1 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_CCP - .ccp_vdata = &ccpv5b, + .ccp_vdata = &ccpv5b_hygon, #endif }, { /* 2 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_CCP - .ccp_vdata = &ccpv5a, + .ccp_vdata = &ccpv5a_hygon, #endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv2, From c4321e5f3158a2499d6c40e626b491412bed86e9 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Thu, 14 Mar 2024 20:46:36 +0800 Subject: [PATCH 88/99] drivers/crypto/ccp: concurrent psp access support between user and kernel space hygon inclusion category: feature --------------------------- Add a self-defined mutex to support concurrent psp access between kernel space and user space. Signed-off-by: xiongmengbiao [move mutex patch sev_platform_shutdown to sev_firmware_shutdown @@ -2838,9 +2866,18 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic) static void sev_firmware_shutdown(struct sev_device *sev) { - mutex_lock(&sev_cmd_mutex); + if (is_vendor_hygon()) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { : ] Link: https://github.com/deepin-community/kernel/pull/386 (cherry picked from commit 75f73904558ee0e04f91d13674c04f5d16d5fd0e) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/sev-dev.c --- drivers/crypto/ccp/hygon/csv-dev.c | 31 ++++- drivers/crypto/ccp/hygon/psp-dev.c | 176 ++++++++++++++++++++++++++++- drivers/crypto/ccp/hygon/psp-dev.h | 21 ++++ drivers/crypto/ccp/psp-dev.c | 8 ++ drivers/crypto/ccp/sev-dev.c | 53 +++++++-- 5 files changed, 274 insertions(+), 15 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 2bf6d1801650..6c5a3b53c70d 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ 
b/drivers/crypto/ccp/hygon/csv-dev.c @@ -195,7 +195,13 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (input.cmd > CSV_MAX) return -EINVAL; - mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon()) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } switch (input.cmd) { case CSV_HGSC_CERT_IMPORT: @@ -217,14 +223,20 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) * Release the mutex before calling the native ioctl function * because it will acquires the mutex. */ - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon()) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); return hygon_psp_hooks.sev_ioctl(file, ioctl, arg); } if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon()) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); return ret; } @@ -383,7 +395,13 @@ static int csv_do_ringbuf_cmds(int *psp_ret) if (!hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; - mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon()) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } rc = __csv_ring_buffer_enter_locked(psp_ret); if (rc) @@ -396,7 +414,10 @@ static int csv_do_ringbuf_cmds(int *psp_ret) csv_comm_mode = CSV_COMM_MAILBOX_ON; cmd_unlock: - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon()) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); return 
rc; } diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index bfb7b9bbcb9b..e758d2ae0ebb 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -20,6 +20,169 @@ /* Function and variable pointers for hooks */ struct hygon_psp_hooks_table hygon_psp_hooks; +static struct psp_misc_dev *psp_misc; + +uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) +{ + return xchg(dst, val); +} + +int psp_mutex_init(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + mutex->locked = 0; + return 0; +} + +int psp_mutex_trylock(struct psp_mutex *mutex) +{ + if (atomic64_exchange(&mutex->locked, 1)) + return 0; + else + return 1; +} + +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) +{ + int ret = 0; + unsigned long je; + + je = jiffies + msecs_to_jiffies(ms); + do { + if (psp_mutex_trylock(mutex)) { + ret = 1; + break; + } + } while (time_before(jiffies, je)); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_mutex_lock_timeout); + +int psp_mutex_unlock(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + + atomic64_exchange(&mutex->locked, 0); + return 0; +} +EXPORT_SYMBOL_GPL(psp_mutex_unlock); + +static int mmap_psp(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long page; + + page = virt_to_phys((void *)psp_misc->data_pg_aligned) >> PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, page, (vma->vm_end - vma->vm_start), + vma->vm_page_prot)) { + pr_info("remap failed..."); + return -1; + } + vm_flags_mod(vma, VM_DONTDUMP|VM_DONTEXPAND, 0); + pr_info("remap_pfn_rang page:[%lu] ok.\n", page); + return 0; +} + +static ssize_t read_psp(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining; + + if ((*ppos + count) > PAGE_SIZE) { + pr_info("%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_to_user(buf, (char *)psp_misc->data_pg_aligned + *ppos, count); + if (remaining) + 
return -EFAULT; + *ppos += count; + + return count; +} + +static ssize_t write_psp(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining, written; + + if ((*ppos + count) > PAGE_SIZE) { + pr_info("%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_from_user((char *)psp_misc->data_pg_aligned + *ppos, buf, count); + written = count - remaining; + if (!written) + return -EFAULT; + + *ppos += written; + + return written; +} + +static const struct file_operations psp_fops = { + .owner = THIS_MODULE, + .mmap = mmap_psp, + .read = read_psp, + .write = write_psp, +}; + +int hygon_psp_additional_setup(struct sp_device *sp) +{ + struct device *dev = sp->dev; + int ret = 0; + + if (!psp_misc) { + struct miscdevice *misc; + + psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); + if (!psp_misc) + return -ENOMEM; + psp_misc->data_pg_aligned = (struct psp_dev_data *)get_zeroed_page(GFP_KERNEL); + if (!psp_misc->data_pg_aligned) { + dev_err(dev, "alloc psp data page failed\n"); + devm_kfree(dev, psp_misc); + psp_misc = NULL; + return -ENOMEM; + } + SetPageReserved(virt_to_page(psp_misc->data_pg_aligned)); + psp_mutex_init(&psp_misc->data_pg_aligned->mb_mutex); + + *(uint32_t *)((void *)psp_misc->data_pg_aligned + 8) = 0xdeadbeef; + misc = &psp_misc->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = "hygon_psp_config"; + misc->fops = &psp_fops; + + ret = misc_register(misc); + if (ret) + return ret; + kref_init(&psp_misc->refcount); + hygon_psp_hooks.psp_misc = psp_misc; + } else { + kref_get(&psp_misc->refcount); + } + + return ret; +} +EXPORT_SYMBOL_GPL(hygon_psp_additional_setup); + +void hygon_psp_exit(struct kref *ref) +{ + struct psp_misc_dev *misc_dev = container_of(ref, struct psp_misc_dev, refcount); + + misc_deregister(&misc_dev->misc); + ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); + free_page((unsigned 
long)misc_dev->data_pg_aligned); + psp_misc = NULL; + hygon_psp_hooks.psp_misc = NULL; +} +EXPORT_SYMBOL_GPL(hygon_psp_exit); + int fixup_hygon_psp_caps(struct psp_device *psp) { /* the hygon psp is unavailable if bit0 is cleared in feature reg */ @@ -96,10 +259,19 @@ static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) int psp_do_cmd(int cmd, void *data, int *psp_ret) { int rc; + if (is_vendor_hygon()) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } - mutex_lock(hygon_psp_hooks.sev_cmd_mutex); rc = __psp_do_cmd_locked(cmd, data, psp_ret); - mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + if (is_vendor_hygon()) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); return rc; } diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index 480b3c36a002..ca22bc771cc7 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -12,6 +12,7 @@ #include #include +#include #include "sp-dev.h" @@ -30,6 +31,7 @@ extern struct hygon_psp_hooks_table { bool sev_dev_hooks_installed; struct mutex *sev_cmd_mutex; + struct psp_misc_dev *psp_misc; bool *psp_dead; int *psp_timeout; int *psp_cmd_timeout; @@ -43,6 +45,25 @@ extern struct hygon_psp_hooks_table { long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; +#define PSP_MUTEX_TIMEOUT 10000 +struct psp_mutex { + uint64_t locked; +}; + +struct psp_dev_data { + struct psp_mutex mb_mutex; +}; + +struct psp_misc_dev { + struct kref refcount; + struct psp_dev_data *data_pg_aligned; + struct miscdevice misc; +}; + +int hygon_psp_additional_setup(struct sp_device *sp); +void hygon_psp_exit(struct kref *ref); +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); +int psp_mutex_unlock(struct psp_mutex 
*mutex); int fixup_hygon_psp_caps(struct psp_device *psp); int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index 8e03851fc03d..cbb1a482fba4 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -295,6 +295,11 @@ int psp_dev_init(struct sp_device *sp) /* Request an irq */ if (is_vendor_hygon()) { + ret = hygon_psp_additional_setup(sp); + if (ret) { + dev_err(dev, "psp: unable to do additional setup\n"); + goto e_err; + } ret = sp_request_hygon_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); } else { ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); @@ -361,6 +366,9 @@ void psp_dev_destroy(struct sp_device *sp) sp_free_psp_irq(sp, psp); + if (is_vendor_hygon() && hygon_psp_hooks.psp_misc) + kref_put(&hygon_psp_hooks.psp_misc->refcount, hygon_psp_exit); + if (sp->clear_psp_master_device) sp->clear_psp_master_device(sp); } diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 8c1cbce2ed74..0c37c48bff30 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1045,9 +1045,19 @@ int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; - mutex_lock(&sev_cmd_mutex); + if (is_vendor_hygon()) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } + rc = __sev_do_cmd_locked(cmd, data, psp_ret); - mutex_unlock(&sev_cmd_mutex); + if (is_vendor_hygon()) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -1667,9 +1677,18 @@ int sev_platform_init(struct sev_platform_init_args *args) { int rc; - mutex_lock(&sev_cmd_mutex); + if (is_vendor_hygon()) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + 
return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = _sev_platform_init_locked(args); - mutex_unlock(&sev_cmd_mutex); + if (is_vendor_hygon()) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -2568,7 +2587,13 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (input.cmd > SEV_MAX) return -EINVAL; - mutex_lock(&sev_cmd_mutex); + if (is_vendor_hygon()) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } switch (input.cmd) { @@ -2620,7 +2645,10 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: - mutex_unlock(&sev_cmd_mutex); + if (is_vendor_hygon()) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return ret; } @@ -2838,9 +2866,18 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic) static void sev_firmware_shutdown(struct sev_device *sev) { - mutex_lock(&sev_cmd_mutex); + if (is_vendor_hygon()) { + if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return ; + } else { + mutex_lock(&sev_cmd_mutex); + } __sev_firmware_shutdown(sev, false); - mutex_unlock(&sev_cmd_mutex); + if (is_vendor_hygon()) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); } void sev_platform_shutdown(void) From 218fac61852a26b47c7c8f9272c3d619d6bed256 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Thu, 14 Mar 2024 20:50:25 +0800 Subject: [PATCH 89/99] drivers/crypto/ccp: Add psp mutex enable ioctl support hygon inclusion category: feature --------------------------- Add ioctl interface to control the state of self-defined mutex in 
user and kernel space. By default, when psp user-mode driver is not used, the self-defined mutex is disabled, and the kernel's native private lock is utilized instead. Signed-off-by: xiongmengbiao Link: https://github.com/deepin-community/kernel/pull/386 (cherry picked from commit 87c2f52513835fa0c9b92f5f73411e619616cdc6) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/psp-dev.c drivers/crypto/ccp/sev-dev.c --- drivers/crypto/ccp/hygon/csv-dev.c | 12 +++++--- drivers/crypto/ccp/hygon/psp-dev.c | 49 +++++++++++++++++++++++++++++- drivers/crypto/ccp/hygon/psp-dev.h | 3 +- drivers/crypto/ccp/sev-dev.c | 21 ++++++++----- 4 files changed, 70 insertions(+), 15 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 6c5a3b53c70d..584a6cdc78f8 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -179,6 +179,7 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) void __user *argp = (void __user *)arg; struct sev_issue_cmd input; int ret = -EFAULT; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); if (!hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; @@ -195,7 +196,7 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (input.cmd > CSV_MAX) return -EINVAL; - if (is_vendor_hygon()) { + if (is_vendor_hygon() && mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -223,7 +224,7 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) * Release the mutex before calling the native ioctl function * because it will acquires the mutex. 
*/ - if (is_vendor_hygon()) + if (is_vendor_hygon() && mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); @@ -233,7 +234,7 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; - if (is_vendor_hygon()) + if (is_vendor_hygon() && mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); @@ -391,11 +392,12 @@ static int csv_do_ringbuf_cmds(int *psp_ret) { struct sev_user_data_status data; int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); if (!hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; - if (is_vendor_hygon()) { + if (is_vendor_hygon() && mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -414,7 +416,7 @@ static int csv_do_ringbuf_cmds(int *psp_ret) csv_comm_mode = CSV_COMM_MAILBOX_ON; cmd_unlock: - if (is_vendor_hygon()) + if (is_vendor_hygon() && mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index e758d2ae0ebb..bd1e9f2d81c9 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -14,6 +14,7 @@ #include #include #include +#include #include "psp-dev.h" @@ -21,6 +22,12 @@ struct hygon_psp_hooks_table hygon_psp_hooks; static struct psp_misc_dev *psp_misc; +#define HYGON_PSP_IOC_TYPE 'H' +enum HYGON_PSP_OPCODE { + HYGON_PSP_MUTEX_ENABLE = 1, + HYGON_PSP_MUTEX_DISABLE, + HYGON_PSP_OPCODE_MAX_NR, +}; uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) { @@ -54,7 +61,7 @@ int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) ret = 1; break; } - } while 
(time_before(jiffies, je)); + } while ((ms == 0) || time_before(jiffies, je)); return ret; } @@ -124,11 +131,51 @@ static ssize_t write_psp(struct file *file, const char __user *buf, size_t count return written; } +static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) +{ + unsigned int opcode = 0; + + if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { + pr_info("%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); + return -EINVAL; + } + opcode = _IOC_NR(ioctl); + switch (opcode) { + case HYGON_PSP_MUTEX_ENABLE: + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + // And get the sev lock to make sure no one is using it now. + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + hygon_psp_hooks.psp_mutex_enabled = 1; + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + // Wait 10ms just in case someone is right before getting the psp lock. + mdelay(10); + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + break; + + case HYGON_PSP_MUTEX_DISABLE: + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + // And get the psp lock to make sure no one is using it now. + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + hygon_psp_hooks.psp_mutex_enabled = 0; + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + // Wait 10ms just in case someone is right before getting the sev lock. 
+ mdelay(10); + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + break; + + default: + pr_info("%s: invalid ioctl number: %d\n", __func__, opcode); + return -EINVAL; + } + return 0; +} + static const struct file_operations psp_fops = { .owner = THIS_MODULE, .mmap = mmap_psp, .read = read_psp, .write = write_psp, + .unlocked_ioctl = ioctl_psp, }; int hygon_psp_additional_setup(struct sp_device *sp) diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index ca22bc771cc7..2961178819a5 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -32,6 +32,7 @@ extern struct hygon_psp_hooks_table { bool sev_dev_hooks_installed; struct mutex *sev_cmd_mutex; struct psp_misc_dev *psp_misc; + bool psp_mutex_enabled; bool *psp_dead; int *psp_timeout; int *psp_cmd_timeout; @@ -45,7 +46,7 @@ extern struct hygon_psp_hooks_table { long (*sev_ioctl)(struct file *file, unsigned int ioctl, unsigned long arg); } hygon_psp_hooks; -#define PSP_MUTEX_TIMEOUT 10000 +#define PSP_MUTEX_TIMEOUT 60000 struct psp_mutex { uint64_t locked; }; diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index 0c37c48bff30..dd0cc3620981 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -1044,8 +1044,9 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - if (is_vendor_hygon()) { + if (is_vendor_hygon() && mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -1054,7 +1055,7 @@ int sev_do_cmd(int cmd, void *data, int *psp_ret) } rc = __sev_do_cmd_locked(cmd, data, psp_ret); - if (is_vendor_hygon()) + if (is_vendor_hygon() && mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -1676,8 +1677,9 @@ 
static int _sev_platform_init_locked(struct sev_platform_init_args *args) int sev_platform_init(struct sev_platform_init_args *args) { int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - if (is_vendor_hygon()) { + if (is_vendor_hygon() && mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -1685,7 +1687,7 @@ int sev_platform_init(struct sev_platform_init_args *args) mutex_lock(&sev_cmd_mutex); } rc = _sev_platform_init_locked(args); - if (is_vendor_hygon()) + if (is_vendor_hygon() && mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -2574,6 +2576,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) struct sev_issue_cmd input; int ret = -EFAULT; bool writable = file->f_mode & FMODE_WRITE; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); if (!psp_master || !psp_master->sev_data) return -ENODEV; @@ -2587,7 +2590,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (input.cmd > SEV_MAX) return -EINVAL; - if (is_vendor_hygon()) { + if (is_vendor_hygon() && mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -2645,7 +2648,7 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: - if (is_vendor_hygon()) + if (is_vendor_hygon() && mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); @@ -2866,7 +2869,9 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic) static void sev_firmware_shutdown(struct sev_device *sev) { - if (is_vendor_hygon()) { + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if 
(is_vendor_hygon() && mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return ; @@ -2874,7 +2879,7 @@ static void sev_firmware_shutdown(struct sev_device *sev) mutex_lock(&sev_cmd_mutex); } __sev_firmware_shutdown(sev, false); - if (is_vendor_hygon()) + if (is_vendor_hygon() && mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(&sev_cmd_mutex); From 389bdfb03166adc922e51588c76efa2ce0195590 Mon Sep 17 00:00:00 2001 From: niuyongwen Date: Thu, 10 Aug 2023 10:53:18 +0800 Subject: [PATCH 90/99] arch/x86/kvm: Support psp virtualization hygon inclusion category: feature --------------------------- Support the PSP virtualization basic framework. The guest uses the vmmcall instruction to interact with KVM, which then forwards the data to the PSP device driver and sends it to the PSP hardware. Signed-off-by: niuyongwen Link: https://github.com/deepin-community/kernel/pull/386 (cherry picked from commit abe09b8dd27b78ba9a51685dadce7f9d8df4d151) Signed-off-by: Wentao Guan Conflicts: arch/x86/kvm/Makefile arch/x86/kvm/x86.c --- arch/x86/include/asm/kvm_host.h | 2 + arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/psp.c | 84 +++++++++++++++++++++++++++++++++ arch/x86/kvm/x86.c | 5 +- include/uapi/linux/kvm_para.h | 1 + 5 files changed, 92 insertions(+), 2 deletions(-) create mode 100644 arch/x86/kvm/psp.c diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 09eaa37e5d4e..bb63740e4a36 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2389,6 +2389,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit); +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, + gpa_t psp_ret_gpa, gpa_t table_gpa); int kvm_add_user_return_msr(u32 msr); int 
kvm_find_user_return_msr(u32 msr); diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 42828fc7d6e2..2822b34b9cac 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -6,7 +6,7 @@ ccflags-$(CONFIG_KVM_WERROR) += -Werror include $(srctree)/virt/kvm/Makefile.kvm kvm-y += x86.o emulate.o irq.o lapic.o cpuid.o pmu.o mtrr.o \ - debugfs.o mmu/mmu.o mmu/page_track.o mmu/spte.o + debugfs.o mmu/mmu.o mmu/page_track.o mmu/spte.o psp.o kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o kvm-$(CONFIG_KVM_IOAPIC) += i8259.o i8254.o ioapic.o diff --git a/arch/x86/kvm/psp.c b/arch/x86/kvm/psp.c new file mode 100644 index 000000000000..f734013b25e7 --- /dev/null +++ b/arch/x86/kvm/psp.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PSP virtualization + * + * Copyright (c) 2023, HYGON CORPORATION. All rights reserved. + * Author: Ge Yang + * + */ + +#include +#include +#include +#include + +struct psp_cmdresp_head { + uint32_t buf_size; + uint32_t cmdresp_size; + uint32_t cmdresp_code; +} __packed; + +int guest_addr_map_table_op(void *data_hva, gpa_t data_gpa, gpa_t table_gpa, + int op) +{ + return 0; +} + +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa) +{ + void *data; + struct psp_cmdresp_head psp_head; + uint32_t data_size; + int psp_ret = 0; + int ret = 0; + + if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; + + data_size = psp_head.buf_size; + data = kzalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto e_free; + } + + if (guest_addr_map_table_op(data, data_gpa, table_gpa, 0)) { + ret = -EFAULT; + goto e_free; + } + + ret = psp_do_cmd(cmd, data, &psp_ret); + if (ret) { + pr_err("%s: psp do cmd error, %d\n", __func__, psp_ret); + ret = -EIO; + goto e_free; + } + + if (guest_addr_map_table_op(data, data_gpa, table_gpa, 1)) 
{ + ret = -EFAULT; + goto e_free; + } + + if (unlikely(kvm_write_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto e_free; + } + + if (unlikely(kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, + sizeof(psp_ret)))) { + ret = -EFAULT; + goto e_free; + } + + return ret; + +e_free: + kfree(data); + return ret; +} + diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7b9ad6ae979d..066235e6d0ff 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10399,7 +10399,7 @@ int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl, } if (cpl && - !(is_x86_vendor_hygon() && nr == KVM_HC_VM_ATTESTATION)) { + !(is_x86_vendor_hygon() && (nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP))) { ret = -KVM_EPERM; goto out; } @@ -10436,6 +10436,9 @@ int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl, kvm_sched_yield(vcpu, a0); ret = 0; break; + case KVM_HC_PSP_OP: + ret = kvm_pv_psp_op(vcpu->kvm, a0, a1, a2, a3); + break; case KVM_HC_MAP_GPA_RANGE: { u64 gpa = a0, npages = a1, attrs = a2; diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 67192835455e..86369b7a5733 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -31,6 +31,7 @@ #define KVM_HC_SCHED_YIELD 11 #define KVM_HC_MAP_GPA_RANGE 12 #define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ +#define KVM_HC_PSP_OP 101 /* Specific to Hygon platform */ /* * hypercalls use architecture specific From f66406c4de0984893d440ed74831d76a76dfa6cc Mon Sep 17 00:00:00 2001 From: niuyongwen Date: Thu, 10 Aug 2023 11:05:02 +0800 Subject: [PATCH 91/99] arch/x86/kvm: Support tkm virtualization hygon inclusion category: feature --------------------------- Allow the guest to execute Trusted Key Management (TKM) commands in user mode. Each TKM command data block is transferred to KVM using the vmmcall instruction and processed through the following three steps: 1. 
Obtain the VM command and preprocess the pointer mapping table information in the command buffer 2. The command that has been converted will interact with the channel of the psp through the driver and try to obtain the execution result 3. The executed command data is recovered according to the multilevel pointer of the mapping table, and then returned to the VM Signed-off-by: niuyongwen Link: https://github.com/deepin-community/kernel/pull/386 (cherry picked from commit 907e2fa909cf861ae33b7361b9ed85f620e4f9fe) Signed-off-by: Wentao Guan --- arch/x86/kvm/psp.c | 592 +++++++++++++++++++++++-- drivers/crypto/ccp/hygon/csv-dev.c | 484 +++++++++++++++++++- drivers/crypto/ccp/hygon/psp-dev.h | 2 + drivers/crypto/ccp/hygon/ring-buffer.c | 23 + drivers/crypto/ccp/hygon/ring-buffer.h | 4 + include/linux/psp-hygon.h | 51 +++ 6 files changed, 1126 insertions(+), 30 deletions(-) diff --git a/arch/x86/kvm/psp.c b/arch/x86/kvm/psp.c index f734013b25e7..e3a04bef5daf 100644 --- a/arch/x86/kvm/psp.c +++ b/arch/x86/kvm/psp.c @@ -11,6 +11,44 @@ #include #include #include +#include +#include + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "vpsp: " fmt + +/* + * The file mainly implements the base execution + * logic of virtual PSP in kernel mode, which mainly includes: + * (1) Obtain the VM command and preprocess the pointer + * mapping table information in the command buffer + * (2) The command that has been converted will interact + * with the channel of the psp through the driver and + * try to obtain the execution result + * (3) The executed command data is recovered according to + * the multilevel pointer of the mapping table, and then returned to the VM + * + * The primary implementation logic of virtual PSP in kernel mode + * call trace: + * guest command(vmmcall) + * | + * | |-> kvm_pv_psp_cmd_pre_op + * | | + * | | -> guest_addr_map_table_op + * | | + * | | -> guest_multiple_level_gpa_replace + * | + * kvm_pv_psp_op->|-> vpsp_try_do_cmd/vpsp_try_get_result 
<====> psp device driver + * | + * | + * |-> kvm_pv_psp_cmd_post_op + * | + * | -> guest_addr_map_table_op + * | + * | -> guest_multiple_level_gpa_restore + */ struct psp_cmdresp_head { uint32_t buf_size; @@ -18,67 +56,565 @@ struct psp_cmdresp_head { uint32_t cmdresp_code; } __packed; -int guest_addr_map_table_op(void *data_hva, gpa_t data_gpa, gpa_t table_gpa, - int op) +/** + * struct map_tbl - multilevel pointer address mapping table + * + * @parent_pa: parent address block's physics address + * @offset: offset in parent address block + * @size: submemory size + * @align: submemory align size, hva need to keep size alignment in kernel + * @hva: submemory copy block in kernel virtual address + */ +struct map_tbl { + uint64_t parent_pa; + uint32_t offset; + uint32_t size; + uint32_t align; + uint64_t hva; +} __packed; + +struct addr_map_tbls { + uint32_t tbl_nums; + struct map_tbl tbl[]; +} __packed; + +/* gpa and hva conversion maintenance table for internal use */ +struct gpa2hva_t { + void *hva; + gpa_t gpa; +}; + +struct gpa2hva_tbls { + uint32_t max_nums; + uint32_t tbl_nums; + struct gpa2hva_t tbl[]; +}; + +/* save command data for restoring later */ +struct vpsp_hbuf_wrapper { + void *data; + uint32_t data_size; + struct addr_map_tbls *map_tbls; + struct gpa2hva_tbls *g2h_tbls; +}; + +/* Virtual PSP host memory information maintenance, used in ringbuffer mode */ +struct vpsp_hbuf_wrapper +g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; + +void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) { - return 0; + int i; + + pr_info("[%s]-> map_tbl_nums: %d", title, tbls->tbl_nums); + for (i = 0; i < tbls->tbl_nums; i++) { + pr_info("\t[%d]: parent_pa: 0x%llx, offset: 0x%x, size: 0x%x, align: 0x%x hva: 0x%llx", + i, tbls->tbl[i].parent_pa, tbls->tbl[i].offset, + tbls->tbl[i].size, tbls->tbl[i].align, tbls->tbl[i].hva); + } + pr_info("\n"); } -int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t 
data_gpa, gpa_t psp_ret_gpa, - gpa_t table_gpa) +void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls) { - void *data; - struct psp_cmdresp_head psp_head; - uint32_t data_size; - int psp_ret = 0; - int ret = 0; + int i; - if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, - sizeof(struct psp_cmdresp_head)))) + pr_info("[%s]-> g2h_tbl_nums: %d, max_nums: %d", title, tbls->tbl_nums, + tbls->max_nums); + for (i = 0; i < tbls->tbl_nums; i++) + pr_info("\t[%d]: hva: 0x%llx, gpa: 0x%llx", i, + (uint64_t)tbls->tbl[i].hva, tbls->tbl[i].gpa); + pr_info("\n"); +} + +static int gpa2hva_tbl_fill(struct gpa2hva_tbls *tbls, void *hva, gpa_t gpa) +{ + uint32_t fill_idx = tbls->tbl_nums; + + if (fill_idx >= tbls->max_nums) return -EFAULT; - data_size = psp_head.buf_size; - data = kzalloc(data_size, GFP_KERNEL); - if (!data) + tbls->tbl[fill_idx].hva = hva; + tbls->tbl[fill_idx].gpa = gpa; + tbls->tbl_nums = fill_idx + 1; + + return 0; +} + +static void clear_hva_in_g2h_tbls(struct gpa2hva_tbls *g2h, void *hva) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva == hva) + g2h->tbl[i].hva = NULL; + } +} + +static void *get_hva_from_gpa(struct gpa2hva_tbls *g2h, gpa_t gpa) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].gpa == gpa) + return (void *)g2h->tbl[i].hva; + } + + return NULL; +} + +static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva == hva) + return g2h->tbl[i].gpa; + } + + return 0; +} + +/* + * The virtual machine multilevel pointer command buffer handles the + * execution entity, synchronizes the data in the original gpa to the + * newly allocated hva(host virtual address) and updates the mapping + * relationship in the parent memory + */ +static int guest_multiple_level_gpa_replace(struct kvm *kvm, + struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +{ + int ret = 0; + uint32_t sub_block_size; + uint64_t 
sub_paddr; + void *parent_kva = NULL; + + /* kmalloc memory for child block */ + sub_block_size = max(tbl->size, tbl->align); + tbl->hva = (uint64_t)kzalloc(sub_block_size, GFP_KERNEL); + if (!tbl->hva) return -ENOMEM; - if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + /* get child gpa from parent gpa */ + if (unlikely(kvm_read_guest(kvm, tbl->parent_pa + tbl->offset, + &sub_paddr, sizeof(sub_paddr)))) { + pr_err("[%s]: kvm_read_guest for parent gpa failed\n", + __func__); ret = -EFAULT; goto e_free; } - if (guest_addr_map_table_op(data, data_gpa, table_gpa, 0)) { + /* copy child block data from gpa to hva */ + if (unlikely(kvm_read_guest(kvm, sub_paddr, (void *)tbl->hva, + tbl->size))) { + pr_err("[%s]: kvm_read_guest for sub_data failed\n", + __func__); ret = -EFAULT; goto e_free; } - ret = psp_do_cmd(cmd, data, &psp_ret); - if (ret) { - pr_err("%s: psp do cmd error, %d\n", __func__, psp_ret); - ret = -EIO; + /* get hva from gpa */ + parent_kva = get_hva_from_gpa(g2h, tbl->parent_pa); + if (unlikely(!parent_kva)) { + pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", + __func__); + ret = -EFAULT; goto e_free; } - if (guest_addr_map_table_op(data, data_gpa, table_gpa, 1)) { + /* replace pa of hva from gpa */ + *(uint64_t *)((uint8_t *)parent_kva + tbl->offset) = __psp_pa(tbl->hva); + + /* fill in gpa and hva to map table for restoring later */ + if (unlikely(gpa2hva_tbl_fill(g2h, (void *)tbl->hva, sub_paddr))) { + pr_err("[%s]: gpa2hva_tbl_fill for sub_addr failed\n", + __func__); ret = -EFAULT; goto e_free; } - if (unlikely(kvm_write_guest(kvm, data_gpa, data, data_size))) { + return ret; + +e_free: + kfree((const void *)tbl->hva); + return ret; +} + +/* The virtual machine multi-level pointer command memory handles the + * execution entity, synchronizes the data in the hva(host virtual + * address) back to the memory corresponding to the gpa, and restores + * the mapping relationship in the original parent memory + */ +static int 
guest_multiple_level_gpa_restore(struct kvm *kvm, + struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +{ + int ret = 0; + gpa_t sub_gpa; + void *parent_hva = NULL; + + /* get gpa from hva */ + sub_gpa = get_gpa_from_hva(g2h, (void *)tbl->hva); + if (unlikely(!sub_gpa)) { + pr_err("[%s]: get_gpa_from_hva for sub_gpa failed\n", + __func__); ret = -EFAULT; - goto e_free; + goto end; } - if (unlikely(kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, - sizeof(psp_ret)))) { + /* copy child block data from hva to gpa */ + if (unlikely(kvm_write_guest(kvm, sub_gpa, (void *)tbl->hva, + tbl->size))) { + pr_err("[%s]: kvm_write_guest for sub_gpa failed\n", + __func__); ret = -EFAULT; - goto e_free; + goto end; + } + + /* get parent hva from parent gpa */ + parent_hva = get_hva_from_gpa(g2h, tbl->parent_pa); + if (unlikely(!parent_hva)) { + pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", + __func__); + ret = -EFAULT; + goto end; } + /* restore gpa from pa of hva in parent block */ + *(uint64_t *)((uint8_t *)parent_hva + tbl->offset) = sub_gpa; + + /* free child block memory */ + clear_hva_in_g2h_tbls(g2h, (void *)tbl->hva); + kfree((const void *)tbl->hva); + tbl->hva = 0; + +end: return ret; +} -e_free: +/* + * The virtual machine multilevel pointer command memory processing + * executes upper-layer abstract interfaces, including replacing and + * restoring two sub-processing functions + */ +static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, + struct addr_map_tbls *map_tbls, int op) +{ + int ret = 0; + int i; + uint64_t *sub_paddr_ptr; + + if (op) { + for (i = map_tbls->tbl_nums - 1; i >= 0; i--) { + /* check if the gpa of root points to itself */ + if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { + sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva + + map_tbls->tbl[i].offset); + /* if the child paddr is equal to the parent paddr */ + if ((uint64_t)g2h->tbl[0].hva == map_tbls->tbl[i].hva) { + *sub_paddr_ptr = g2h->tbl[0].gpa; + continue; + } 
+ } + + /* restore new pa of kva with the gpa from guest */ + if (unlikely(guest_multiple_level_gpa_restore(kvm, + &map_tbls->tbl[i], g2h))) { + pr_err("[%s]: guest_multiple_level_gpa_restore failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + } else { + for (i = 0; i < map_tbls->tbl_nums; i++) { + /* check if the gpa of root points to itself */ + if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { + sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva + + map_tbls->tbl[i].offset); + /* if the child paddr is equal to the parent paddr */ + if (*sub_paddr_ptr == map_tbls->tbl[i].parent_pa) { + *sub_paddr_ptr = __psp_pa(g2h->tbl[0].hva); + map_tbls->tbl[i].hva = (uint64_t)g2h->tbl[0].hva; + continue; + } + } + + /* check if parent_pa is valid */ + if (unlikely(!get_hva_from_gpa(g2h, map_tbls->tbl[i].parent_pa))) { + pr_err("[%s]: g2h->tbl[%d].parent_pa: 0x%llx is invalid\n", + __func__, i, map_tbls->tbl[i].parent_pa); + ret = -EFAULT; + goto end; + } + + /* replace the gpa from guest with the new pa of kva */ + if (unlikely(guest_multiple_level_gpa_replace(kvm, + &map_tbls->tbl[i], g2h))) { + pr_err("[%s]: guest_multiple_level_gpa_replace failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + } + +end: + return ret; +} + +static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls + *map_tbl, void *data) +{ + int i; + + if (g2h) { + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva && (g2h->tbl[i].hva != data)) { + kfree(g2h->tbl[i].hva); + g2h->tbl[i].hva = NULL; + } + } + kfree(g2h); + } + + kfree(map_tbl); kfree(data); +} + +/* + * Obtain the VM command and preprocess the pointer mapping table + * information in the command buffer, the processed data will be + * used to interact with the psp device + */ +static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, + gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + void *data = NULL; + struct psp_cmdresp_head psp_head; + uint32_t data_size; 
+ struct addr_map_tbls map_head, *map_tbls = NULL; + uint32_t map_tbl_size; + struct gpa2hva_tbls *g2h = NULL; + uint32_t g2h_tbl_size; + + if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; + + data_size = psp_head.buf_size; + data = kzalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto end; + } + + if (table_gpa) { + /* parse address map table from guest */ + if (unlikely(kvm_read_guest(kvm, table_gpa, &map_head, + sizeof(struct addr_map_tbls)))) { + pr_err("[%s]: kvm_read_guest for map_head failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + map_tbl_size = sizeof(struct addr_map_tbls) + map_head.tbl_nums + * sizeof(struct map_tbl); + map_tbls = kzalloc(map_tbl_size, GFP_KERNEL); + if (!map_tbls) { + ret = -ENOMEM; + goto end; + } + + if (unlikely(kvm_read_guest(kvm, table_gpa, map_tbls, + map_tbl_size))) { + pr_err("[%s]: kvm_read_guest for map_tbls failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* init for gpa2hva table*/ + g2h_tbl_size = sizeof(struct gpa2hva_tbls) + (map_head.tbl_nums + + 1) * sizeof(struct gpa2hva_t); + g2h = kzalloc(g2h_tbl_size, GFP_KERNEL); + if (!g2h) { + ret = -ENOMEM; + goto end; + } + g2h->max_nums = map_head.tbl_nums + 1; + + /* fill the root parent address */ + if (gpa2hva_tbl_fill(g2h, data, data_gpa)) { + pr_err("[%s]: gpa2hva_tbl_fill for root data address failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + if (guest_addr_map_table_op(kvm, g2h, map_tbls, 0)) { + pr_err("[%s]: guest_addr_map_table_op for replacing failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + + hbuf->data = data; + hbuf->data_size = data_size; + hbuf->map_tbls = map_tbls; + hbuf->g2h_tbls = g2h; + +end: return ret; } +/* + * The executed command data is recovered according to the multilevel + * pointer of the mapping table when the command has finished + * 
interacting with the psp device + */ +static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, + struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + + if (hbuf->map_tbls) { + if (guest_addr_map_table_op(kvm, hbuf->g2h_tbls, + hbuf->map_tbls, 1)) { + pr_err("[%s]: guest_addr_map_table_op for restoring failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + + /* restore cmdresp's buffer from context */ + if (unlikely(kvm_write_guest(kvm, data_gpa, hbuf->data, + hbuf->data_size))) { + pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", + __func__); + ret = -EFAULT; + goto end; + } + +end: + /* release memory and clear hbuf */ + kvm_pv_psp_mem_free(hbuf->g2h_tbls, hbuf->map_tbls, hbuf->data); + memset(hbuf, 0, sizeof(*hbuf)); + + return ret; +} + +/* + * The primary implementation interface of virtual PSP in kernel mode + */ +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa) +{ + int ret = 0; + struct vpsp_ret psp_ret = {0}; + struct vpsp_hbuf_wrapper hbuf = {0}; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + uint32_t index = 0; + + if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, &psp_ret, + sizeof(psp_ret)))) + return -EFAULT; + + switch (psp_ret.status) { + case VPSP_INIT: + /* multilevel pointer replace*/ + ret = kvm_pv_psp_cmd_pre_op(kvm, data_gpa, table_gpa, &hbuf); + if (unlikely(ret)) { + psp_ret.status = VPSP_FINISH; + pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* try to send command to the device for execution*/ + ret = vpsp_try_do_cmd(cmd, (void *)hbuf.data, + (struct vpsp_ret *)&psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + ret = -EFAULT; + goto end; + } + + switch (psp_ret.status) { + case VPSP_RUNNING: + /* backup host memory message for restoring later*/ + prio = vcmd->is_high_rb ? 
CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + g_hbuf_wrap[prio][psp_ret.index] = hbuf; + break; + + case VPSP_FINISH: + /* restore multilevel pointer data */ + ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, &hbuf); + if (unlikely(ret)) { + pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + break; + + default: + ret = -EFAULT; + break; + } + break; + + case VPSP_RUNNING: + prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + index = psp_ret.index; + /* try to get the execution result from ringbuffer*/ + ret = vpsp_try_get_result(prio, index, g_hbuf_wrap[prio][index].data, + (struct vpsp_ret *)&psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + ret = -EFAULT; + goto end; + } + + switch (psp_ret.status) { + case VPSP_RUNNING: + break; + + case VPSP_FINISH: + /* restore multilevel pointer data */ + ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, + &g_hbuf_wrap[prio][index]); + if (unlikely(ret)) { + pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + break; + + default: + ret = -EFAULT; + break; + } + break; + + default: + pr_err("[%s]: invalid command status\n", __func__); + ret = -EFAULT; + break; + } +end: + /* return psp_ret to guest */ + kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); + return ret; +} diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 584a6cdc78f8..620845e6ae51 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -30,6 +30,20 @@ u32 hygon_csv_build; int csv_comm_mode = CSV_COMM_MAILBOX_ON; +/* defination of variabled used by virtual psp */ +enum VPSP_RB_CHECK_STATUS { + RB_NOT_CHECK = 0, + RB_CHECKING, + RB_CHECKED, + RB_CHECK_MAX +}; +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_CMD_STATUS_RUNNING 0xffff +static DEFINE_MUTEX(vpsp_rb_mutex); +struct 
csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; +static uint8_t vpsp_rb_supported; +static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); + /* * csv_update_api_version used to update the api version of HYGON CSV * firmwareat driver side. @@ -465,7 +479,7 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer /* the command queue will points to @cmd_ptr_buffer */ csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, - CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); if (!stat_val_buffer) @@ -473,7 +487,7 @@ static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer /* the status queue will points to @stat_val_buffer */ csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, - CSV_RING_BUFFER_LEN, CSV_RING_BUFFER_ESIZE); + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); return 0; } @@ -593,6 +607,472 @@ int csv_check_stat_queue_status(int *psp_ret) } EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); +static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; +} + +static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; +} + +static void vpsp_set_cmd_status(int prio, int index, int status) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + statval[index].status = status; +} + +static int vpsp_get_cmd_status(int prio, int index) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + return statval[index].status; +} + +static unsigned int vpsp_queue_cmd_size(int prio) +{ + return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int 
vpsp_dequeue_cmd(int prio, int index, + struct csv_cmdptr_entry *cmd_ptr) +{ + mutex_lock(&vpsp_rb_mutex); + + /* The status update must be before the head update */ + vpsp_set_cmd_status(prio, index, 0); + csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + + mutex_unlock(&vpsp_rb_mutex); + + return 0; +} + +/* + * Populate the command from the virtual machine to the queue to + * support execution in ringbuffer mode + */ +static int vpsp_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct csv_cmdptr_entry cmdptr = { }; + int index = -1; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + mutex_lock(&vpsp_rb_mutex); + index = get_queue_tail(&vpsp_ring_buffer[prio]); + + /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ + if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { + index = -1; + goto out; + } + + /* The status must be written first, and then the cmd can be enqueued */ + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + +out: + mutex_unlock(&vpsp_rb_mutex); + return index; +} + +static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, + uint32_t new_head) +{ + uint32_t orig_head = get_queue_head(ring_buffer); + uint32_t comple_num = 0; + + if (new_head >= orig_head) + comple_num = new_head - orig_head; + else + comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + + 1; + + ring_buffer->cmd_ptr.head += comple_num; +} + +static int vpsp_ring_buffer_queue_init(void) +{ + int i; + int ret; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&vpsp_ring_buffer[i]); + if (ret) + return ret; + } + + return 0; +} + +static int vpsp_psp_mutex_trylock(void) +{ + int mutex_enabled = 
READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) + return psp_mutex_trylock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + return mutex_trylock(hygon_psp_hooks.sev_cmd_mutex); +} + +static int vpsp_psp_mutex_unlock(void) +{ + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return 0; +} + +static int __vpsp_ring_buffer_enter_locked(int *error) +{ + int ret; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + struct sev_device *sev = psp_master->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = hygon_psp_hooks.__sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct psp_device *psp = psp_master; + unsigned int reg, ret = 0; + unsigned int rb_tail, rb_head; + unsigned int rb_ctl; + struct sev_device *sev; + + if (!psp) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = 
psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb head */ + rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); + rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBHEAD_QHI_HEAD_SHIFT); + rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); + rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + /* PSP response to x86 only when all queue is empty or error happends */ + rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in ring buffer completed */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, ®, (*hygon_psp_hooks.psp_timeout)*10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "csv command in ringbuffer mode timed out, disabling PSP\n"); + *hygon_psp_hooks.psp_dead = true; + return ret; + } + /* cmd error happends */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + /* update head */ + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], + (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], + reg & PSP_RBHEAD_QLO_HEAD_MASK); + + if (psp_ret) + *psp_ret = vpsp_get_cmd_status(prio, index); + + return ret; +} + +static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + 
struct sev_user_data_status data; + int rc; + + rc = __vpsp_ring_buffer_enter_locked(psp_ret); + if (rc) + goto end; + + rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); + + /* exit ringbuf mode by send CMD in mailbox mode */ + hygon_psp_hooks.__sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +end: + return rc; +} + +/** + * struct user_data_status - PLATFORM_STATUS command parameters + * + * @major: major API version + * @minor: minor API version + * @state: platform state + * @owner: self-owned or externally owned + * @chip_secure: ES or MP chip + * @fw_enc: is this FW is encrypted + * @fw_sign: is this FW is signed + * @config_es: platform config flags for csv-es + * @build: Firmware Build ID for this API version + * @bl_version_debug: Bootloader VERSION_DEBUG field + * @bl_version_minor: Bootloader VERSION_MINOR field + * @bl_version_major: Bootloader VERSION_MAJOR field + * @guest_count: number of active guests + * @reserved: should set to zero + */ +struct user_data_status { + uint8_t api_major; /* Out */ + uint8_t api_minor; /* Out */ + uint8_t state; /* Out */ + uint8_t owner : 1, /* Out */ + chip_secure : 1, /* Out */ + fw_enc : 1, /* Out */ + fw_sign : 1, /* Out */ + reserved1 : 4; /*reserved*/ + uint32_t config_es : 1, /* Out */ + build : 31; /* Out */ + uint32_t guest_count; /* Out */ +} __packed; + +/* + * Check whether the firmware supports ringbuffer mode and parse + * commands from the virtual machine + */ +static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, + struct vpsp_cmd *vcmd) +{ + int ret, error; + int rb_supported; + int rb_check_old = RB_NOT_CHECK; + struct user_data_status *status = NULL; + + if (atomic_try_cmpxchg(&vpsp_rb_check_status, &rb_check_old, + RB_CHECKING)) { + /* get buildid to check if the firmware supports ringbuffer mode */ + status = kzalloc(sizeof(*status), GFP_KERNEL); + if (!status) { + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + ret 
= sev_platform_status((struct sev_user_data_status *)status, + &error); + if (ret) { + pr_warn("failed to get status[%#x], use default command mode.\n", error); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + kfree(status); + goto end; + } + + /* check if the firmware supports the ringbuffer mode */ + if (VPSP_RB_IS_SUPPORTED(status->build)) { + if (vpsp_ring_buffer_queue_init()) { + pr_warn("vpsp_ring_buffer_queue_init fail, use default command mode\n"); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + kfree(status); + goto end; + } + WRITE_ONCE(vpsp_rb_supported, 1); + } + + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + kfree(status); + } + +end: + rb_supported = READ_ONCE(vpsp_rb_supported); + /* parse prio by vcmd */ + if (rb_supported && vcmd->is_high_rb) + *prio = CSV_COMMAND_PRIORITY_HIGH; + else + *prio = CSV_COMMAND_PRIORITY_LOW; + /* clear rb level bit in vcmd */ + vcmd->is_high_rb = 0; + + return rb_supported; +} + +/* + * Try to obtain the result again by the command index, this + * interface is used in ringbuffer mode + */ +int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, + struct vpsp_ret *psp_ret) +{ + int ret = 0; + struct csv_cmdptr_entry cmd = {0}; + + /* Get the retult directly if the command has been executed */ + if (index >= 0 && vpsp_get_cmd_status(prio, index) != + VPSP_CMD_STATUS_RUNNING) { + psp_ret->pret = vpsp_get_cmd_status(prio, index); + psp_ret->status = VPSP_FINISH; + return 0; + } + + if (vpsp_psp_mutex_trylock()) { + /* Use mailbox mode to execute a command if there is only one command */ + if (vpsp_queue_cmd_size(prio) == 1) { + /* dequeue command from queue*/ + vpsp_dequeue_cmd(prio, index, &cmd); + + ret = hygon_psp_hooks.__sev_do_cmd_locked(cmd.cmd_id, data, + (int *)psp_ret); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } 
+ } else { + ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, + index); + psp_ret->status = VPSP_FINISH; + vpsp_psp_mutex_unlock(); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed\n", __func__); + goto end; + } + } + } else { + /* Change the command to the running state if getting the mutex fails */ + psp_ret->index = index; + psp_ret->status = VPSP_RUNNING; + return 0; + } +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_get_result); + +/* + * Send the virtual psp command to the PSP device and try to get the + * execution result, the interface and the vpsp_try_get_result + * interface are executed asynchronously. If the execution succeeds, + * the result is returned to the VM. If the execution fails, the + * vpsp_try_get_result interface will be used to obtain the result + * later again + */ +int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) +{ + int ret = 0; + int rb_supported; + int index = -1; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + + /* ringbuffer mode check and parse command prio*/ + rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, + (struct vpsp_cmd *)&cmd); + if (rb_supported) { + /* fill command in ringbuffer's queue and get index */ + index = vpsp_fill_cmd_queue(prio, cmd, data, 0); + if (unlikely(index < 0)) { + /* do mailbox command if queuing failed*/ + ret = psp_do_cmd(cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + goto end; + } + + /* try to get result from the ringbuffer command */ + ret = vpsp_try_get_result(prio, index, data, psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + goto end; + } + } else { + /* mailbox mode */ + ret = psp_do_cmd(cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd 
error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + } + +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); + #ifdef CONFIG_HYGON_CSV int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index 2961178819a5..f60a112881be 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -61,6 +61,8 @@ struct psp_misc_dev { struct miscdevice misc; }; +extern int psp_mutex_trylock(struct psp_mutex *mutex); + int hygon_psp_additional_setup(struct sp_device *sp); void hygon_psp_exit(struct kref *ref); int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); diff --git a/drivers/crypto/ccp/hygon/ring-buffer.c b/drivers/crypto/ccp/hygon/ring-buffer.c index 93402b13b93a..0c9ea0217b2e 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.c +++ b/drivers/crypto/ccp/hygon/ring-buffer.c @@ -28,6 +28,7 @@ static void enqueue_data(struct csv_queue *queue, unsigned int l; void *data; + off &= queue->mask; if (esize != 1) { off *= esize; size *= esize; @@ -128,3 +129,25 @@ unsigned int csv_dequeue_stat(struct csv_queue *queue, queue->head += len; return len; } + +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, + void *buf, unsigned int len) +{ + unsigned int size; + + size = ring_buf->tail - ring_buf->head; + if (len > size) + len = size; + + dequeue_data(ring_buf, buf, len, ring_buf->head); + ring_buf->head += len; + return len; +} + +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf) +{ + unsigned int free_size; + + free_size = queue_avail_size(ring_buf); + return ring_buf->mask - free_size; +} diff --git a/drivers/crypto/ccp/hygon/ring-buffer.h b/drivers/crypto/ccp/hygon/ring-buffer.h index 2c99ade02512..bf97aa6df36a 100644 --- a/drivers/crypto/ccp/hygon/ring-buffer.h +++ b/drivers/crypto/ccp/hygon/ring-buffer.h @@ -19,5 +19,9 @@ unsigned int 
csv_enqueue_cmd(struct csv_queue *queue, const void *buf, unsigned int len); unsigned int csv_dequeue_stat(struct csv_queue *queue, void *buf, unsigned int len); +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, + void *buf, unsigned int len); + +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf); #endif /* __CCP_HYGON_RINGBUF_H__ */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index efb618a0eb6e..90e250de48be 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -305,6 +305,46 @@ struct csv3_data_dbg_read_mem { u32 size; /* In */ } __packed; +/* + * enum VPSP_CMD_STATUS - virtual psp command status + * + * @VPSP_INIT: the initial command from guest + * @VPSP_RUNNING: the middle command to check and run ringbuffer command + * @VPSP_FINISH: inform the guest that the command ran successfully + */ +enum VPSP_CMD_STATUS { + VPSP_INIT = 0, + VPSP_RUNNING, + VPSP_FINISH, + VPSP_MAX +}; + +/** + * struct vpsp_cmd - virtual psp command + * + * @cmd_id: the command id is used to distinguish different commands + * @is_high_rb: indicates the ringbuffer level in which the command is placed + */ +struct vpsp_cmd { + u32 cmd_id : 31; + u32 is_high_rb : 1; +}; + +/** + * struct vpsp_ret - virtual psp return result + * + * @pret: the return code from device + * @resv: reserved bits + * @index: used to distinguish the position of command in the ringbuffer + * @status: indicates the current status of the related command + */ +struct vpsp_ret { + u32 pret : 16; + u32 resv : 2; + u32 index : 12; + u32 status : 2; +}; + #ifdef CONFIG_CRYPTO_DEV_SP_PSP int psp_do_cmd(int cmd, void *data, int *psp_ret); @@ -320,6 +360,10 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); +int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret); + +int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret); + #else /* 
!CONFIG_CRYPTO_DEV_SP_PSP */ static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } @@ -332,6 +376,13 @@ static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; } static inline int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } +static inline int +vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, + struct vpsp_ret *psp_ret) { return -ENODEV; } + +static inline int +vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } + #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); From dce5c51e5a20f1d58862a5e8d15aaf7fdd880bf5 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Tue, 26 Dec 2023 16:59:41 +0800 Subject: [PATCH 92/99] drivers/crypto/ccp: support tkm key isolation hygon inclusion category: feature --------------------------- Add `vpsp_add_vid` and `vpsp_del_vid` to receive VID information in host user mode. Generally, these ioctl calls should be initiated from the QEMU process. When sending data to the PSP hardware, place the VID in the bit 56 to bit 63 range of the physical address. The PSP hardware will then access different key spaces based on the VID. 
Signed-off-by: xiongmengbiao Link: https://github.com/deepin-community/kernel/pull/386 (cherry picked from commit f5402aca2468cc19a7458095d77876ddcf990a14) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/hygon/psp-dev.c --- arch/x86/kvm/psp.c | 25 ++- drivers/crypto/ccp/hygon/csv-dev.c | 26 +-- drivers/crypto/ccp/hygon/psp-dev.c | 250 ++++++++++++++++++++++++++++- include/linux/psp-hygon.h | 26 ++- 4 files changed, 307 insertions(+), 20 deletions(-) diff --git a/arch/x86/kvm/psp.c b/arch/x86/kvm/psp.c index e3a04bef5daf..05ba28ab1317 100644 --- a/arch/x86/kvm/psp.c +++ b/arch/x86/kvm/psp.c @@ -50,6 +50,9 @@ * | -> guest_multiple_level_gpa_restore */ +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f + struct psp_cmdresp_head { uint32_t buf_size; uint32_t cmdresp_size; @@ -510,6 +513,13 @@ static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, return ret; } +static int cmd_type_is_tkm(int cmd) +{ + if (cmd >= TKM_CMD_ID_MIN && cmd <= TKM_CMD_ID_MAX) + return 1; + return 0; +} + /* * The primary implementation interface of virtual PSP in kernel mode */ @@ -522,6 +532,17 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; uint8_t prio = CSV_COMMAND_PRIORITY_LOW; uint32_t index = 0; + uint32_t vid = 0; + + // only tkm cmd need vid + if (cmd_type_is_tkm(vcmd->cmd_id)) { + // if vm without set vid, then tkm command is not allowed + ret = vpsp_get_vid(&vid, kvm->userspace_pid); + if (ret) { + pr_err("[%s]: not allowed tkm command without vid\n", __func__); + return -EFAULT; + } + } if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) @@ -540,7 +561,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, } /* try to send command to the device for execution*/ - ret = vpsp_try_do_cmd(cmd, (void *)hbuf.data, + ret = vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { 
pr_err("[%s]: vpsp_do_cmd failed\n", __func__); @@ -578,7 +599,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, CSV_COMMAND_PRIORITY_LOW; index = psp_ret.index; /* try to get the execution result from ringbuffer*/ - ret = vpsp_try_get_result(prio, index, g_hbuf_wrap[prio][index].data, + ret = vpsp_try_get_result(vid, prio, index, g_hbuf_wrap[prio][index].data, (struct vpsp_ret *)&psp_ret); if (unlikely(ret)) { pr_err("[%s]: vpsp_try_get_result failed\n", __func__); diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 620845e6ae51..3148810f0518 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -656,12 +656,12 @@ static int vpsp_dequeue_cmd(int prio, int index, * Populate the command from the virtual machine to the queue to * support execution in ringbuffer mode */ -static int vpsp_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags) { struct csv_cmdptr_entry cmdptr = { }; int index = -1; - cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid); cmdptr.cmd_id = cmd; cmdptr.cmd_flags = flags; @@ -949,11 +949,12 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, return rb_supported; } +int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret); /* * Try to obtain the result again by the command index, this * interface is used in ringbuffer mode */ -int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, +int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret) { int ret = 0; @@ -973,8 +974,8 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, /* dequeue command from queue*/ vpsp_dequeue_cmd(prio, index, &cmd); - ret = hygon_psp_hooks.__sev_do_cmd_locked(cmd.cmd_id, data, - (int *)psp_ret); + ret = 
__vpsp_do_cmd_locked(vid, cmd.cmd_id, data, + (int *)psp_ret); psp_ret->status = VPSP_FINISH; vpsp_psp_mutex_unlock(); if (unlikely(ret)) { @@ -993,7 +994,8 @@ int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, psp_ret->status = VPSP_FINISH; vpsp_psp_mutex_unlock(); if (unlikely(ret)) { - pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed\n", __func__); + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", + __func__, ret); goto end; } } @@ -1016,7 +1018,7 @@ EXPORT_SYMBOL_GPL(vpsp_try_get_result); * vpsp_try_get_result interface will be used to obtain the result * later again */ -int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { int ret = 0; int rb_supported; @@ -1028,10 +1030,10 @@ int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) (struct vpsp_cmd *)&cmd); if (rb_supported) { /* fill command in ringbuffer's queue and get index */ - index = vpsp_fill_cmd_queue(prio, cmd, data, 0); + index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0); if (unlikely(index < 0)) { /* do mailbox command if queuing failed*/ - ret = psp_do_cmd(cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; @@ -1047,14 +1049,14 @@ int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret) } /* try to get result from the ringbuffer command */ - ret = vpsp_try_get_result(prio, index, data, psp_ret); + ret = vpsp_try_get_result(vid, prio, index, data, psp_ret); if (unlikely(ret)) { - pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); goto end; } } else { /* mailbox mode */ - ret = psp_do_cmd(cmd, data, (int *)psp_ret); + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); if (unlikely(ret)) { if (ret == -EIO) { ret = 0; diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 
bd1e9f2d81c9..a15fd8f736f5 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -15,6 +15,9 @@ #include #include #include +#include +#include +#include #include "psp-dev.h" @@ -26,9 +29,23 @@ static struct psp_misc_dev *psp_misc; enum HYGON_PSP_OPCODE { HYGON_PSP_MUTEX_ENABLE = 1, HYGON_PSP_MUTEX_DISABLE, + HYGON_VPSP_CTRL_OPT, HYGON_PSP_OPCODE_MAX_NR, }; +enum VPSP_DEV_CTRL_OPCODE { + VPSP_OP_VID_ADD, + VPSP_OP_VID_DEL, +}; + +struct vpsp_dev_ctrl { + unsigned char op; + union { + unsigned int vid; + unsigned char reserved[128]; + } data; +}; + uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) { return xchg(dst, val); @@ -130,10 +147,141 @@ static ssize_t write_psp(struct file *file, const char __user *buf, size_t count return written; } +DEFINE_RWLOCK(vpsp_rwlock); + +/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. + * but, the performance of finding vid is determined by g_vpsp_vid_num, + * so VPSP_VID_MAX_ENTRIES can be set larger. + */ +#define VPSP_VID_MAX_ENTRIES 2048 +#define VPSP_VID_NUM_MAX 64 + +struct vpsp_vid_entry { + uint32_t vid; + pid_t pid; +}; +static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES]; +static uint32_t g_vpsp_vid_num; +static int compare_vid_entries(const void *a, const void *b) +{ + return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid; +} +static void swap_vid_entries(void *a, void *b, int size) +{ + struct vpsp_vid_entry entry; + + memcpy(&entry, a, size); + memcpy(a, b, size); + memcpy(b, &entry, size); +} + +/** + * When the virtual machine executes the 'tkm' command, + * it needs to retrieve the corresponding 'vid' + * by performing a binary search using 'kvm->userspace_pid'. 
+ */ +int vpsp_get_vid(uint32_t *vid, pid_t pid) +{ + struct vpsp_vid_entry new_entry = {.pid = pid}; + struct vpsp_vid_entry *existing_entry = NULL; + + read_lock(&vpsp_rwlock); + existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num, + sizeof(struct vpsp_vid_entry), compare_vid_entries); + read_unlock(&vpsp_rwlock); + + if (!existing_entry) + return -ENOENT; + if (vid) { + *vid = existing_entry->vid; + pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); + } + return 0; +} +EXPORT_SYMBOL_GPL(vpsp_get_vid); + +/** + * Upon qemu startup, this section checks whether + * the '-device psp,vid' parameter is specified. + * If set, it utilizes the 'vpsp_add_vid' function + * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'. + * The insertion is done in ascending order of 'pid'. + */ +static int vpsp_add_vid(uint32_t vid) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid}; + + if (vpsp_get_vid(NULL, cur_pid) == 0) + return -EEXIST; + if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) + return -ENOMEM; + if (vid >= VPSP_VID_NUM_MAX) + return -EINVAL; + + write_lock(&vpsp_rwlock); + memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry)); + sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry), + compare_vid_entries, swap_vid_entries); + pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); + write_unlock(&vpsp_rwlock); + return 0; +} + +/** + * Upon the virtual machine is shut down, + * the 'vpsp_del_vid' function is employed to remove + * the 'vid' associated with the current 'pid'. 
+ */ +static int vpsp_del_vid(void) +{ + pid_t cur_pid = task_pid_nr(current); + int i, ret = -ENOENT; + + write_lock(&vpsp_rwlock); + for (i = 0; i < g_vpsp_vid_num; ++i) { + if (g_vpsp_vid_array[i].pid == cur_pid) { + --g_vpsp_vid_num; + pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", + g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num); + memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], + sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i)); + ret = 0; + goto end; + } + } + +end: + write_unlock(&vpsp_rwlock); + return ret; +} + +static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) +{ + int ret = 0; + unsigned char op = ctrl->op; + + switch (op) { + case VPSP_OP_VID_ADD: + ret = vpsp_add_vid(ctrl->data.vid); + break; + + case VPSP_OP_VID_DEL: + ret = vpsp_del_vid(); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) { unsigned int opcode = 0; + struct vpsp_dev_ctrl vpsp_ctrl_op; + int ret = -EFAULT; if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { pr_info("%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); @@ -150,6 +298,7 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) // Wait 10ms just in case someone is right before getting the psp lock. mdelay(10); psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + ret = 0; break; case HYGON_PSP_MUTEX_DISABLE: @@ -161,13 +310,21 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) // Wait 10ms just in case someone is right before getting the sev lock. 
mdelay(10); mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + ret = 0; + break; + + case HYGON_VPSP_CTRL_OPT: + if (copy_from_user(&vpsp_ctrl_op, (void __user *)arg, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); break; default: pr_info("%s: invalid ioctl number: %d\n", __func__, opcode); return -EINVAL; } - return 0; + return ret; } static const struct file_operations psp_fops = { @@ -303,6 +460,97 @@ static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } +int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + phys_addr_t phys_addr; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (*hygon_psp_hooks.psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + if (data && WARN_ON_ONCE(!virt_addr_valid(data))) + return -EINVAL; + + /* Get the physical address of the command buffer */ + phys_addr = PUT_PSP_VID(__psp_pa(data), vid); + phys_lsb = data ? lower_32_bits(phys_addr) : 0; + phys_msb = data ? 
upper_32_bits(phys_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, *hygon_psp_hooks.psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = hygon_psp_hooks.sev_wait_cmd_ioc(sev, ®, *hygon_psp_hooks.psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + *hygon_psp_hooks.psp_dead = true; + + return ret; + } + + *hygon_psp_hooks.psp_timeout = *hygon_psp_hooks.psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + hygon_psp_hooks.sev_cmd_buffer_len(cmd), false); + + return ret; +} + +int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (is_vendor_hygon() && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(hygon_psp_hooks.sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); + + if (is_vendor_hygon() && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); + + return rc; +} + int psp_do_cmd(int cmd, void *data, int *psp_ret) { int rc; diff --git a/include/linux/psp-hygon.h 
b/include/linux/psp-hygon.h index 90e250de48be..bcc91fd1469e 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -345,8 +345,16 @@ struct vpsp_ret { u32 status : 2; }; +#define PSP_VID_MASK 0xff +#define PSP_VID_SHIFT 56 +#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) +#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) +#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) + #ifdef CONFIG_CRYPTO_DEV_SP_PSP +int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret); + int psp_do_cmd(int cmd, void *data, int *psp_ret); int csv_ring_buffer_queue_init(void); @@ -360,12 +368,17 @@ int csv_check_stat_queue_status(int *psp_ret); */ int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret); -int vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, struct vpsp_ret *psp_ret); +int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, + void *data, struct vpsp_ret *psp_ret); + +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret); -int vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret *psp_ret); +int vpsp_get_vid(uint32_t *vid, pid_t pid); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ +static inline int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { return -ENODEV; } + static inline int psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; } static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; } @@ -377,12 +390,15 @@ static inline int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; } static inline int -vpsp_try_get_result(uint8_t prio, uint32_t index, void *data, - struct vpsp_ret *psp_ret) { return -ENODEV; } +vpsp_try_get_result(uint32_t vid, uint8_t prio, + uint32_t index, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } static inline int -vpsp_try_do_cmd(int cmd, void *data, struct vpsp_ret 
*psp_ret) { return -ENODEV; } +vpsp_try_do_cmd(uint32_t vid, int cmd, + void *data, struct vpsp_ret *psp_ret) { return -ENODEV; } +static inline int +vpsp_get_vid(uint32_t *vid, pid_t pid) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); From 9a0f9af63fa4203b0b97a40eef3f75a242813aea Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Sun, 18 Feb 2024 22:56:37 +0800 Subject: [PATCH 93/99] drivers/crypto/ccp: Allow VM without a configured vid to use TKM hygon inclusion category: feature --------------------------- If the guest does not explicitly specify a VID via `vpsp_add_vid`, VID 0 will be used by default, sharing the key space with the host. A `vpsp_set_default_vid_permission` ioctl call has been added to control the behavior of the default VID. If the default VID permission is set to "not allowed," any guest that does not explicitly specify a VID will be denied access to the TKM function. Signed-off-by: xiongmengbiao Link: https://github.com/deepin-community/kernel/pull/386 (cherry picked from commit 726db88e56b35c033b7b3356ebb7ef61e3e94041) Signed-off-by: Wentao Guan --- arch/x86/kvm/psp.c | 4 ++-- drivers/crypto/ccp/hygon/psp-dev.c | 32 ++++++++++++++++++++++++++++++ include/linux/psp-hygon.h | 4 ++++ 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/psp.c b/arch/x86/kvm/psp.c index 05ba28ab1317..9ed9102674c1 100644 --- a/arch/x86/kvm/psp.c +++ b/arch/x86/kvm/psp.c @@ -536,9 +536,9 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, // only tkm cmd need vid if (cmd_type_is_tkm(vcmd->cmd_id)) { - // if vm without set vid, then tkm command is not allowed + // check the permission to use the default vid when no vid is set ret = vpsp_get_vid(&vid, kvm->userspace_pid); - if (ret) { + if (ret && !vpsp_get_default_vid_permission()) { pr_err("[%s]: not allowed tkm command without vid\n", __func__); return -EFAULT; } diff --git 
a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index a15fd8f736f5..56bba2c73b09 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -36,12 +36,16 @@ enum HYGON_PSP_OPCODE { enum VPSP_DEV_CTRL_OPCODE { VPSP_OP_VID_ADD, VPSP_OP_VID_DEL, + VPSP_OP_SET_DEFAULT_VID_PERMISSION, + VPSP_OP_GET_DEFAULT_VID_PERMISSION, }; struct vpsp_dev_ctrl { unsigned char op; union { unsigned int vid; + // Set or check the permissions for the default VID + unsigned int def_vid_perm; unsigned char reserved[128]; } data; }; @@ -175,6 +179,23 @@ static void swap_vid_entries(void *a, void *b, int size) memcpy(b, &entry, size); } +/** + * When 'allow_default_vid' is set to 1, + * QEMU is allowed to use 'vid 0' by default + * in the absence of a valid 'vid' setting. + */ +uint32_t allow_default_vid = 1; +void vpsp_set_default_vid_permission(uint32_t is_allow) +{ + allow_default_vid = is_allow; +} + +int vpsp_get_default_vid_permission(void) +{ + return allow_default_vid; +} +EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); + /** * When the virtual machine executes the 'tkm' command, * it needs to retrieve the corresponding 'vid' @@ -270,6 +291,14 @@ static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) ret = vpsp_del_vid(); break; + case VPSP_OP_SET_DEFAULT_VID_PERMISSION: + vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); + break; + + case VPSP_OP_GET_DEFAULT_VID_PERMISSION: + ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); + break; + default: ret = -EINVAL; break; @@ -318,6 +347,9 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) sizeof(struct vpsp_dev_ctrl))) return -EFAULT; ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); + if (!ret && copy_to_user((void __user *)arg, &vpsp_ctrl_op, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; break; default: diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index bcc91fd1469e..526d71f27401 100644 --- 
a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -375,6 +375,7 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) int vpsp_get_vid(uint32_t *vid, pid_t pid); +int vpsp_get_default_vid_permission(void); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { return -ENODEV; } @@ -399,6 +400,9 @@ vpsp_try_do_cmd(uint32_t vid, int cmd, static inline int vpsp_get_vid(uint32_t *vid, pid_t pid) { return -ENODEV; } + +static inline int +vpsp_get_default_vid_permission(void) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); From 7774cb71ab80a1f1f5ff6d7644b69141c57e67c5 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Fri, 29 Mar 2024 16:29:50 +0800 Subject: [PATCH 94/99] drivers/crypto/ccp: Eliminate dependence of the kvm module on the ccp module hygon inclusion category: feature --------------------------- Because the KVM module calls certain interfaces from the ccp module, such as vpsp_try_do_cmd, it is necessary to load the ccp module before loading kvm. However, on CPUs other than Hygon, the ccp module might not be loaded, which would prevent the kvm module from loading. Therefore, we use function hooks to call functions from the ccp module. 
Now the module dependencies are as follows: [root@centos-7-8 ~]# lsmod | grep kvm kvm_amd 200704 0 kvm 1339392 1 kvm_amd ccp 352256 1 kvm_amd irqbypass 12288 2 vfio_pci_core,kvm Signed-off-by: xiongmengbiao Link: https://github.com/deepin-community/kernel/pull/386 (cherry picked from commit d97fda9a477625df22ad5131582ca25c8439f274) Signed-off-by: Wentao Guan Conflicts: arch/x86/kvm/Makefile --- arch/x86/include/asm/kvm_host.h | 5 +- arch/x86/kvm/Makefile | 2 +- arch/x86/kvm/svm/svm.c | 27 ++++++++++ arch/x86/kvm/x86.c | 18 ++++++- drivers/crypto/ccp/Makefile | 3 +- .../psp.c => drivers/crypto/ccp/hygon/vpsp.c | 50 +++++++++---------- include/linux/psp-hygon.h | 14 ++++++ include/linux/psp-sev.h | 1 + 8 files changed, 90 insertions(+), 30 deletions(-) rename arch/x86/kvm/psp.c => drivers/crypto/ccp/hygon/vpsp.c (90%) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bb63740e4a36..01db4e23a8c9 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -2389,8 +2389,9 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit); -int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, - gpa_t psp_ret_gpa, gpa_t table_gpa); + +void kvm_arch_hypercall_init(void *func); +void kvm_arch_hypercall_exit(void); int kvm_add_user_return_msr(u32 msr); int kvm_find_user_return_msr(u32 msr); diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 2822b34b9cac..42828fc7d6e2 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -6,7 +6,7 @@ ccflags-$(CONFIG_KVM_WERROR) += -Werror include $(srctree)/virt/kvm/Makefile.kvm kvm-y += x86.o emulate.o irq.o lapic.o cpuid.o pmu.o mtrr.o \ - debugfs.o mmu/mmu.o mmu/page_track.o mmu/spte.o psp.o + debugfs.o mmu/mmu.o mmu/page_track.o mmu/spte.o kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o 
kvm-$(CONFIG_KVM_IOAPIC) += i8259.o i8254.o ioapic.o diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 67f7dea3fb74..6007bc32055d 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -926,6 +927,9 @@ static void svm_hardware_unsetup(void) __free_pages(__sme_pa_to_page(iopm_base), get_order(IOPM_SIZE)); iopm_base = 0; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + kvm_arch_hypercall_exit(); } static void init_seg(struct vmcb_seg *seg) @@ -5306,6 +5310,26 @@ static __init void svm_set_cpu_caps(void) kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM); } +static int kvm_hygon_arch_hypercall(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3) +{ + int ret = 0; + struct kvm_vpsp vpsp = { + .kvm = kvm, + .write_guest = kvm_write_guest, + .read_guest = kvm_read_guest + }; + switch (nr) { + case KVM_HC_PSP_OP: + ret = kvm_pv_psp_op(&vpsp, a0, a1, a2, a3); + break; + + default: + ret = -KVM_ENOSYS; + break; + } + return ret; +} + static __init int svm_hardware_setup(void) { void *iopm_va; @@ -5469,6 +5493,9 @@ static __init int svm_hardware_setup(void) goto err; } + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + kvm_arch_hypercall_init(kvm_hygon_arch_hypercall); + return 0; err: diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 066235e6d0ff..a7fccd780f32 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -317,6 +317,8 @@ const struct kvm_stats_header kvm_vcpu_stats_header = { static struct kmem_cache *x86_emulator_cache; +static int (*kvm_arch_hypercall)(struct kvm *kvm, u64 nr, u64 a0, u64 a1, u64 a2, u64 a3); + /* * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features) track * the set of MSRs that KVM exposes to userspace through KVM_GET_MSRS, @@ -10437,7 +10439,9 @@ int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, int cpl, ret = 0; break; case KVM_HC_PSP_OP: - ret = kvm_pv_psp_op(vcpu->kvm, a0, a1, a2, a3); + ret = 
-KVM_ENOSYS; + if (kvm_arch_hypercall) + ret = kvm_arch_hypercall(vcpu->kvm, nr, a0, a1, a2, a3); break; case KVM_HC_MAP_GPA_RANGE: { u64 gpa = a0, npages = a1, attrs = a2; @@ -14399,6 +14403,18 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size, } EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_sev_es_string_io); +void kvm_arch_hypercall_init(void *func) +{ + kvm_arch_hypercall = func; +} +EXPORT_SYMBOL_GPL(kvm_arch_hypercall_init); + +void kvm_arch_hypercall_exit(void) +{ + kvm_arch_hypercall = NULL; +} +EXPORT_SYMBOL_GPL(kvm_arch_hypercall_exit); + EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit); EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_mmio); diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index c8ae9e64a444..4b5d877b22a6 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -19,7 +19,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ sfs.o \ hygon/psp-dev.o \ hygon/csv-dev.o \ - hygon/ring-buffer.o + hygon/ring-buffer.o \ + hygon/vpsp.o ccp-$(CONFIG_TDM_DEV_HYGON) += hygon/tdm-dev.o diff --git a/arch/x86/kvm/psp.c b/drivers/crypto/ccp/hygon/vpsp.c similarity index 90% rename from arch/x86/kvm/psp.c rename to drivers/crypto/ccp/hygon/vpsp.c index 9ed9102674c1..13208fe2c4b3 100644 --- a/arch/x86/kvm/psp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -184,7 +184,7 @@ static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva) * newly allocated hva(host virtual address) and updates the mapping * relationship in the parent memory */ -static int guest_multiple_level_gpa_replace(struct kvm *kvm, +static int guest_multiple_level_gpa_replace(struct kvm_vpsp *vpsp, struct map_tbl *tbl, struct gpa2hva_tbls *g2h) { int ret = 0; @@ -199,7 +199,7 @@ static int guest_multiple_level_gpa_replace(struct kvm *kvm, return -ENOMEM; /* get child gpa from parent gpa */ - if (unlikely(kvm_read_guest(kvm, tbl->parent_pa + tbl->offset, + if (unlikely(vpsp->read_guest(vpsp->kvm, tbl->parent_pa + tbl->offset, 
&sub_paddr, sizeof(sub_paddr)))) { pr_err("[%s]: kvm_read_guest for parent gpa failed\n", __func__); @@ -208,7 +208,7 @@ static int guest_multiple_level_gpa_replace(struct kvm *kvm, } /* copy child block data from gpa to hva */ - if (unlikely(kvm_read_guest(kvm, sub_paddr, (void *)tbl->hva, + if (unlikely(vpsp->read_guest(vpsp->kvm, sub_paddr, (void *)tbl->hva, tbl->size))) { pr_err("[%s]: kvm_read_guest for sub_data failed\n", __func__); @@ -248,7 +248,7 @@ static int guest_multiple_level_gpa_replace(struct kvm *kvm, * address) back to the memory corresponding to the gpa, and restores * the mapping relationship in the original parent memory */ -static int guest_multiple_level_gpa_restore(struct kvm *kvm, +static int guest_multiple_level_gpa_restore(struct kvm_vpsp *vpsp, struct map_tbl *tbl, struct gpa2hva_tbls *g2h) { int ret = 0; @@ -265,7 +265,7 @@ static int guest_multiple_level_gpa_restore(struct kvm *kvm, } /* copy child block data from hva to gpa */ - if (unlikely(kvm_write_guest(kvm, sub_gpa, (void *)tbl->hva, + if (unlikely(vpsp->write_guest(vpsp->kvm, sub_gpa, (void *)tbl->hva, tbl->size))) { pr_err("[%s]: kvm_write_guest for sub_gpa failed\n", __func__); @@ -299,7 +299,7 @@ static int guest_multiple_level_gpa_restore(struct kvm *kvm, * executes upper-layer abstract interfaces, including replacing and * restoring two sub-processing functions */ -static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, +static int guest_addr_map_table_op(struct kvm_vpsp *vpsp, struct gpa2hva_tbls *g2h, struct addr_map_tbls *map_tbls, int op) { int ret = 0; @@ -320,7 +320,7 @@ static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, } /* restore new pa of kva with the gpa from guest */ - if (unlikely(guest_multiple_level_gpa_restore(kvm, + if (unlikely(guest_multiple_level_gpa_restore(vpsp, &map_tbls->tbl[i], g2h))) { pr_err("[%s]: guest_multiple_level_gpa_restore failed\n", __func__); @@ -351,7 +351,7 @@ static int 
guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, } /* replace the gpa from guest with the new pa of kva */ - if (unlikely(guest_multiple_level_gpa_replace(kvm, + if (unlikely(guest_multiple_level_gpa_replace(vpsp, &map_tbls->tbl[i], g2h))) { pr_err("[%s]: guest_multiple_level_gpa_replace failed\n", __func__); @@ -389,7 +389,7 @@ static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls * information in the command buffer, the processed data will be * used to interact with the psp device */ -static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, +static int kvm_pv_psp_cmd_pre_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf) { int ret = 0; @@ -401,7 +401,7 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, struct gpa2hva_tbls *g2h = NULL; uint32_t g2h_tbl_size; - if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, &psp_head, sizeof(struct psp_cmdresp_head)))) return -EFAULT; @@ -410,14 +410,14 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, if (!data) return -ENOMEM; - if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + if (unlikely(vpsp->read_guest(vpsp->kvm, data_gpa, data, data_size))) { ret = -EFAULT; goto end; } if (table_gpa) { /* parse address map table from guest */ - if (unlikely(kvm_read_guest(kvm, table_gpa, &map_head, + if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, &map_head, sizeof(struct addr_map_tbls)))) { pr_err("[%s]: kvm_read_guest for map_head failed\n", __func__); @@ -433,7 +433,7 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, goto end; } - if (unlikely(kvm_read_guest(kvm, table_gpa, map_tbls, + if (unlikely(vpsp->read_guest(vpsp->kvm, table_gpa, map_tbls, map_tbl_size))) { pr_err("[%s]: kvm_read_guest for map_tbls failed\n", __func__); @@ -459,7 +459,7 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, 
goto end; } - if (guest_addr_map_table_op(kvm, g2h, map_tbls, 0)) { + if (guest_addr_map_table_op(vpsp, g2h, map_tbls, 0)) { pr_err("[%s]: guest_addr_map_table_op for replacing failed\n", __func__); ret = -EFAULT; @@ -481,13 +481,13 @@ static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, * pointer of the mapping table when the command has finished * interacting with the psp device */ -static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, +static int kvm_pv_psp_cmd_post_op(struct kvm_vpsp *vpsp, gpa_t data_gpa, struct vpsp_hbuf_wrapper *hbuf) { int ret = 0; if (hbuf->map_tbls) { - if (guest_addr_map_table_op(kvm, hbuf->g2h_tbls, + if (guest_addr_map_table_op(vpsp, hbuf->g2h_tbls, hbuf->map_tbls, 1)) { pr_err("[%s]: guest_addr_map_table_op for restoring failed\n", __func__); @@ -497,7 +497,7 @@ static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, } /* restore cmdresp's buffer from context */ - if (unlikely(kvm_write_guest(kvm, data_gpa, hbuf->data, + if (unlikely(vpsp->write_guest(vpsp->kvm, data_gpa, hbuf->data, hbuf->data_size))) { pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", __func__); @@ -523,7 +523,7 @@ static int cmd_type_is_tkm(int cmd) /* * The primary implementation interface of virtual PSP in kernel mode */ -int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, +int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, gpa_t table_gpa) { int ret = 0; @@ -537,21 +537,21 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, // only tkm cmd need vid if (cmd_type_is_tkm(vcmd->cmd_id)) { // check the permission to use the default vid when no vid is set - ret = vpsp_get_vid(&vid, kvm->userspace_pid); + ret = vpsp_get_vid(&vid, vpsp->kvm->userspace_pid); if (ret && !vpsp_get_default_vid_permission()) { pr_err("[%s]: not allowed tkm command without vid\n", __func__); return -EFAULT; } } - if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, 
&psp_ret, + if (unlikely(vpsp->read_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)))) return -EFAULT; switch (psp_ret.status) { case VPSP_INIT: /* multilevel pointer replace*/ - ret = kvm_pv_psp_cmd_pre_op(kvm, data_gpa, table_gpa, &hbuf); + ret = kvm_pv_psp_cmd_pre_op(vpsp, data_gpa, table_gpa, &hbuf); if (unlikely(ret)) { psp_ret.status = VPSP_FINISH; pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", @@ -579,7 +579,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, case VPSP_FINISH: /* restore multilevel pointer data */ - ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, &hbuf); + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &hbuf); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", __func__); @@ -613,7 +613,7 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, case VPSP_FINISH: /* restore multilevel pointer data */ - ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, + ret = kvm_pv_psp_cmd_post_op(vpsp, data_gpa, &g_hbuf_wrap[prio][index]); if (unlikely(ret)) { pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", @@ -636,6 +636,6 @@ int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, } end: /* return psp_ret to guest */ - kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); + vpsp->write_guest(vpsp->kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); return ret; -} +} EXPORT_SYMBOL_GPL(kvm_pv_psp_op); diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 526d71f27401..41308eed5a27 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -12,6 +12,7 @@ #include #include +#include /*****************************************************************************/ /***************************** CSV interface *********************************/ @@ -345,6 +346,12 @@ struct vpsp_ret { u32 status : 2; }; +struct kvm_vpsp { + struct kvm *kvm; + int (*write_guest)(struct kvm *kvm, gpa_t gpa, const void *data, unsigned long len); + int 
(*read_guest)(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); }; + #define PSP_VID_MASK 0xff #define PSP_VID_SHIFT 56 #define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) @@ -376,6 +383,9 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) int vpsp_get_vid(uint32_t *vid, pid_t pid); int vpsp_get_default_vid_permission(void); + +int kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa); #else /* !CONFIG_CRYPTO_DEV_SP_PSP */ static inline int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) { return -ENODEV; } @@ -403,6 +413,10 @@ vpsp_get_vid(uint32_t *vid, pid_t pid) { return -ENODEV; } static inline int vpsp_get_default_vid_permission(void) { return -ENODEV; } + +static inline int +kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, + gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index f3d02cb00daa..cb77828e04f3 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -13,6 +13,7 @@ #define __PSP_SEV_H__ #include +#include #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ From 356b0cff93ea0daa738ba2638b0f0f987ec972bd Mon Sep 17 00:00:00 2001 From: niuyongwen Date: Fri, 2 Aug 2024 11:31:33 +0800 Subject: [PATCH 95/99] drivers/crypto/ccp: memmove is used instead of memcpy in overlapping memory for tkm hygon inclusion category: bugfix --------------------------- When deleting a VID, `memcpy` is used to move data within the array and overwrite the deleted VID entry. However, `memcpy` does not handle overlapping memory regions correctly within the array. Therefore, `memmove` should be used instead. 
Signed-off-by: niuyongwen Link: https://github.com/deepin-community/kernel/pull/386 (cherry picked from commit a67f9aa4254b4e2f055aa15f94db4a0fe469edb8) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/psp-dev.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 56bba2c73b09..1bb7b541245c 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -265,7 +265,7 @@ static int vpsp_del_vid(void) --g_vpsp_vid_num; pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num); - memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], + memmove(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i)); ret = 0; goto end; From 3a8997681bd3bbd79ef524f57c42aac76014ecaf Mon Sep 17 00:00:00 2001 From: Wentao Guan Date: Wed, 24 Dec 2025 16:31:52 +0800 Subject: [PATCH 96/99] drivers/crypto/ccp: fix hygon ccp build for 6.18 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Log: [fix psp-dev.c drivers/crypto/ccp/hygon/psp-dev.c:25:10: error: no previous prototype for ‘atomic64_exchange’ [-Werror=missing-prototypes] 25 | uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) | ^~~~~~~~~~~~~~~~~ drivers/crypto/ccp/hygon/psp-dev.c:30:5: error: no previous prototype for ‘psp_mutex_init’ [-Werror=missing-prototypes] 30 | int psp_mutex_init(struct psp_mutex *mutex) | ^~~~~~~~~~~~~~ drivers/crypto/ccp/hygon/vpsp.c:108:21: error: no previous prototype for ‘map_tbl_dump’ [-Werror=missing-prototypes] 108 | void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) | ^~~~~~~~~~~~ drivers/crypto/ccp/hygon/vpsp.c:121:21: error: no previous prototype for ‘g2h_tbl_dump’ [-Werror=missing-prototypes] 121 | void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls) | ^~~~~~~~~~~~ cc1: all 
warnings being treated as errors make[5]: *** [scripts/Makefile.build:287:drivers/crypto/ccp/hygon/vpsp.o] 错误 1 make[5]: *** 正在等待未完成的任务.... drivers/crypto/ccp/hygon/psp-dev.c:188:6: error: no previous prototype for ‘vpsp_set_default_vid_permission’ [-Werror=missing-prototypes] 188 | void vpsp_set_default_vid_permission(uint32_t is_allow) | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ drivers/crypto/ccp/hygon/psp-dev.c:495:5: error: no previous prototype for ‘__vpsp_do_cmd_locked’ [-Werror=missing-prototypes] 495 | int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) | ^~~~~~~~~~~~~~~~~~~~ cc1: all warnings being treated as errors ] Link: https://github.com/deepin-community/kernel/pull/386 Signed-off-by: Wentao Guan --- drivers/crypto/ccp/hygon/csv-dev.c | 1 - drivers/crypto/ccp/hygon/psp-dev.c | 6 +++--- drivers/crypto/ccp/hygon/psp-dev.h | 1 + drivers/crypto/ccp/hygon/vpsp.c | 4 ++-- drivers/watchdog/stXISsHW | 0 5 files changed, 6 insertions(+), 6 deletions(-) create mode 100644 drivers/watchdog/stXISsHW diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index 3148810f0518..b2e19e2ade33 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -949,7 +949,6 @@ static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, return rb_supported; } -int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret); /* * Try to obtain the result again by the command index, this * interface is used in ringbuffer mode diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 1bb7b541245c..57dd68298647 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -50,12 +50,12 @@ struct vpsp_dev_ctrl { } data; }; -uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) +static uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) { return xchg(dst, val); } -int psp_mutex_init(struct psp_mutex *mutex) +static int 
psp_mutex_init(struct psp_mutex *mutex) { if (!mutex) return -1; @@ -185,7 +185,7 @@ static void swap_vid_entries(void *a, void *b, int size) * in the absence of a valid 'vid' setting. */ uint32_t allow_default_vid = 1; -void vpsp_set_default_vid_permission(uint32_t is_allow) +static void vpsp_set_default_vid_permission(uint32_t is_allow) { allow_default_vid = is_allow; } diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index f60a112881be..9352f3e5750e 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -67,6 +67,7 @@ int hygon_psp_additional_setup(struct sp_device *sp); void hygon_psp_exit(struct kref *ref); int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); int psp_mutex_unlock(struct psp_mutex *mutex); +int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret); int fixup_hygon_psp_caps(struct psp_device *psp); int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); diff --git a/drivers/crypto/ccp/hygon/vpsp.c b/drivers/crypto/ccp/hygon/vpsp.c index 13208fe2c4b3..76cc9c05fb47 100644 --- a/drivers/crypto/ccp/hygon/vpsp.c +++ b/drivers/crypto/ccp/hygon/vpsp.c @@ -105,7 +105,7 @@ struct vpsp_hbuf_wrapper { struct vpsp_hbuf_wrapper g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; -void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) +static void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) { int i; @@ -118,7 +118,7 @@ void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) pr_info("\n"); } -void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls) +static void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls) { int i; diff --git a/drivers/watchdog/stXISsHW b/drivers/watchdog/stXISsHW new file mode 100644 index 000000000000..e69de29bb2d1 From 
eafa7bea5d11a4edbb41e0cbfa9b91edd7136b32 Mon Sep 17 00:00:00 2001 From: xiongmengbiao Date: Sat, 14 Sep 2024 14:08:34 +0800 Subject: [PATCH 97/99] crypto: ccp: fix the sev_do_cmd panic on non-Hygon platforms hygon inclusion category: bugfix --------------------------- The Hygon platform indirectly accesses the `sev_cmd_mutex` variable through `hygon_psp_hooks`. However, on non-Hygon platforms (such as AMD), `hygon_psp_hooks` is not initialized, so `sev_cmd_mutex` should be accessed directly. Signed-off-by: xiongmengbiao Link: https://github.com/deepin-community/kernel/pull/425 (cherry picked from commit b338d3a9add34a6f4c8560bd72f5e7f9a4ed5103) Signed-off-by: Wentao Guan Conflicts: drivers/crypto/ccp/sev-dev.c --- drivers/crypto/ccp/hygon/csv-dev.c | 34 +++++++++++++++++++++++------- drivers/crypto/ccp/hygon/psp-dev.c | 24 ++++++++++++++++----- drivers/crypto/ccp/psp-dev.c | 17 ++++++++++----- 3 files changed, 57 insertions(+), 18 deletions(-) diff --git a/drivers/crypto/ccp/hygon/csv-dev.c b/drivers/crypto/ccp/hygon/csv-dev.c index b2e19e2ade33..8e2a25a44832 100644 --- a/drivers/crypto/ccp/hygon/csv-dev.c +++ b/drivers/crypto/ccp/hygon/csv-dev.c @@ -210,7 +210,7 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (input.cmd > CSV_MAX) return -EINVAL; - if (is_vendor_hygon() && mutex_enabled) { + if (mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -238,7 +238,7 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) * Release the mutex before calling the native ioctl function * because it will acquires the mutex. 
*/ - if (is_vendor_hygon() && mutex_enabled) + if (mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); @@ -248,7 +248,7 @@ static long csv_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; - if (is_vendor_hygon() && mutex_enabled) + if (mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); @@ -411,7 +411,7 @@ static int csv_do_ringbuf_cmds(int *psp_ret) if (!hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; - if (is_vendor_hygon() && mutex_enabled) { + if (mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -430,7 +430,7 @@ static int csv_do_ringbuf_cmds(int *psp_ret) csv_comm_mode = CSV_COMM_MAILBOX_ON; cmd_unlock: - if (is_vendor_hygon() && mutex_enabled) + if (mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); @@ -720,7 +720,10 @@ static int vpsp_psp_mutex_trylock(void) { int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - if (is_vendor_hygon() && mutex_enabled) + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) return psp_mutex_trylock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else return mutex_trylock(hygon_psp_hooks.sev_cmd_mutex); @@ -730,7 +733,10 @@ static int vpsp_psp_mutex_unlock(void) { int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - if (is_vendor_hygon() && mutex_enabled) + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); @@ -746,6 +752,9 @@ static int 
__vpsp_ring_buffer_enter_locked(int *error) struct csv_ringbuffer_queue *hi_queue; struct sev_device *sev = psp_master->sev_data; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) return -EEXIST; @@ -782,7 +791,7 @@ static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) unsigned int rb_ctl; struct sev_device *sev; - if (!psp) + if (!psp || !hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; if (*hygon_psp_hooks.psp_dead) @@ -845,6 +854,9 @@ static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) struct sev_user_data_status data; int rc; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + rc = __vpsp_ring_buffer_enter_locked(psp_ret); if (rc) goto end; @@ -959,6 +971,9 @@ int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, int ret = 0; struct csv_cmdptr_entry cmd = {0}; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + /* Get the retult directly if the command has been executed */ if (index >= 0 && vpsp_get_cmd_status(prio, index) != VPSP_CMD_STATUS_RUNNING) { @@ -1024,6 +1039,9 @@ int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) int index = -1; uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + /* ringbuffer mode check and parse command prio*/ rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, (struct vpsp_cmd *)&cmd); diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 57dd68298647..5de8b9991644 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -312,6 +312,9 @@ static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) struct vpsp_dev_ctrl vpsp_ctrl_op; int ret = -EFAULT; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { pr_info("%s: 
invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); return -EINVAL; @@ -372,6 +375,9 @@ int hygon_psp_additional_setup(struct sp_device *sp) struct device *dev = sp->dev; int ret = 0; + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + if (!psp_misc) { struct miscdevice *misc; @@ -500,7 +506,7 @@ int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) unsigned int phys_lsb, phys_msb; unsigned int reg, ret = 0; - if (!psp || !psp->sev_data) + if (!psp || !psp->sev_data || !hygon_psp_hooks.sev_dev_hooks_installed) return -ENODEV; if (*hygon_psp_hooks.psp_dead) @@ -564,7 +570,10 @@ int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) int rc; int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); - if (is_vendor_hygon() && mutex_enabled) { + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) { if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) { return -EBUSY; @@ -575,7 +584,7 @@ int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); - if (is_vendor_hygon() && mutex_enabled) + if (mutex_enabled) psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); @@ -586,7 +595,12 @@ int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) int psp_do_cmd(int cmd, void *data, int *psp_ret) { int rc; - if (is_vendor_hygon()) { + int mutex_enabled = READ_ONCE(hygon_psp_hooks.psp_mutex_enabled); + + if (!hygon_psp_hooks.sev_dev_hooks_installed) + return -ENODEV; + + if (mutex_enabled) { if (psp_mutex_lock_timeout(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex, PSP_MUTEX_TIMEOUT) != 1) return -EBUSY; @@ -595,7 +609,7 @@ int psp_do_cmd(int cmd, void *data, int *psp_ret) } rc = __psp_do_cmd_locked(cmd, data, psp_ret); - if (is_vendor_hygon()) + if (mutex_enabled) 
psp_mutex_unlock(&hygon_psp_hooks.psp_misc->data_pg_aligned->mb_mutex); else mutex_unlock(hygon_psp_hooks.sev_cmd_mutex); diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index cbb1a482fba4..8ea19fc736cb 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -295,11 +295,6 @@ int psp_dev_init(struct sp_device *sp) /* Request an irq */ if (is_vendor_hygon()) { - ret = hygon_psp_additional_setup(sp); - if (ret) { - dev_err(dev, "psp: unable to do additional setup\n"); - goto e_err; - } ret = sp_request_hygon_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); } else { ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); @@ -317,6 +312,18 @@ int psp_dev_init(struct sp_device *sp) if (ret) goto e_irq; + /** + * hygon_psp_additional_setup() needs to wait for + * sev_dev_install_hooks() to complete before it can be called. + */ + if (is_vendor_hygon()) { + ret = hygon_psp_additional_setup(sp); + if (ret) { + dev_err(dev, "psp: unable to do additional setup\n"); + goto e_irq; + } + } + /* Enable interrupt */ iowrite32(-1, psp->io_regs + psp->vdata->inten_reg); From 0540dfedc26ce2d1fc3a363c88c4c34b0428941e Mon Sep 17 00:00:00 2001 From: baizhaowei Date: Fri, 22 Nov 2024 15:18:30 +0800 Subject: [PATCH 98/99] crypto: ccp: Fix S4 kernel panic issue on HYGON psp hygon inclusion category: bugfix CVE: NA --------------------------- When running the kernel which is compiled with the config CONFIG_CRYPTO_DEV_CCP_DD=y, the S4 resume process will change the TMR region, but the CSV firmware still keeps TMR region information as before. This will lead to kernel PANIC when the system resumed from S4. To address this issue, we provide PM callbacks, the callbacks will be called during S4 and resume from S4. 
Signed-off-by: baizhaowei Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/501 (cherry picked from commit b81ee1c81409fcc8f4d59e860c199d259baf3bd7) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/Makefile | 3 +- drivers/crypto/ccp/hygon/psp-dev.c | 37 +++++++++++++++ drivers/crypto/ccp/hygon/psp-dev.h | 7 +++ drivers/crypto/ccp/hygon/sp-dev.c | 75 ++++++++++++++++++++++++++++++ drivers/crypto/ccp/hygon/sp-dev.h | 14 ++++++ drivers/crypto/ccp/hygon/sp-pci.c | 60 ++++++++++++++++++++++++ drivers/crypto/ccp/sp-pci.c | 5 ++ 7 files changed, 200 insertions(+), 1 deletion(-) create mode 100644 drivers/crypto/ccp/hygon/sp-dev.c diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 4b5d877b22a6..30c07dc5ab20 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -20,7 +20,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ hygon/psp-dev.o \ hygon/csv-dev.o \ hygon/ring-buffer.o \ - hygon/vpsp.o + hygon/vpsp.o \ + hygon/sp-dev.o ccp-$(CONFIG_TDM_DEV_HYGON) += hygon/tdm-dev.o diff --git a/drivers/crypto/ccp/hygon/psp-dev.c b/drivers/crypto/ccp/hygon/psp-dev.c index 5de8b9991644..6fba5f7c2f17 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.c +++ b/drivers/crypto/ccp/hygon/psp-dev.c @@ -736,3 +736,40 @@ int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, } #endif /* CONFIG_HYGON_PSP2CPU_CMD */ + +#ifdef CONFIG_PM_SLEEP + +void hygon_psp_dev_freeze(struct sp_device *sp) +{ + struct psp_device *psp; + + if (!psp_master) + return; + + psp = sp->psp_data; + if (psp == psp_master) + psp_pci_exit(); +} + +void hygon_psp_dev_thaw(struct sp_device *sp) +{ + struct psp_device *psp; + + if (!psp_master) + return; + + psp = sp->psp_data; + + /* re-enable interrupt */ + iowrite32(-1, psp->io_regs + psp->vdata->inten_reg); + + if (psp == psp_master) + psp_pci_init(); +} + +void hygon_psp_dev_restore(struct sp_device *sp) +{ + hygon_psp_dev_thaw(sp); +} + +#endif diff --git 
a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index 9352f3e5750e..9ac8e65edacc 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -13,6 +13,7 @@ #include #include #include +#include #include "sp-dev.h" @@ -72,4 +73,10 @@ int fixup_hygon_psp_caps(struct psp_device *psp); int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); +#ifdef CONFIG_PM_SLEEP +void hygon_psp_dev_freeze(struct sp_device *sp); +void hygon_psp_dev_thaw(struct sp_device *sp); +void hygon_psp_dev_restore(struct sp_device *sp); +#endif + #endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-dev.c b/drivers/crypto/ccp/hygon/sp-dev.c new file mode 100644 index 000000000000..727ffd16421e --- /dev/null +++ b/drivers/crypto/ccp/hygon/sp-dev.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Zhaowei Bai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include "sp-dev.h" + +#ifdef CONFIG_PM_SLEEP + +int hygon_sp_suspend(struct sp_device *sp) +{ + if (sp->dev_vdata->ccp_vdata) + ccp_dev_suspend(sp); + + return 0; +} + +int hygon_sp_resume(struct sp_device *sp) +{ + if (sp->dev_vdata->ccp_vdata) + ccp_dev_resume(sp); + + return 0; +} + +int hygon_sp_freeze(struct sp_device *sp) +{ + if (sp->dev_vdata->ccp_vdata) + ccp_dev_suspend(sp); + + if (sp->dev_vdata->psp_vdata) + hygon_psp_dev_freeze(sp); + + return 0; +} + +int hygon_sp_thaw(struct sp_device *sp) +{ + if (sp->dev_vdata->ccp_vdata) + ccp_dev_resume(sp); + + if (sp->dev_vdata->psp_vdata) + hygon_psp_dev_thaw(sp); + + return 0; +} + +int hygon_sp_poweroff(struct sp_device *sp) +{ + if (sp->dev_vdata->ccp_vdata) + ccp_dev_suspend(sp); + + return 0; +} + +int hygon_sp_restore(struct sp_device *sp) +{ + if (sp->dev_vdata->ccp_vdata) + ccp_dev_resume(sp); + + if (sp->dev_vdata->psp_vdata) + hygon_psp_dev_restore(sp); + + return 0; +} + +#endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/crypto/ccp/hygon/sp-dev.h b/drivers/crypto/ccp/hygon/sp-dev.h index e1996fc3b7c6..b11c2f225e10 100644 --- a/drivers/crypto/ccp/hygon/sp-dev.h +++ b/drivers/crypto/ccp/hygon/sp-dev.h @@ -15,6 +15,7 @@ #include "../ccp-dev.h" #include "../sp-dev.h" +#include "psp-dev.h" #ifdef CONFIG_X86_64 static inline bool is_vendor_hygon(void) @@ -27,4 +28,17 @@ static inline bool is_vendor_hygon(void) { return false; } extern const struct sp_dev_vdata hygon_dev_vdata[]; +#ifdef CONFIG_PM_SLEEP + +int hygon_sp_suspend(struct sp_device *sp); +int hygon_sp_resume(struct sp_device *sp); +int hygon_sp_freeze(struct sp_device *sp); +int hygon_sp_thaw(struct sp_device *sp); +int hygon_sp_poweroff(struct sp_device *sp); +int hygon_sp_restore(struct sp_device *sp); + +void hygon_set_pm_cb(struct pci_driver *drv); + +#endif + #endif /* __CCP_HYGON_SP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-pci.c b/drivers/crypto/ccp/hygon/sp-pci.c index 691127a0007b..88b42522bc23 100644 --- 
a/drivers/crypto/ccp/hygon/sp-pci.c +++ b/drivers/crypto/ccp/hygon/sp-pci.c @@ -72,3 +72,63 @@ const struct sp_dev_vdata hygon_dev_vdata[] = { #endif }, }; + +#ifdef CONFIG_PM_SLEEP + +static int hygon_sp_pci_suspend(struct device *dev) +{ + struct sp_device *sp = dev_get_drvdata(dev); + + return hygon_sp_suspend(sp); +} + +static int hygon_sp_pci_resume(struct device *dev) +{ + struct sp_device *sp = dev_get_drvdata(dev); + + return hygon_sp_resume(sp); +} + +static int hygon_sp_pci_freeze(struct device *dev) +{ + struct sp_device *sp = dev_get_drvdata(dev); + + return hygon_sp_freeze(sp); +} + +static int hygon_sp_pci_thaw(struct device *dev) +{ + struct sp_device *sp = dev_get_drvdata(dev); + + return hygon_sp_thaw(sp); +} + +static int hygon_sp_pci_poweroff(struct device *dev) +{ + struct sp_device *sp = dev_get_drvdata(dev); + + return hygon_sp_poweroff(sp); +} + +static int hygon_sp_pci_restore(struct device *dev) +{ + struct sp_device *sp = dev_get_drvdata(dev); + + return hygon_sp_restore(sp); +} + +static const struct dev_pm_ops hygon_pm_ops = { + .suspend = hygon_sp_pci_suspend, + .resume = hygon_sp_pci_resume, + .freeze = hygon_sp_pci_freeze, + .thaw = hygon_sp_pci_thaw, + .poweroff = hygon_sp_pci_poweroff, + .restore = hygon_sp_pci_restore, +}; + +void hygon_set_pm_cb(struct pci_driver *drv) +{ + drv->driver.pm = &hygon_pm_ops; +} + +#endif /* CONFIG_PM_SLEEP */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index a443d091caba..3723afa4189f 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -565,6 +565,11 @@ static struct pci_driver sp_pci_driver = { int sp_pci_init(void) { +#ifdef CONFIG_PM_SLEEP + /* Set pm driver callbacks for Hygon secure processor */ + if (is_vendor_hygon()) + hygon_set_pm_cb(&sp_pci_driver); +#endif return pci_register_driver(&sp_pci_driver); } From be869110d2ed410da777ad851c430d347aaa2b22 Mon Sep 17 00:00:00 2001 From: hanliyang Date: Wed, 4 Dec 2024 11:58:09 +0800 Subject: 
[PATCH 99/99] crypto: ccp: Fix some compile errors on Hygon interfaces hygon inclusion category: bugfix CVE: NA --------------------------- The commit 973eb41361b6 ("crypto: ccp: Fix S4 kernel panic issue on HYGON psp") introduced a HYGON-specific PM interface to resolve S4 issues. This commit assumed that the file .../hygon/sp-dev.c depended on CONFIG_CRYPTO_DEV_SP_PSP, which was incorrect because sp-dev.c serves as an abstraction for all secure processors. Consequently, we encountered compile errors on the aarch64 platform. To address these issues, we removed the dependency of .../hygon/sp-dev.c on CONFIG_CRYPTO_DEV_SP_PSP. Additionally, HYGON_PSP2CPU_CMD is depends on CONFIG_CRYPTO_DEV_SP_PSP, so we moved the related function declarations to resolve some compilation issues. Fixes: 973eb41361b6 ("crypto: ccp: Fix S4 kernel panic issue on HYGON psp") Fixes: a4a33c4bf37a ("crypto: ccp: Add another mailbox interrupt support for PSP sending command to X86") Signed-off-by: hanliyang Link: https://github.com/deepin-community/kernel/pull/513 (cherry picked from commit 334a3b2e2137dfffc5c98c2f1b1dd9463f0a4ebc) Signed-off-by: Wentao Guan --- drivers/crypto/ccp/Makefile | 6 +++--- drivers/crypto/ccp/hygon/psp-dev.h | 7 ------- drivers/crypto/ccp/hygon/sp-dev.c | 6 ++++++ drivers/crypto/ccp/hygon/sp-dev.h | 10 ++++++++-- include/linux/psp-hygon.h | 27 +++++++++++++-------------- 5 files changed, 30 insertions(+), 26 deletions(-) diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index 30c07dc5ab20..a4a3278f8661 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o -ccp-objs := sp-dev.o sp-platform.o +ccp-objs := sp-dev.o sp-platform.o \ + hygon/sp-dev.o ccp-$(CONFIG_CRYPTO_DEV_SP_CCP) += ccp-dev.o \ ccp-ops.o \ ccp-dev-v3.o \ @@ -20,8 +21,7 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ hygon/psp-dev.o \ hygon/csv-dev.o \ 
hygon/ring-buffer.o \ - hygon/vpsp.o \ - hygon/sp-dev.o + hygon/vpsp.o ccp-$(CONFIG_TDM_DEV_HYGON) += hygon/tdm-dev.o diff --git a/drivers/crypto/ccp/hygon/psp-dev.h b/drivers/crypto/ccp/hygon/psp-dev.h index 9ac8e65edacc..9352f3e5750e 100644 --- a/drivers/crypto/ccp/hygon/psp-dev.h +++ b/drivers/crypto/ccp/hygon/psp-dev.h @@ -13,7 +13,6 @@ #include #include #include -#include #include "sp-dev.h" @@ -73,10 +72,4 @@ int fixup_hygon_psp_caps(struct psp_device *psp); int sp_request_hygon_psp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data); -#ifdef CONFIG_PM_SLEEP -void hygon_psp_dev_freeze(struct sp_device *sp); -void hygon_psp_dev_thaw(struct sp_device *sp); -void hygon_psp_dev_restore(struct sp_device *sp); -#endif - #endif /* __CCP_HYGON_PSP_DEV_H__ */ diff --git a/drivers/crypto/ccp/hygon/sp-dev.c b/drivers/crypto/ccp/hygon/sp-dev.c index 727ffd16421e..86d666b31f37 100644 --- a/drivers/crypto/ccp/hygon/sp-dev.c +++ b/drivers/crypto/ccp/hygon/sp-dev.c @@ -36,8 +36,10 @@ int hygon_sp_freeze(struct sp_device *sp) if (sp->dev_vdata->ccp_vdata) ccp_dev_suspend(sp); +#ifdef CONFIG_CRYPTO_DEV_SP_PSP if (sp->dev_vdata->psp_vdata) hygon_psp_dev_freeze(sp); +#endif return 0; } @@ -47,8 +49,10 @@ int hygon_sp_thaw(struct sp_device *sp) if (sp->dev_vdata->ccp_vdata) ccp_dev_resume(sp); +#ifdef CONFIG_CRYPTO_DEV_SP_PSP if (sp->dev_vdata->psp_vdata) hygon_psp_dev_thaw(sp); +#endif return 0; } @@ -66,8 +70,10 @@ int hygon_sp_restore(struct sp_device *sp) if (sp->dev_vdata->ccp_vdata) ccp_dev_resume(sp); +#ifdef CONFIG_CRYPTO_DEV_SP_PSP if (sp->dev_vdata->psp_vdata) hygon_psp_dev_restore(sp); +#endif return 0; } diff --git a/drivers/crypto/ccp/hygon/sp-dev.h b/drivers/crypto/ccp/hygon/sp-dev.h index b11c2f225e10..973fdf651878 100644 --- a/drivers/crypto/ccp/hygon/sp-dev.h +++ b/drivers/crypto/ccp/hygon/sp-dev.h @@ -12,10 +12,10 @@ #include #include +#include #include "../ccp-dev.h" #include "../sp-dev.h" -#include "psp-dev.h" #ifdef CONFIG_X86_64 
static inline bool is_vendor_hygon(void) @@ -39,6 +39,12 @@ int hygon_sp_restore(struct sp_device *sp); void hygon_set_pm_cb(struct pci_driver *drv); -#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP +void hygon_psp_dev_freeze(struct sp_device *sp); +void hygon_psp_dev_thaw(struct sp_device *sp); +void hygon_psp_dev_restore(struct sp_device *sp); +#endif /* CONFIG_CRYPTO_DEV_SP_PSP */ + +#endif /* CONFIG_PM_SLEEP */ #endif /* __CCP_HYGON_SP_DEV_H__ */ diff --git a/include/linux/psp-hygon.h b/include/linux/psp-hygon.h index 41308eed5a27..7e186ccffbd8 100644 --- a/include/linux/psp-hygon.h +++ b/include/linux/psp-hygon.h @@ -364,6 +364,19 @@ int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret); int psp_do_cmd(int cmd, void *data, int *psp_ret); +typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); + +#ifdef CONFIG_HYGON_PSP2CPU_CMD +int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier); +int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier); +#else /* !CONFIG_HYGON_PSP2CPU_CMD */ +static int __maybe_unused +psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) { return -ENODEV; } + +static int __maybe_unused +psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) { return -ENODEV; } +#endif /* CONFIG_HYGON_PSP2CPU_CMD */ + int csv_ring_buffer_queue_init(void); int csv_ring_buffer_queue_free(void); int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); @@ -419,18 +432,4 @@ kvm_pv_psp_op(struct kvm_vpsp *vpsp, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, gpa_t table_gpa) { return -ENODEV; } #endif /* CONFIG_CRYPTO_DEV_SP_PSP */ -typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); - -#ifdef CONFIG_HYGON_PSP2CPU_CMD - -int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier); -int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier); - -#else /* !CONFIG_HYGON_PSP2CPU_CMD */ - -int psp_register_cmd_notifier(uint32_t cmd_id, p2c_notifier_t 
notifier) { return -ENODEV; } -int psp_unregister_cmd_notifier(uint32_t cmd_id, p2c_notifier_t notifier) { return -ENODEV; } - -#endif /* CONFIG_HYGON_PSP2CPU_CMD */ - #endif /* __PSP_HYGON_H__ */