linux_5.10: weekly release 2024.07.20

-142736, Sync SPACC driver code.
-2303c6, Add suspend-to-RAM feature.
-d3bff8, Support PDM mic using AEC.
-203610, Support new SPI NAND flash: HYF1GQ4UDACAE.

Change-Id: Iec21bccaee43c4a71213b42b88f41a45527fd8a5
sophgo-forum-service
2024-07-20 21:41:42 +08:00
committed by carbon
parent 2330e10fa0
commit 50fd0630f7
28 changed files with 1650 additions and 27 deletions

View File

@ -41,6 +41,7 @@ config RISCV
select CLINT_TIMER if !MMU
select COMMON_CLK
select COMPAT_BINFMT_ELF if BINFMT_ELF && COMPAT
select CPU_PM if SUSPEND
select EDAC_SUPPORT
select DMA_DIRECT_REMAP
select GENERIC_ALLOCATOR
@ -517,6 +518,11 @@ config BUILTIN_DTB
depends on RISCV_M_MODE
depends on OF
config ARCH_SUSPEND_POSSIBLE
def_bool y
help
Enable RISC-V deep sleep (suspend to RAM) support.
menu "Power management options"
source "kernel/power/Kconfig"

View File

@ -66,4 +66,23 @@
#error "Unexpected __SIZEOF_SHORT__"
#endif
#ifdef __ASSEMBLY__
/* Common assembly source macros */
#ifdef CONFIG_XIP_KERNEL
//TODO
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#else
.macro XIP_FIXUP_OFFSET reg
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
.endm
#endif /* CONFIG_XIP_KERNEL */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_ASM_H */

View File

@ -27,6 +27,7 @@ enum sbi_ext_id {
SBI_EXT_IPI = 0x735049,
SBI_EXT_RFENCE = 0x52464E43,
SBI_EXT_HSM = 0x48534D,
SBI_EXT_SUSP = 0x53555350,
};
enum sbi_ext_base_fid {
@ -73,6 +74,14 @@ enum sbi_hsm_hart_status {
SBI_HSM_HART_STATUS_STOP_PENDING,
};
enum sbi_ext_susp_fid {
SBI_EXT_SUSP_SYSTEM_SUSPEND = 0,
};
enum sbi_ext_susp_sleep_type {
SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0,
};
#define SBI_SPEC_VERSION_DEFAULT 0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f

View File

@ -0,0 +1,40 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#ifndef _ASM_RISCV_SUSPEND_H
#define _ASM_RISCV_SUSPEND_H
#include <asm/ptrace.h>
struct suspend_context {
/* Saved and restored by low-level functions */
struct pt_regs regs;
/* Saved and restored by high-level functions */
unsigned long scratch;
unsigned long tvec;
unsigned long ie;
#ifdef CONFIG_MMU
unsigned long satp;
#endif
};
/* Low-level CPU suspend entry function */
int __cpu_suspend_enter(struct suspend_context *context);
/* High-level CPU suspend which will save context and call finish() */
int cpu_suspend(unsigned long arg,
int (*finish)(unsigned long arg,
unsigned long entry,
unsigned long context));
/* Low-level CPU resume entry function */
int __cpu_resume_enter(unsigned long hartid, unsigned long context);
/* Used to save and restore the CSRs */
void suspend_save_csrs(struct suspend_context *context);
void suspend_restore_csrs(struct suspend_context *context);
#endif

View File

@ -44,6 +44,8 @@ obj-$(CONFIG_SMP) += cpu_ops_spinwait.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
obj-$(CONFIG_CPU_PM) += suspend_entry.o suspend.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o

View File

@ -10,6 +10,7 @@
#include <linux/sched.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/suspend.h>
void asm_offsets(void)
{
@ -145,6 +146,8 @@ void asm_offsets(void)
OFFSET(PT_BADADDR, pt_regs, badaddr);
OFFSET(PT_CAUSE, pt_regs, cause);
OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);
/*
* THREAD_{F,X}* might be larger than a S-type offset can handle, but
* these are used in performance-sensitive assembly so we can't resort

View File

@ -61,7 +61,8 @@ pe_head_start:
.align 2
#ifdef CONFIG_MMU
relocate:
.global relocate_enable_mmu
relocate_enable_mmu:
/* Relocate return address */
li a1, PAGE_OFFSET + LOAD_OFFSET
la a2, _start
@ -150,7 +151,7 @@ secondary_start_common:
#ifdef CONFIG_MMU
/* Enable virtual memory and relocate to virtual address */
la a0, swapper_pg_dir
call relocate
call relocate_enable_mmu
#endif
call setup_trap_vector
tail smp_callin
@ -258,7 +259,7 @@ clear_bss_done:
call setup_vm
#ifdef CONFIG_MMU
la a0, early_pg_dir
call relocate
call relocate_enable_mmu
#endif /* CONFIG_MMU */
call setup_trap_vector

View File

@ -0,0 +1,131 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#define pr_fmt(fmt) "suspend: " fmt
#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/suspend.h>
void suspend_save_csrs(struct suspend_context *context)
{
context->scratch = csr_read(CSR_SCRATCH);
context->tvec = csr_read(CSR_TVEC);
context->ie = csr_read(CSR_IE);
/*
* No need to save/restore IP CSR (i.e. MIP or SIP) because:
*
* 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
* external devices (such as interrupt controller, timer, etc).
* 2. For MMU (S-mode) kernel, the bits in SIP are set by
* M-mode firmware and external devices (such as interrupt
* controller, etc).
*/
#ifdef CONFIG_MMU
context->satp = csr_read(CSR_SATP);
#endif
}
void suspend_restore_csrs(struct suspend_context *context)
{
csr_write(CSR_SCRATCH, context->scratch);
csr_write(CSR_TVEC, context->tvec);
csr_write(CSR_IE, context->ie);
#ifdef CONFIG_MMU
csr_write(CSR_SATP, context->satp);
#endif
}
int cpu_suspend(unsigned long arg,
int (*finish)(unsigned long arg,
unsigned long entry,
unsigned long context))
{
int rc = 0;
struct suspend_context context = { 0 };
/* Finisher should be non-NULL */
if (!finish)
return -EINVAL;
/* Save additional CSRs*/
suspend_save_csrs(&context);
/*
* Function graph tracer state gets inconsistent when the kernel
* calls functions that never return (aka finishers) hence disable
* graph tracing during their execution.
*/
pause_graph_tracing();
/* Save context on stack */
if (__cpu_suspend_enter(&context)) {
/* Call the finisher */
rc = finish(arg, __pa_symbol(__cpu_resume_enter),
(ulong)&context);
/*
* Should never reach here, unless the suspend finisher
* fails. Successful cpu_suspend() should return from
* __cpu_resume_enter()
*/
if (!rc)
rc = -EOPNOTSUPP;
}
/* Enable function graph tracer */
unpause_graph_tracing();
/* Restore additional CSRs */
suspend_restore_csrs(&context);
return rc;
}
#ifdef CONFIG_RISCV_SBI
static int sbi_system_suspend(unsigned long sleep_type,
unsigned long resume_addr,
unsigned long opaque)
{
struct sbiret ret;
ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
sleep_type, resume_addr, opaque, 0, 0, 0);
if (ret.error)
return sbi_err_map_linux_errno(ret.error);
return ret.value;
}
static int sbi_system_suspend_enter(suspend_state_t state)
{
return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend);
}
static const struct platform_suspend_ops sbi_system_suspend_ops = {
.valid = suspend_valid_only_mem,
.enter = sbi_system_suspend_enter,
};
static int __init sbi_system_suspend_init(void)
{
pr_info("SBI SUSP extension detecting\n");
if (sbi_probe_extension(SBI_EXT_SUSP) > 0) {
pr_info("SBI SUSP extension detected\n");
if (IS_ENABLED(CONFIG_SUSPEND))
suspend_set_ops(&sbi_system_suspend_ops);
}
return 0;
}
arch_initcall(sbi_system_suspend_init);
#endif

View File

@ -0,0 +1,124 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2021 Western Digital Corporation or its affiliates.
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/csr.h>
.text
.altmacro
.option norelax
ENTRY(__cpu_suspend_enter)
/* Save registers (except A0 and T0-T6) */
REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
REG_S gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
REG_S tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
REG_S s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
REG_S s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
REG_S a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
REG_S a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
REG_S a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
REG_S a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
REG_S a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
REG_S a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
REG_S a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
REG_S s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
REG_S s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
REG_S s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
REG_S s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
REG_S s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
REG_S s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
REG_S s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
REG_S s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
REG_S s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
REG_S s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
/* Save CSRs */
csrr t0, CSR_EPC
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
csrr t0, CSR_STATUS
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
csrr t0, CSR_TVAL
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
csrr t0, CSR_CAUSE
REG_S t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
/* Return non-zero value */
li a0, 1
/* Return to C code */
ret
END(__cpu_suspend_enter)
ENTRY(__cpu_resume_enter)
/* Load the global pointer */
.option push
.option norelax
la gp, __global_pointer$
.option pop
#ifdef CONFIG_MMU
/* Save A0 and A1 */
add t0, a0, zero
add t1, a1, zero
/* Enable MMU */
la a0, swapper_pg_dir
XIP_FIXUP_OFFSET a0
call relocate_enable_mmu
/* Restore A0 and A1 */
add a0, t0, zero
add a1, t1, zero
#endif
/* Make A0 point to suspend context */
add a0, a1, zero
/* Restore CSRs */
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
csrw CSR_EPC, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
csrw CSR_STATUS, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
csrw CSR_TVAL, t0
REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
csrw CSR_CAUSE, t0
/* Restore registers (except A0 and T0-T6) */
REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
/* Return zero value */
add a0, zero, zero
/* Return to C code */
ret
END(__cpu_resume_enter)

View File

@ -2673,7 +2673,7 @@ static void cv180x_clk_resume(void)
/* switch clock to xtal */
writel(0xffffffff, clk_data->base + REG_CLK_BYP_0);
writel(0x0000000f, clk_data->base + REG_CLK_BYP_1);
writel(0xffffffff, clk_data->base + REG_CLK_BYP_1);
memcpy_toio(clk_data->base + REG_CLK_EN_START,
clk_data->clken_saved_regs,
@ -2691,9 +2691,9 @@ static void cv180x_clk_resume(void)
clk_data->g2_clkdiv_saved_regs,
REG_CLK_G2_DIV_NUM * 4);
memcpy_toio(clk_data->base + REG_PLL_G6_CSR_START,
clk_data->pll_g6_csr_saved_regs,
REG_PLL_G6_CSR_NUM * 4);
//memcpy_toio(clk_data->base + REG_PLL_G6_CSR_START,
// clk_data->pll_g6_csr_saved_regs,
// REG_PLL_G6_CSR_NUM * 4);
/* wait for pll setting updated */
while (readl(clk_data->base + REG_PLL_G6_STATUS) & 0x7) {

View File

@ -2679,7 +2679,7 @@ static void cv181x_clk_resume(void)
/* switch clock to xtal */
writel(0xffffffff, clk_data->base + REG_CLK_BYP_0);
writel(0x0000000f, clk_data->base + REG_CLK_BYP_1);
writel(0xffffffff, clk_data->base + REG_CLK_BYP_1);
memcpy_toio(clk_data->base + REG_CLK_EN_START,
clk_data->clken_saved_regs,
@ -2697,9 +2697,9 @@ static void cv181x_clk_resume(void)
clk_data->g2_clkdiv_saved_regs,
REG_CLK_G2_DIV_NUM * 4);
memcpy_toio(clk_data->base + REG_PLL_G6_CSR_START,
clk_data->pll_g6_csr_saved_regs,
REG_PLL_G6_CSR_NUM * 4);
// memcpy_toio(clk_data->base + REG_PLL_G6_CSR_START,
// clk_data->pll_g6_csr_saved_regs,
// REG_PLL_G6_CSR_NUM * 4);
/* wait for pll setting updated */
while (readl(clk_data->base + REG_PLL_G6_STATUS) & 0x7) {

View File

@ -901,4 +901,12 @@ config CRYPTO_DEV_SA2UL
used for crypto offload. Select this if you want to use hardware
acceleration for cryptographic algorithms on these devices.
config CRYPTO_DEV_CVITEK_SPACC
tristate "Support for cvitek spacc"
depends on ARCH_CVITEK
help
This driver provides support for the SPACC crypto engine found in CVITEK SoCs.
It offloads cryptographic operations and exposes a generic character-device interface to user space.
To compile this driver as a module, choose M here: the module will be called cvitek-spacc.
endif # CRYPTO_HW

View File

@ -51,3 +51,5 @@ obj-$(CONFIG_CRYPTO_DEV_ARTPEC6) += axis/
obj-$(CONFIG_CRYPTO_DEV_ZYNQMP_AES) += xilinx/
obj-y += hisilicon/
obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
obj-$(CONFIG_CRYPTO_DEV_CVITEK_SPACC) += cvitek-spacc.o

View File

@ -0,0 +1,57 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _CVITEK_SPACC_REGS_H_
#define _CVITEK_SPACC_REGS_H_
// Register offset
#define CRYPTODMA_DMA_CTRL 0x0
#define CRYPTODMA_INT_MASK 0x4
#define CRYPTODMA_DES_BASE_L 0x8
#define CRYPTODMA_DES_BASE_H 0xC
#define CRYPTODMA_WR_INT 0x10
#define CRYPTODMA_DES_KEY 0x100
#define CRYPTODMA_DES_IV 0x180
#define CRYPTODMA_SHA_PARA 0x1C0
// DMA Descriptor
#define CRYPTODMA_CTRL 0x00
#define CRYPTODMA_CIPHER 0x01
#define CRYPTODMA_NEXT_PTR_ADDR_L 0x02
#define CRYPTODMA_NEXT_PTR_ADDR_H 0x03
#define CRYPTODMA_SRC_ADDR_L 0x04
#define CRYPTODMA_SRC_ADDR_H 0x05
#define CRYPTODMA_DST_ADDR_L 0x06
#define CRYPTODMA_DST_ADDR_H 0x07
#define CRYPTODMA_DATA_AMOUNT_L 0x08
#define CRYPTODMA_SRC_LEN 0x08
#define CRYPTODMA_DATA_AMOUNT_H 0x09
#define CRYPTODMA_DST_LEN 0x0A
#define CRYPTODMA_KEY 0x0A
#define CRYPTODMA_IV 0x12
#define DES_USE_BYPASS BIT(8)
#define DES_USE_AES BIT(9)
#define DES_USE_DES BIT(10)
#define DES_USE_SM4 BIT(11)
#define DES_USE_SHA BIT(12)
#define DES_USE_BASE64 BIT(13)
#define DES_USE_KEY0 BIT(16)
#define DES_USE_KEY1 BIT(17)
#define DES_USE_KEY2 BIT(18)
#define DES_USE_DESCRIPTOR_KEY BIT(19)
#define DES_USE_IV0 BIT(20)
#define DES_USE_IV1 BIT(21)
#define DES_USE_IV2 BIT(22)
#define DES_USE_DESCRIPTOR_IV BIT(23)
// Cipher control for AES
#define DECRYPT_ENABLE 0x0
#define CBC_ENABLE 0x1
#define AES_KEY_MODE 0x4
// DMA control
#define DMA_ENABLE 1
#define DMA_DESCRIPTOR_MODE 1
#define DMA_READ_MAX_BURST 16
#define DMA_WRITE_MAX_BURST 6
#endif

View File

@ -0,0 +1,748 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2022-2023 CVITEK
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/version.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <asm/cacheflush.h>
#include <linux/dma-buf.h>
#include <linux/dma-map-ops.h>
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/cdev.h>
#include <linux/interrupt.h>
#include <linux/cvitek_spacc.h>
#include <cvitek-spacc-regs.h>
#define DEVICE_NAME "spacc"
static char flag = 'n';
static DECLARE_WAIT_QUEUE_HEAD(wq);
struct cvi_spacc {
struct device *dev;
struct cdev cdev;
dev_t tdev;
void __iomem *spacc_base;
struct class *spacc_class;
void *buffer;
u32 buffer_size;
u32 used_size;
// for sha256/sha1
u32 state[8];
u32 result_size;
#ifdef CONFIG_PM_SLEEP
struct clk *efuse_clk;
#endif
};
#ifdef CONFIG_PM_SLEEP
static int cvitek_spacc_suspend(struct device *dev)
{
struct cvi_spacc *spacc = dev_get_drvdata(dev);
void __iomem *sec_top;
clk_prepare_enable(spacc->efuse_clk);
sec_top = ioremap(0x020b0000, 4);
iowrite32(0x3, sec_top);
iounmap(sec_top);
clk_disable_unprepare(spacc->efuse_clk);
return 0;
}
static int cvitek_spacc_resume(struct device *dev)
{
struct cvi_spacc *spacc = dev_get_drvdata(dev);
void __iomem *sec_top;
clk_prepare_enable(spacc->efuse_clk);
sec_top = ioremap(0x020b0000, 4);
iowrite32(0x0, sec_top);
iounmap(sec_top);
clk_disable_unprepare(spacc->efuse_clk);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(cvitek_spacc_pm_ops, cvitek_spacc_suspend, cvitek_spacc_resume);
static inline void cvi_sha256_init(struct cvi_spacc *spacc)
{
spacc->state[0] = cpu_to_be32(0x6A09E667);
spacc->state[1] = cpu_to_be32(0xBB67AE85);
spacc->state[2] = cpu_to_be32(0x3C6EF372);
spacc->state[3] = cpu_to_be32(0xA54FF53A);
spacc->state[4] = cpu_to_be32(0x510E527F);
spacc->state[5] = cpu_to_be32(0x9B05688C);
spacc->state[6] = cpu_to_be32(0x1F83D9AB);
spacc->state[7] = cpu_to_be32(0x5BE0CD19);
}
static inline void cvi_sha1_init(struct cvi_spacc *spacc)
{
spacc->state[0] = cpu_to_be32(0x67452301);
spacc->state[1] = cpu_to_be32(0xEFCDAB89);
spacc->state[2] = cpu_to_be32(0x98BADCFE);
spacc->state[3] = cpu_to_be32(0x10325476);
spacc->state[4] = cpu_to_be32(0xC3D2E1F0);
}
static inline void trigger_cryptodma_engine_and_wait_finish(struct cvi_spacc *spacc)
{
// Set cryptodma control
iowrite32(0x3, spacc->spacc_base + CRYPTODMA_INT_MASK);
// Clear any pending interrupt status; this is required before starting a new transfer
iowrite32(0x3, spacc->spacc_base + CRYPTODMA_WR_INT);
// Trigger cryptodma engine
iowrite32(DMA_WRITE_MAX_BURST << 24 |
DMA_READ_MAX_BURST << 16 |
DMA_DESCRIPTOR_MODE << 1 | DMA_ENABLE, spacc->spacc_base + CRYPTODMA_DMA_CTRL);
wait_event_interruptible(wq, flag == 'y');
flag = 'n';
}
static inline void get_hash_result(struct cvi_spacc *spacc, int count)
{
u32 i;
u32 *result = (u32 *)spacc->buffer;
for (i = 0; i < count; i++)
result[i] = ioread32(spacc->spacc_base + CRYPTODMA_SHA_PARA + i * 4);
}
static inline void setup_dma_descriptor(struct cvi_spacc *spacc, uint32_t *dma_descriptor)
{
phys_addr_t descriptor_phys;
descriptor_phys = virt_to_phys(dma_descriptor);
arch_sync_dma_for_device(descriptor_phys, /*sizeof(dma_descriptor)*/ 4 * 22, DMA_TO_DEVICE);
// set dma descriptor addr
iowrite32((uint32_t)((uint64_t)descriptor_phys & 0xFFFFFFFF), spacc->spacc_base + CRYPTODMA_DES_BASE_L);
iowrite32((uint32_t)((uint64_t)descriptor_phys >> 32), spacc->spacc_base + CRYPTODMA_DES_BASE_H);
}
static inline void setup_src(u32 *dma_descriptor, uintptr_t src, u32 len)
{
phys_addr_t src_phys;
src_phys = virt_to_phys((void *)src);
arch_sync_dma_for_device(src_phys, len, DMA_TO_DEVICE);
dma_descriptor[CRYPTODMA_SRC_LEN] = len;
dma_descriptor[CRYPTODMA_SRC_ADDR_L] = (uint32_t)((uint64_t)src_phys & 0xFFFFFFFF);
dma_descriptor[CRYPTODMA_SRC_ADDR_H] = (uint32_t)((uint64_t)src_phys >> 32);
}
static void setup_src_dst(u32 *dma_descriptor, phys_addr_t buffer, u32 len)
{
dma_descriptor[CRYPTODMA_SRC_LEN] = len;
dma_descriptor[CRYPTODMA_SRC_ADDR_L] = (uint32_t)((uint64_t)buffer & 0xFFFFFFFF);
dma_descriptor[CRYPTODMA_SRC_ADDR_H] = (uint32_t)((uint64_t)buffer >> 32);
dma_descriptor[CRYPTODMA_DST_ADDR_L] = (uint32_t)((uint64_t)buffer & 0xFFFFFFFF);
dma_descriptor[CRYPTODMA_DST_ADDR_H] = (uint32_t)((uint64_t)buffer >> 32);
}
#define setup_dst(dst)\
do {\
phys_addr_t dst_phys = virt_to_phys((void *)dst);\
dma_descriptor[CRYPTODMA_DST_ADDR_L] = (uint32_t)((uint64_t)dst_phys & 0xFFFFFFFF);\
dma_descriptor[CRYPTODMA_DST_ADDR_H] = (uint32_t)((uint64_t)dst_phys >> 32);\
} while (0)
static inline void setup_mode(u32 *dma_descriptor, SPACC_ALGO_MODE_E mode, unsigned char *iv)
{
switch (mode) {
case SPACC_ALGO_MODE_CBC:
dma_descriptor[CRYPTODMA_CTRL] |= DES_USE_DESCRIPTOR_IV;
dma_descriptor[CRYPTODMA_CIPHER] = CBC_ENABLE << 1;
memcpy(&dma_descriptor[CRYPTODMA_IV], iv, 16);
break;
case SPACC_ALGO_MODE_CTR:
dma_descriptor[CRYPTODMA_CTRL] |= DES_USE_DESCRIPTOR_IV;
dma_descriptor[CRYPTODMA_CIPHER] = 0x1 << 2;
memcpy(&dma_descriptor[CRYPTODMA_IV], iv, 16);
break;
case SPACC_ALGO_MODE_ECB:
default:
break;
}
}
static inline void setup_key_size(u32 *dma_descriptor, SPACC_KEY_SIZE_E size, unsigned char *key)
{
switch (size) {
case SPACC_KEY_SIZE_64BITS:
memcpy(&dma_descriptor[CRYPTODMA_KEY], key, 8);
break;
case SPACC_KEY_SIZE_128BITS:
dma_descriptor[CRYPTODMA_CIPHER] |= (0x1 << 5);
memcpy(&dma_descriptor[CRYPTODMA_KEY], key, 16);
break;
case SPACC_KEY_SIZE_192BITS:
dma_descriptor[CRYPTODMA_CIPHER] |= (0x1 << 4);
memcpy(&dma_descriptor[CRYPTODMA_KEY], key, 24);
break;
case SPACC_KEY_SIZE_256BITS:
dma_descriptor[CRYPTODMA_CIPHER] |= (0x1 << 3);
memcpy(&dma_descriptor[CRYPTODMA_KEY], key, 32);
break;
default:
break;
}
}
static inline void setup_action(u32 *dma_descriptor, SPACC_ACTION_E action)
{
if (action == SPACC_ACTION_ENCRYPTION)
dma_descriptor[CRYPTODMA_CIPHER] |= 0x1;
}
static irqreturn_t cvitek_spacc_irq(int irq, void *data)
{
struct cvi_spacc *spacc = (struct cvi_spacc *)data;
iowrite32(0x3, spacc->spacc_base + CRYPTODMA_WR_INT);
flag = 'y';
wake_up_interruptible(&wq);
return IRQ_HANDLED;
}
int spacc_sha256(struct cvi_spacc *spacc, uintptr_t src, uint32_t len)
{
__aligned(32) u32 dma_descriptor[22] = {0};
u32 i;
// must mark DES_USE_DESCRIPTOR_KEY flag
dma_descriptor[CRYPTODMA_CTRL] = DES_USE_DESCRIPTOR_KEY | DES_USE_SHA | 0xF;
dma_descriptor[CRYPTODMA_CIPHER] = (0x1 << 1) | 0x1;
for (i = 0; i < 8; i++)
dma_descriptor[CRYPTODMA_KEY + i] = spacc->state[i];
setup_src(dma_descriptor, src, len);
setup_dma_descriptor(spacc, dma_descriptor);
trigger_cryptodma_engine_and_wait_finish(spacc);
return 0;
}
int spacc_sha1(struct cvi_spacc *spacc, uintptr_t src, uint32_t len)
{
__aligned(32) u32 dma_descriptor[22] = {0};
u32 i;
// must mark DES_USE_DESCRIPTOR_KEY flag
dma_descriptor[CRYPTODMA_CTRL] = DES_USE_DESCRIPTOR_KEY | DES_USE_SHA | 0xF;
dma_descriptor[CRYPTODMA_CIPHER] = 0x1;
for (i = 0; i < 5; i++)
dma_descriptor[CRYPTODMA_KEY + i] = spacc->state[i];
setup_src(dma_descriptor, src, len);
setup_dma_descriptor(spacc, dma_descriptor);
trigger_cryptodma_engine_and_wait_finish(spacc);
return 0;
}
int spacc_base64(struct cvi_spacc *spacc, phys_addr_t src, uint32_t len, SPACC_ACTION_E action)
{
__aligned(32) u32 dma_descriptor[22] = {0};
dma_descriptor[CRYPTODMA_CTRL] = DES_USE_BASE64 | 0xF;
if (action == SPACC_ACTION_ENCRYPTION) {
dma_descriptor[CRYPTODMA_CIPHER] = 0x1;
spacc->result_size = (len + (3 - 1)) / 3 * 4;
dma_descriptor[CRYPTODMA_DST_LEN] = spacc->result_size;
} else {
spacc->result_size = (len / 4) * 3;
dma_descriptor[CRYPTODMA_DST_LEN] = spacc->result_size;
}
setup_src_dst(dma_descriptor, src, len);
setup_dma_descriptor(spacc, dma_descriptor);
trigger_cryptodma_engine_and_wait_finish(spacc);
return 0;
}
int spacc_aes(struct cvi_spacc *spacc, phys_addr_t src, uint32_t len, spacc_aes_config_s config)
{
__aligned(32) u32 dma_descriptor[22] = {0};
spacc->result_size = len;
dma_descriptor[CRYPTODMA_CTRL] = DES_USE_DESCRIPTOR_KEY | DES_USE_AES | 0xF;
setup_mode(dma_descriptor, config.mode, config.iv);
setup_key_size(dma_descriptor, config.size, config.key);
setup_action(dma_descriptor, config.action);
setup_src_dst(dma_descriptor, src, len);
setup_dma_descriptor(spacc, dma_descriptor);
trigger_cryptodma_engine_and_wait_finish(spacc);
return 0;
}
int spacc_sm4(struct cvi_spacc *spacc, phys_addr_t src, uint32_t len, spacc_sm4_config_s config)
{
__aligned(32) u32 dma_descriptor[22] = {0};
spacc->result_size = len;
dma_descriptor[CRYPTODMA_CTRL] = DES_USE_DESCRIPTOR_KEY | DES_USE_SM4 | 0xF;
setup_mode(dma_descriptor, config.mode, config.iv);
setup_key_size(dma_descriptor, config.size, config.key);
setup_action(dma_descriptor, config.action);
setup_src_dst(dma_descriptor, src, len);
setup_dma_descriptor(spacc, dma_descriptor);
trigger_cryptodma_engine_and_wait_finish(spacc);
return 0;
}
int spacc_des(struct cvi_spacc *spacc, phys_addr_t src, uint32_t len, spacc_des_config_s config, int is_tdes)
{
__aligned(32) u32 dma_descriptor[22] = {0};
spacc->result_size = len;
dma_descriptor[CRYPTODMA_CTRL] = DES_USE_DESCRIPTOR_KEY | DES_USE_DES | 0xF;
setup_mode(dma_descriptor, config.mode, config.iv);
if (is_tdes) {
dma_descriptor[CRYPTODMA_CIPHER] |= (0x1 << 3);
memcpy(&dma_descriptor[CRYPTODMA_KEY], config.key, 24);
} else {
memcpy(&dma_descriptor[CRYPTODMA_KEY], config.key, 8);
}
setup_action(dma_descriptor, config.action);
setup_src_dst(dma_descriptor, src, len);
setup_dma_descriptor(spacc, dma_descriptor);
trigger_cryptodma_engine_and_wait_finish(spacc);
return 0;
}
static int cvi_spacc_init_buffer(struct cvi_spacc *spacc, size_t size)
{
unsigned int order = get_order(size);
struct page *page;
if (size == spacc->buffer_size) {
return 0;
} else if (spacc->buffer_size) {
free_pages((unsigned long)spacc->buffer, get_order(spacc->buffer_size));
spacc->buffer_size = 0;
}
page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!page)
return -ENOMEM;
spacc->buffer = page_address(page);
spacc->buffer_size = size;
return 0;
}
static int spacc_open(struct inode *inode, struct file *file)
{
struct cvi_spacc *spacc;
spacc = container_of(inode->i_cdev, struct cvi_spacc, cdev);
spacc->used_size = 0;
spacc->result_size = 0;
file->private_data = spacc;
return 0;
}
static ssize_t spacc_read(struct file *filp, char *buf, size_t count, loff_t *f_pos)
{
struct cvi_spacc *spacc = filp->private_data;
int ret = 0;
if (spacc->result_size == 0) {
pr_err("spacc result is 0\n");
return -1;
}
if (count < spacc->result_size)
return -1;
ret = copy_to_user(buf, spacc->buffer, spacc->result_size);
if (ret != 0)
return -1;
return spacc->result_size;
}
static ssize_t spacc_write(struct file *filp, const char *buf, size_t count, loff_t *f_pos)
{
struct cvi_spacc *spacc = filp->private_data;
int ret = spacc->buffer_size - spacc->used_size;
if (ret <= 0)
return spacc->used_size;
if (count > ret)
count = ret;
ret = copy_from_user(((unsigned char *)spacc->buffer + spacc->used_size), buf, count);
if (ret != 0)
return -1;
spacc->used_size += count;
return spacc->used_size;
}
static int spacc_release(struct inode *inode, struct file *file)
{
return 0;
}
static long spacc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct cvi_spacc *spacc = filp->private_data;
int ret;
switch (cmd) {
case IOCTL_SPACC_CREATE_MEMPOOL: {
unsigned int size = 0;
ret = copy_from_user((unsigned char *)&size, (unsigned char *)arg, sizeof(size));
if (ret != 0)
return -1;
ret = cvi_spacc_init_buffer(spacc, size);
if (ret != 0)
return -1;
break;
}
case IOCTL_SPACC_GET_MEMPOOL_SIZE: {
ret = copy_to_user((unsigned char *)arg, (unsigned char *)&spacc->buffer_size,
sizeof(spacc->buffer_size));
if (ret != 0)
return -1;
break;
}
case IOCTL_SPACC_SHA256_ACTION: {
if (spacc->used_size & 0x3F) {
pr_err("used_size : %d\n", spacc->used_size);
return -1;
}
cvi_sha256_init(spacc);
ret = spacc_sha256(spacc, (uintptr_t)spacc->buffer, spacc->used_size);
if (ret < 0) {
pr_err("plat_cryptodma_do failed\n");
return -1;
}
get_hash_result(spacc, 8);
spacc->result_size = 32;
spacc->used_size = 0;
break;
}
case IOCTL_SPACC_SHA1_ACTION: {
if (spacc->used_size & 0x3F) {
pr_err("spacc_dev->used_size : %d\n", spacc->used_size);
return -1;
}
cvi_sha1_init(spacc);
ret = spacc_sha1(spacc, (uintptr_t)spacc->buffer, spacc->used_size);
if (ret < 0) {
pr_err("plat_cryptodma_do failed\n");
return -1;
}
get_hash_result(spacc, 5);
spacc->result_size = 20;
spacc->used_size = 0;
break;
}
case IOCTL_SPACC_BASE64_ACTION: {
spacc_base64_action_s action = {0};
phys_addr_t src_phys;
ret = copy_from_user((unsigned char *)&action, (unsigned char *)arg, sizeof(action));
if (ret != 0)
return -1;
src_phys = virt_to_phys(spacc->buffer);
arch_sync_dma_for_device(src_phys, spacc->used_size, DMA_TO_DEVICE);
ret = spacc_base64(spacc, src_phys, spacc->used_size, action.action);
if (ret < 0) {
pr_err("plat_cryptodma_do failed\n");
return -1;
}
arch_sync_dma_for_device(src_phys, spacc->result_size, DMA_FROM_DEVICE);
spacc->used_size = 0;
break;
}
case IOCTL_SPACC_AES_ACTION: {
spacc_aes_config_s config = {0};
phys_addr_t src_phys;
uint32_t len;
ret = copy_from_user((unsigned char *)&config, (unsigned char *)arg, sizeof(config));
if (ret != 0)
return -1;
if (config.src) {
if ((config.len == 0) ||
(config.len & 0xF)) {
pr_err("src len [%d] invailed\n", config.len);
return -1;
}
src_phys = (phys_addr_t)config.src;
len = config.len;
} else {
if ((spacc->used_size == 0) ||
(spacc->used_size & 0xF)) {
pr_err("used_size : %d\n", spacc->used_size);
return -1;
}
src_phys = virt_to_phys(spacc->buffer);
len = spacc->used_size;
}
arch_sync_dma_for_device(src_phys, len, DMA_TO_DEVICE);
ret = spacc_aes(spacc, src_phys, len, config);
if (ret < 0) {
pr_err("plat_cryptodma_do failed\n");
return -1;
}
arch_sync_dma_for_device(src_phys, spacc->result_size, DMA_FROM_DEVICE);
spacc->used_size = 0;
break;
}
case IOCTL_SPACC_SM4_ACTION: {
spacc_sm4_config_s action = {0};
phys_addr_t src_phys;
if (spacc->used_size & 0xF) {
pr_err("used_size : %d\n", spacc->used_size);
return -1;
}
ret = copy_from_user((unsigned char *)&action, (unsigned char *)arg, sizeof(action));
if (ret != 0)
return -1;
src_phys = virt_to_phys(spacc->buffer);
arch_sync_dma_for_device(src_phys, spacc->used_size, DMA_TO_DEVICE);
ret = spacc_sm4(spacc, src_phys, spacc->used_size, action);
if (ret < 0) {
pr_err("plat_cryptodma_do failed\n");
return -1;
}
arch_sync_dma_for_device(src_phys, spacc->result_size, DMA_FROM_DEVICE);
spacc->used_size = 0;
break;
}
case IOCTL_SPACC_DES_ACTION: {
spacc_des_config_s action = {0};
phys_addr_t src_phys;
if (spacc->used_size & 0x7) {
pr_err("spacc_dev->used_size : %d\n", spacc->used_size);
return -1;
}
ret = copy_from_user((unsigned char *)&action, (unsigned char *)arg, sizeof(action));
if (ret != 0)
return -1;
src_phys = virt_to_phys(spacc->buffer);
arch_sync_dma_for_device(src_phys, spacc->used_size, DMA_TO_DEVICE);
ret = spacc_des(spacc, src_phys, spacc->used_size, action, 0);
if (ret < 0) {
pr_err("plat_cryptodma_do failed\n");
return -1;
}
arch_sync_dma_for_device(src_phys, spacc->result_size, DMA_FROM_DEVICE);
spacc->used_size = 0;
break;
}
case IOCTL_SPACC_TDES_ACTION: {
spacc_tdes_config_s action = {0};
phys_addr_t src_phys;
if (spacc->used_size & 0x7) {
pr_err("spacc_dev->used_size : %d\n", spacc->used_size);
return -EINVAL;
}
ret = copy_from_user((unsigned char *)&action, (unsigned char *)arg, sizeof(action));
if (ret != 0)
return -1;
src_phys = virt_to_phys(spacc->buffer);
arch_sync_dma_for_device(src_phys, spacc->used_size, DMA_TO_DEVICE);
ret = spacc_des(spacc, src_phys, spacc->used_size, action, 1);
if (ret < 0) {
pr_err("plat_cryptodma_do failed\n");
return -1;
}
arch_sync_dma_for_device(src_phys, spacc->result_size, DMA_FROM_DEVICE);
spacc->used_size = 0;
break;
}
default:
return -EINVAL;
}
return 0;
}
const struct file_operations spacc_fops = {
.owner = THIS_MODULE,
.open = spacc_open,
.read = spacc_read,
.write = spacc_write,
.release = spacc_release,
.unlocked_ioctl = spacc_ioctl,
};
static int cvitek_spacc_drv_probe(struct platform_device *pdev)
{
struct cvi_spacc *spacc;
struct device *dev = &pdev->dev;
int ret = 0;
spacc = devm_kzalloc(dev, sizeof(*spacc), GFP_KERNEL);
if (!spacc)
return -ENOMEM;
spacc->dev = dev;
spacc->spacc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spacc->spacc_base))
return PTR_ERR(spacc->spacc_base);
ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
pr_err("get irq num failed\n");
return -1;
}
#ifdef CONFIG_PM_SLEEP
spacc->efuse_clk = clk_get_sys(NULL, "clk_efuse");
if (IS_ERR(spacc->efuse_clk)) {
pr_err("%s: efuse clock not found %ld\n", __func__
, PTR_ERR(spacc->efuse_clk));
return -1;
}
#endif
ret = devm_request_irq(dev, ret, cvitek_spacc_irq
, IRQF_SHARED | IRQF_TRIGGER_RISING, pdev->name, spacc);
if (ret) {
pr_err("request irq failed\n");
return -1;
}
ret = alloc_chrdev_region(&spacc->tdev, 0, 1, DEVICE_NAME);
if (ret)
return ret;
cdev_init(&spacc->cdev, &spacc_fops);
spacc->cdev.owner = THIS_MODULE;
ret = cdev_add(&spacc->cdev, spacc->tdev, 1);
if (ret)
goto failed;
spacc->spacc_class = class_create(THIS_MODULE, DEVICE_NAME);
if (IS_ERR(spacc->spacc_class)) {
pr_err("Err: failed when create class.\n");
goto failed;
}
device_create(spacc->spacc_class, NULL, spacc->tdev, spacc, DEVICE_NAME);
return ret;
failed:
unregister_chrdev_region(spacc->tdev, 1);
return -ENOMEM;
}
static int cvitek_spacc_drv_remove(struct platform_device *pdev)
{
struct cvi_spacc *spacc = platform_get_drvdata(pdev);
device_destroy(spacc->spacc_class, spacc->tdev);
cdev_del(&spacc->cdev);
unregister_chrdev_region(spacc->tdev, 1);
class_destroy(spacc->spacc_class);
return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id cvitek_spacc_of_match[] = {
{ .compatible = "cvitek,spacc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, cvitek_spacc_of_match);
#endif
static struct platform_driver cvitek_spacc_driver = {
.probe = cvitek_spacc_drv_probe,
.remove = cvitek_spacc_drv_remove,
.driver = {
.name = "cvitek_spacc",
.of_match_table = of_match_ptr(cvitek_spacc_of_match),
.pm = &cvitek_spacc_pm_ops,
},
};
module_platform_driver(cvitek_spacc_driver);
MODULE_DESCRIPTION("Cvitek Spacc Driver");
MODULE_LICENSE("GPL");

View File

@ -17,6 +17,7 @@
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>
/*
@ -64,6 +65,8 @@ struct plic_priv {
struct cpumask lmask;
struct irq_domain *irqdomain;
void __iomem *regs;
unsigned int nr_irqs;
unsigned long *prio_save;
};
struct plic_handler {
@ -75,6 +78,7 @@ struct plic_handler {
*/
raw_spinlock_t enable_lock;
void __iomem *enable_base;
u32 *enable_save;
struct plic_priv *priv;
};
static int plic_parent_irq;
@ -182,6 +186,70 @@ static struct irq_chip plic_chip = {
#endif
};
static int plic_irq_suspend(void)
{
unsigned int i, cpu;
u32 __iomem *reg;
struct plic_priv *priv;
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
for (i = 0; i < priv->nr_irqs; i++)
if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
__set_bit(i, priv->prio_save);
else
__clear_bit(i, priv->prio_save);
for_each_cpu(cpu, cpu_present_mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present)
continue;
raw_spin_lock(&handler->enable_lock);
for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
reg = handler->enable_base + i * sizeof(u32);
handler->enable_save[i] = readl(reg);
}
raw_spin_unlock(&handler->enable_lock);
}
return 0;
}
static void plic_irq_resume(void)
{
unsigned int i, index, cpu;
u32 __iomem *reg;
struct plic_priv *priv;
priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
for (i = 0; i < priv->nr_irqs; i++) {
index = BIT_WORD(i);
writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
}
for_each_cpu(cpu, cpu_present_mask) {
struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
if (!handler->present)
continue;
raw_spin_lock(&handler->enable_lock);
for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
reg = handler->enable_base + i * sizeof(u32);
writel(handler->enable_save[i], reg);
}
raw_spin_unlock(&handler->enable_lock);
}
}
static struct syscore_ops plic_irq_syscore_ops = {
.suspend = plic_irq_suspend,
.resume = plic_irq_resume,
};
static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
@ -302,15 +370,21 @@ static int __init plic_init(struct device_node *node,
if (WARN_ON(!nr_irqs))
goto out_iounmap;
priv->nr_irqs = nr_irqs;
priv->prio_save = bitmap_alloc(nr_irqs, GFP_KERNEL);
if (!priv->prio_save)
goto out_free_priority_reg;
nr_contexts = of_irq_count(node);
if (WARN_ON(!nr_contexts))
goto out_iounmap;
goto out_free_priority_reg;
error = -ENOMEM;
priv->irqdomain = irq_domain_add_linear(node, nr_irqs + 1,
&plic_irqdomain_ops, priv);
if (WARN_ON(!priv->irqdomain))
goto out_iounmap;
goto out_free_priority_reg;
for (i = 0; i < nr_contexts; i++) {
struct of_phandle_args parent;
@ -369,6 +443,11 @@ static int __init plic_init(struct device_node *node,
handler->enable_base =
priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
handler->priv = priv;
handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
sizeof(*handler->enable_save), GFP_KERNEL);
if (!handler->enable_save)
goto out_free_enable_reg;
done:
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
plic_toggle(handler, hwirq, 0);
@ -384,6 +463,7 @@ done:
cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
"irqchip/sifive/plic:starting",
plic_starting_cpu, plic_dying_cpu);
register_syscore_ops(&plic_irq_syscore_ops);
plic_cpuhp_setup_done = true;
}
@ -391,6 +471,10 @@ done:
" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);
return 0;
out_free_enable_reg:
kfree(handler->enable_save);
out_free_priority_reg:
kfree(priv->prio_save);
out_iounmap:
iounmap(priv->regs);
out_free_priv:

View File

@ -97,6 +97,7 @@ short ECC_1bits_remap[4] = {0, 1, -1, -1};
short ECC_GD_4bit_remap[16] = {0, 0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff};
short ECC_GD_8bit_remap[16] = {0, 0, 0, 0, 4, 5, 6, 7, 0, 0, 0, 0, 8, 8, 8, 8};
short ECC_HYF2G_remap[4] = {0, 1, -1, 14};
short ECC_HYF1G_remap[4] = {0, 1, -1, 4};
struct cvsnfc_chip_info nand_flash_cvitek_supported_ids[] = {
{
@ -1272,6 +1273,32 @@ struct cvsnfc_chip_info nand_flash_cvitek_supported_ids[] = {
.flags = 0
},
{
{ .name = "HYF1GQ4UDACAE",
.id = {0xC9, 0x21},
.pagesize = SZ_2K,
.chipsize = SZ_128,
.erasesize = SZ_128K,
.options = 0,
.id_len = 2,
.oobsize = SZ_64,
{ .strength_ds = 4,
.step_ds = SZ_512
},
},
{ .ecc_sr_addr = 0xc0,
.ecc_mbf_addr = 0x0,
.read_ecc_opcode = 0,
.ecc_bits = 2,
.ecc_bit_shift = 4,
.uncorr_val = 0x2,
.remap = ECC_HYF1G_remap
},
.driver = &spi_nand_driver_gd,
.flags = 0
},
{
{ .name = "FM25S01A",
.id = {0xA1, 0xE4},

View File

@ -24,6 +24,14 @@
#include "stmmac_platform.h"
#ifdef CONFIG_PM_SLEEP
#if defined(CONFIG_ARCH_CV180X) || defined(CONFIG_ARCH_CV181X)
#include "dwmac1000.h"
#include "dwmac_dma.h"
#include "hwif.h"
#endif
#endif
struct cvitek_mac {
struct device *dev;
struct reset_control *rst;
@ -39,19 +47,54 @@ static int bm_eth_reset_phy(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
int phy_reset_gpio;
u32 ephy_addr = 0x0;
void __iomem *ephy_reg;
u32 ephy_base_addr = 0x0;
void __iomem *ephy_base_reg;
u32 ephy_top_addr = 0x0;
void __iomem *ephy_top_reg;
if (!np)
return 0;
of_property_read_u32(np, "ephy_ctl_reg", &ephy_addr);
of_property_read_u32(np, "ephy_base_reg", &ephy_base_addr);
of_property_read_u32(np, "ephy_top_reg", &ephy_top_addr);
if (ephy_addr) {
ephy_reg = ioremap(ephy_addr, 0x10);
writel(readl(ephy_reg) & 0xFFFFFFFC, ephy_reg);
mdelay(2);
iounmap(ephy_reg);
if (ephy_base_addr && ephy_top_addr) {
ephy_base_reg = ioremap(ephy_base_addr, 0x80);
ephy_top_reg = ioremap(ephy_top_addr, 0x10);
#ifdef CONFIG_PM_SLEEP
#if defined(CONFIG_ARCH_CV180X) || defined(CONFIG_ARCH_CV181X)
// set rg_ephy_apb_rw_sel 0x0804@[0]=1/APB by using APB interface
writel(0x0001, ephy_top_reg + 0x4);
// Release 0x0800[0]=0/shutdown
writel(0x0900, ephy_top_reg);
// Release 0x0800[2]=1/dig_rst_n so that mii_reg becomes accessible
writel(0x0904, ephy_top_reg);
// ANA INIT (PD/EN), switch to MII-page5
writel(0x0500, ephy_base_reg + 0x7c);
// Release ANA_PD p5.0x10@[13:8] = 6'b001100
writel(0x0c00, ephy_base_reg + 0x40);
// Release ANA_EN p5.0x10@[7:0] = 8'b01111110
writel(0x0c7e, ephy_base_reg + 0x40);
// Wait PLL_Lock, Lock_Status p5.0x12@[15] = 1
//mdelay(1);
// Release 0x0800[1] = 1/ana_rst_n
writel(0x0906, ephy_top_reg);
// ANA INIT
// @Switch to MII-page5
writel(0x0500, ephy_base_reg + 0x7c);
// PHY_ID
writel(0x0043, ephy_base_reg + 0x8);
writel(0x5649, ephy_base_reg + 0xc);
// switch to MDIO control by ETH_MAC
writel(0x0, ephy_top_reg + 0x4);
#endif
#endif
iounmap(ephy_base_reg);
iounmap(ephy_top_reg);
}
phy_reset_gpio = of_get_named_gpio(np, "phy-reset-gpios", 0);
@ -143,12 +186,108 @@ static const struct of_device_id bm_dwmac_match[] = {
};
MODULE_DEVICE_TABLE(of, bm_dwmac_match);
#ifdef CONFIG_PM_SLEEP
#if defined(CONFIG_ARCH_CV180X) || defined(CONFIG_ARCH_CV181X)
static int cvi_eth_pm_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
if (!priv->reg_ctx) {
priv->reg_ctx = devm_kzalloc(priv->device, sizeof(struct stmmac_reg_context), GFP_KERNEL);
if (!priv->reg_ctx)
return -ENOMEM;
}
priv->reg_ctx->ctrl = readl(priv->ioaddr + GMAC_CONTROL);
priv->reg_ctx->frame_filter = readl(priv->ioaddr + GMAC_FRAME_FILTER);
priv->reg_ctx->hash_high = readl(priv->ioaddr + GMAC_HASH_HIGH);
priv->reg_ctx->hash_low = readl(priv->ioaddr + GMAC_HASH_LOW);
priv->reg_ctx->mii_addr = readl(priv->ioaddr + GMAC_MII_ADDR);
priv->reg_ctx->mii_data = readl(priv->ioaddr + GMAC_MII_DATA);
priv->reg_ctx->flow_ctrl = readl(priv->ioaddr + GMAC_FLOW_CTRL);
priv->reg_ctx->vlan_tag = readl(priv->ioaddr + GMAC_VLAN_TAG);
priv->reg_ctx->debug = readl(priv->ioaddr + GMAC_DEBUG);
priv->reg_ctx->wakeup_fileter = readl(priv->ioaddr + GMAC_WAKEUP_FILTER);
priv->reg_ctx->lpi_ctrl_status = readl(priv->ioaddr + LPI_CTRL_STATUS);
priv->reg_ctx->lpi_timer_ctrl = readl(priv->ioaddr + LPI_TIMER_CTRL);
priv->reg_ctx->int_mask = readl(priv->ioaddr + GMAC_INT_MASK);
priv->reg_ctx->mac_addr0_high = readl(priv->ioaddr + GMAC_MAC_ADDR0_HIGH);
priv->reg_ctx->mac_addr0_low = readl(priv->ioaddr + GMAC_MAC_ADDR0_LOW);
priv->reg_ctx->pcs_base = readl(priv->ioaddr + GMAC_PCS_BASE);
priv->reg_ctx->mmc_ctrl = readl(priv->ioaddr + GMAC_MMC_CTRL);
priv->reg_ctx->mmc_rx_intr_mask = readl(priv->ioaddr + GMAC_MMC_RX_INTR_MASK);
priv->reg_ctx->mmc_tx_intr_mask = readl(priv->ioaddr + GMAC_MMC_TX_INTR_MASK);
priv->reg_ctx->mmc_ipc_rx_intr_mask = readl(priv->ioaddr + GMAC_MMC_IPC_RX_INTR_MASK);
priv->reg_ctx->mmc_rx_csum_offload = readl(priv->ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD);
priv->reg_ctx->dma_bus_mode = readl(priv->ioaddr + DMA_BUS_MODE);
priv->reg_ctx->dma_rx_base_addr = readl(priv->ioaddr + DMA_RCV_BASE_ADDR);
priv->reg_ctx->dma_tx_base_addr = readl(priv->ioaddr + DMA_TX_BASE_ADDR);
priv->reg_ctx->dma_ctrl = readl(priv->ioaddr + DMA_CONTROL);
priv->reg_ctx->dma_intr_ena = readl(priv->ioaddr + DMA_INTR_ENA);
priv->reg_ctx->dma_rx_watchdog = readl(priv->ioaddr + DMA_RX_WATCHDOG);
priv->reg_ctx->dma_axi_bus_mode = readl(priv->ioaddr + DMA_AXI_BUS_MODE);
return 0;
}
extern void stmmac_reset_subtask2(struct stmmac_priv *priv);
static int cvi_eth_pm_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
bm_eth_reset_phy(pdev);
writel(priv->reg_ctx->ctrl, priv->ioaddr + GMAC_CONTROL);
writel(priv->reg_ctx->frame_filter, priv->ioaddr + GMAC_FRAME_FILTER);
writel(priv->reg_ctx->hash_high, priv->ioaddr + GMAC_HASH_HIGH);
writel(priv->reg_ctx->hash_low, priv->ioaddr + GMAC_HASH_LOW);
writel(priv->reg_ctx->mii_addr, priv->ioaddr + GMAC_MII_ADDR);
writel(priv->reg_ctx->mii_data, priv->ioaddr + GMAC_MII_DATA);
writel(priv->reg_ctx->flow_ctrl, priv->ioaddr + GMAC_FLOW_CTRL);
writel(priv->reg_ctx->vlan_tag, priv->ioaddr + GMAC_VLAN_TAG);
writel(priv->reg_ctx->debug, priv->ioaddr + GMAC_DEBUG);
writel(priv->reg_ctx->wakeup_fileter, priv->ioaddr + GMAC_WAKEUP_FILTER);
writel(priv->reg_ctx->lpi_ctrl_status, priv->ioaddr + LPI_CTRL_STATUS);
writel(priv->reg_ctx->lpi_timer_ctrl, priv->ioaddr + LPI_TIMER_CTRL);
writel(priv->reg_ctx->int_mask, priv->ioaddr + GMAC_INT_MASK);
writel(priv->reg_ctx->mac_addr0_high, priv->ioaddr + GMAC_MAC_ADDR0_HIGH);
writel(priv->reg_ctx->mac_addr0_low, priv->ioaddr + GMAC_MAC_ADDR0_LOW);
writel(priv->reg_ctx->pcs_base, priv->ioaddr + GMAC_PCS_BASE);
writel(priv->reg_ctx->mmc_ctrl, priv->ioaddr + GMAC_MMC_CTRL);
writel(priv->reg_ctx->mmc_rx_intr_mask, priv->ioaddr + GMAC_MMC_RX_INTR_MASK);
writel(priv->reg_ctx->mmc_tx_intr_mask, priv->ioaddr + GMAC_MMC_TX_INTR_MASK);
writel(priv->reg_ctx->mmc_ipc_rx_intr_mask, priv->ioaddr + GMAC_MMC_IPC_RX_INTR_MASK);
writel(priv->reg_ctx->mmc_rx_csum_offload, priv->ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD);
writel(priv->reg_ctx->dma_bus_mode | 0x1, priv->ioaddr + DMA_BUS_MODE);
stmmac_reset_subtask2(priv);
return 0;
}
#else
#define cvi_eth_pm_suspend NULL
#define cvi_eth_pm_resume NULL
#endif
static const struct dev_pm_ops cvi_eth_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(cvi_eth_pm_suspend, cvi_eth_pm_resume)
};
#endif
static struct platform_driver bm_dwmac_driver = {
.probe = bm_dwmac_probe,
.remove = stmmac_pltfr_remove,
.driver = {
.name = "bm-dwmac",
#ifdef CONFIG_PM_SLEEP
#if defined(CONFIG_ARCH_CV180X) || defined(CONFIG_ARCH_CV181X)
.pm = &cvi_eth_pm_ops,
#else
.pm = &stmmac_pltfr_pm_ops,
#endif
#endif
.of_match_table = bm_dwmac_match,
},
};

View File

@ -76,6 +76,9 @@ enum power_event {
#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
/* GMAC HW ADDR regs */
#define GMAC_MAC_ADDR0_HIGH 0x00000040
#define GMAC_MAC_ADDR0_LOW 0x00000044
#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
(reg * 8))
#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
@ -326,6 +329,9 @@ enum rtc_control {
#define GMAC_MMC_CTRL 0x100
#define GMAC_MMC_RX_INTR 0x104
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_INTR_MASK 0x10c
#define GMAC_MMC_TX_INTR_MASK 0x110
#define GMAC_MMC_IPC_RX_INTR_MASK 0x200
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
#define GMAC_EXTHASH_BASE 0x500

View File

@ -128,6 +128,41 @@ struct stmmac_pps_cfg {
struct timespec64 period;
};
#ifdef CONFIG_PM_SLEEP
#if defined(CONFIG_ARCH_CV180X) || defined(CONFIG_ARCH_CV181X)
struct stmmac_reg_context {
u32 ctrl;
u32 frame_filter;
u32 hash_high;
u32 hash_low;
u32 mii_addr;
u32 mii_data;
u32 flow_ctrl;
u32 vlan_tag;
u32 debug;
u32 wakeup_fileter;
u32 lpi_ctrl_status;
u32 lpi_timer_ctrl;
u32 int_mask;
u32 mac_addr0_high;
u32 mac_addr0_low;
u32 pcs_base;
u32 mmc_ctrl;
u32 mmc_rx_intr_mask;
u32 mmc_tx_intr_mask;
u32 mmc_ipc_rx_intr_mask;
u32 mmc_rx_csum_offload;
u32 exthash_base;
u32 dma_bus_mode;
u32 dma_rx_base_addr;
u32 dma_tx_base_addr;
u32 dma_ctrl;
u32 dma_intr_ena;
u32 dma_rx_watchdog;
u32 dma_axi_bus_mode;
};
#endif
#endif
struct stmmac_rss {
int enable;
u8 key[STMMAC_RSS_HASH_KEY_SIZE];
@ -244,12 +279,15 @@ struct stmmac_priv {
/* Receive Side Scaling */
struct stmmac_rss rss;
#ifdef CONFIG_PM_SLEEP
struct stmmac_reg_context *reg_ctx;
#endif
};
enum stmmac_state {
STMMAC_DOWN,
STMMAC_RESET_REQUESTED,
STMMAC_RESETING,
STMMAC_RESETTING,
STMMAC_SERVICE_SCHED,
};

View File

@ -4627,6 +4627,22 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
};
void stmmac_reset_subtask2(struct stmmac_priv *priv)
{
rtnl_lock();
netif_trans_update(priv->dev);
while (test_and_set_bit(STMMAC_RESETTING, &priv->state))
usleep_range(1000, 2000);
set_bit(STMMAC_DOWN, &priv->state);
dev_close(priv->dev);
dev_open(priv->dev, NULL);
clear_bit(STMMAC_DOWN, &priv->state);
clear_bit(STMMAC_RESETTING, &priv->state);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(stmmac_reset_subtask2);
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
@ -4638,14 +4654,14 @@ static void stmmac_reset_subtask(struct stmmac_priv *priv)
rtnl_lock();
netif_trans_update(priv->dev);
while (test_and_set_bit(STMMAC_RESETING, &priv->state))
while (test_and_set_bit(STMMAC_RESETTING, &priv->state))
usleep_range(1000, 2000);
set_bit(STMMAC_DOWN, &priv->state);
dev_close(priv->dev);
dev_open(priv->dev, NULL);
clear_bit(STMMAC_DOWN, &priv->state);
clear_bit(STMMAC_RESETING, &priv->state);
clear_bit(STMMAC_RESETTING, &priv->state);
rtnl_unlock();
}
@ -5305,6 +5321,8 @@ int stmmac_resume(struct device *dev)
phylink_mac_change(priv->phylink, true);
netif_device_attach(ndev);
if (ndev->phydev)
phy_start(ndev->phydev);
return 0;
}

View File

@ -343,6 +343,22 @@ err_ephy_mem_1:
return ret;
}
static int cvi_genphy_suspend(struct phy_device *phydev)
{
return 0;
}
static int cvi_genphy_resume(struct phy_device *phydev)
{
int ret;
ret = cv182xa_phy_config_init(phydev);
if (ret < 0)
return ret;
ret = genphy_config_aneg(phydev);
//return phy_clear_bits(phydev, MII_BMCR, BMCR_PDOWN);
return 0;
}
static struct phy_driver cv182xa_phy_driver[] = {
{
.phy_id = 0x00435649,
@ -355,8 +371,8 @@ static struct phy_driver cv182xa_phy_driver[] = {
.ack_interrupt = cv182xa_phy_ack_interrupt,
.config_intr = cv182xa_phy_config_intr,
.aneg_done = genphy_aneg_done,
.suspend = genphy_suspend,
.resume = genphy_resume,
.suspend = cvi_genphy_suspend,
.resume = cvi_genphy_resume,
.set_loopback = genphy_loopback,
} };

View File

@ -88,6 +88,7 @@ static int dma_remap_suspend_late(struct device *dev)
static int dma_remap_resume_early(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
u32 val;
val = UPDATE_REMAP
@ -106,6 +107,22 @@ static int dma_remap_resume_early(struct device *dev)
writel(val, remap_subsys_base + 0x4);
if (device_property_present(&pdev->dev, "int_mux_base") &&
device_property_present(&pdev->dev, "int_mux")) {
u32 int_mux_base = 0;
u32 int_mux = 0;
void __iomem *int_mux_reg;
/* Set sysDMA interrupt receiver of IC after CV181X */
device_property_read_u32(&pdev->dev, "int_mux_base", &int_mux_base);
if (int_mux_base != 0x0) {
int_mux_reg = ioremap(int_mux_base, 0x4);
device_property_read_u32(&pdev->dev, "int_mux", &int_mux);
writel(int_mux, int_mux_reg);
iounmap(int_mux_reg);
}
}
return 0;
}
#else

View File

@ -347,6 +347,27 @@ static const struct acpi_device_id dw_spi_mmio_acpi_match[] = {
MODULE_DEVICE_TABLE(acpi, dw_spi_mmio_acpi_match);
#endif
#ifdef CONFIG_PM_SLEEP
static int dw_spi_suspend(struct device *dev)
{
struct dw_spi *dws = dev_get_drvdata(dev);
dws->dw_spi_div = dw_readl(dws, DW_SPI_BAUDR);
dw_spi_remove_host(dws);
return 0;
}
static int dw_spi_resume(struct device *dev)
{
struct dw_spi *dws = dev_get_drvdata(dev);
spi_set_clk(dws, dws->dw_spi_div);
return dw_spi_add_host(dev, dws);
}
#endif
static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, dw_spi_suspend, dw_spi_resume);
static struct platform_driver dw_spi_mmio_driver = {
.probe = dw_spi_mmio_probe,
.remove = dw_spi_mmio_remove,
@ -354,6 +375,7 @@ static struct platform_driver dw_spi_mmio_driver = {
.name = DRIVER_NAME,
.of_match_table = dw_spi_mmio_of_match,
.acpi_match_table = ACPI_PTR(dw_spi_mmio_acpi_match),
.pm = &dw_spi_pm_ops,
},
};
module_platform_driver(dw_spi_mmio_driver);

View File

@ -185,6 +185,10 @@ struct dw_spi {
const struct dw_spi_dma_ops *dma_ops;
struct completion dma_completion;
#ifdef CONFIG_PM_SLEEP
u32 dw_spi_div;
#endif
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs;
struct debugfs_regset32 regset;

View File

@ -5,6 +5,7 @@ config USB_DWC2
depends on HAS_DMA
depends on USB || USB_GADGET
depends on HAS_IOMEM
select USB_ROLE_SWITCH
help
Say Y here if your system has a Dual Role Hi-Speed USB
controller based on the DesignWare HSOTG IP Core.

View File

@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _CVITEK_SPACC_H_
#define _CVITEK_SPACC_H_
typedef enum SPACC_ALGO {
SPACC_ALGO_AES,
SPACC_ALGO_DES,
SPACC_ALGO_TDES,
SPACC_ALGO_SM4,
SPACC_ALGO_SHA1,
SPACC_ALGO_SHA256,
SPACC_ALGO_BASE64,
} SPACC_ALGO_E;
typedef enum SPACC_ALGO_MODE {
SPACC_ALGO_MODE_ECB,
SPACC_ALGO_MODE_CBC, //cv180x Not Supported
SPACC_ALGO_MODE_CTR, //cv180x Not Supported
SPACC_ALGO_MODE_OFB, //cv180x Not Supported
} SPACC_ALGO_MODE_E;
typedef enum SPACC_KEY_SIZE {
SPACC_KEY_SIZE_64BITS,
SPACC_KEY_SIZE_128BITS,
SPACC_KEY_SIZE_192BITS,
SPACC_KEY_SIZE_256BITS,
} SPACC_KEY_SIZE_E;
typedef enum SPACC_ACTION {
SPACC_ACTION_ENCRYPTION,
SPACC_ACTION_DECRYPT,
} SPACC_ACTION_E;
typedef struct spacc_base64_action {
SPACC_ACTION_E action;
} spacc_base64_action_s;
typedef struct spacc_aes_config {
// data config
void *src; //src phy address
size_t len;
// spacc config
unsigned char key[32];
unsigned char iv[16];
SPACC_ALGO_MODE_E mode;
SPACC_KEY_SIZE_E size;
SPACC_ACTION_E action;
} spacc_aes_config_s;
typedef struct spacc_des_config {
unsigned char key[24];
unsigned char iv[16];
SPACC_ALGO_MODE_E mode;
SPACC_ACTION_E action;
} spacc_des_config_s;
typedef spacc_aes_config_s spacc_sm4_config_s;
typedef spacc_des_config_s spacc_tdes_config_s;
#define IOCTL_SPACC_BASE 'S'
#define IOCTL_SPACC_CREATE_MEMPOOL _IOW(IOCTL_SPACC_BASE, 1, unsigned int)
#define IOCTL_SPACC_GET_MEMPOOL_SIZE _IOR(IOCTL_SPACC_BASE, 2, unsigned int)
#define IOCTL_SPACC_SHA256_ACTION _IO(IOCTL_SPACC_BASE, 5)
#define IOCTL_SPACC_SHA1_ACTION _IO(IOCTL_SPACC_BASE, 6)
#define IOCTL_SPACC_BASE64_ACTION _IOW(IOCTL_SPACC_BASE, 7, spacc_base64_action_s)
#define IOCTL_SPACC_AES_ACTION _IOW(IOCTL_SPACC_BASE, 8, spacc_aes_config_s)
#define IOCTL_SPACC_SM4_ACTION _IOW(IOCTL_SPACC_BASE, 9, spacc_sm4_config_s)
#define IOCTL_SPACC_DES_ACTION _IOW(IOCTL_SPACC_BASE, 10, spacc_des_config_s)
#define IOCTL_SPACC_TDES_ACTION _IOW(IOCTL_SPACC_BASE, 11, spacc_tdes_config_s)
#endif // _CVITEK_SPACC_H_
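
A minimal user-space sketch of how this ioctl interface could be driven (assumptions: the device node appears as /dev/spacc, matching DEVICE_NAME in the driver, and the caller pre-pads its input to a 64-byte multiple, as IOCTL_SPACC_SHA256_ACTION rejects other lengths):

/* Hypothetical usage sketch: hash a pre-padded 64-byte buffer via /dev/spacc. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cvitek_spacc.h>

int main(void)
{
	unsigned char msg[64] = { 0 };  /* caller must apply SHA-256 padding itself */
	unsigned char digest[32];
	unsigned int pool_size = 4096;  /* size of the driver-side buffer to allocate */
	int fd = open("/dev/spacc", O_RDWR);
	int i;

	if (fd < 0) {
		perror("open /dev/spacc");
		return 1;
	}
	/* Allocate the driver buffer, then write() the padded message into it. */
	if (ioctl(fd, IOCTL_SPACC_CREATE_MEMPOOL, &pool_size) < 0 ||
	    write(fd, msg, sizeof(msg)) != (ssize_t)sizeof(msg)) {
		perror("setup");
		close(fd);
		return 1;
	}
	/* Run SHA-256 over the buffered data, then read() back the 32-byte digest. */
	if (ioctl(fd, IOCTL_SPACC_SHA256_ACTION) < 0 ||
	    read(fd, digest, sizeof(digest)) != (ssize_t)sizeof(digest)) {
		perror("sha256");
		close(fd);
		return 1;
	}
	for (i = 0; i < 32; i++)
		printf("%02x", digest[i]);
	printf("\n");
	close(fd);
	return 0;
}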

View File

@ -12,17 +12,32 @@
#include <linux/clk.h>
#include <linux/miscdevice.h>
// #define PDM_AEC
/* register offset */
#define PDM_EN_REG 0x30
#define PDM_EN 0x1
#define PDM_OFF 0x0
#ifdef PDM_AEC
// for 16.384 MHz audio clock
#define PDM_48K 2
#define PDM_44_1K 2
#define PDM_32K 3
#define PDM_22_05K 4
#define PDM_16K 7
#define PDM_11_025K 10
#define PDM_8K 15
#else
// for 24.576 MHz audio clock
#define PDM_48K 3
#define PDM_44_1K 3
#define PDM_22_05K 7
#define PDM_16K 11
#define PDM_11_025K 15
#define PDM_8K 23
#endif
#define PDM_SETTING_REG 0x34
#define PDM_CLK_DIV_MASK 0xFFFFFFE0
@ -34,13 +49,26 @@
#define PDM_BOND_SEL_0 (0 << 24)
#define PDM_BOND_SEL_1 (1 << 24)
#ifdef PDM_AEC
// for 16.384 MHz audio clock
#define I2S_48K 4
#define I2S_44_1K 4
#define I2S_32K 7
#define I2S_22_05K 10
#define I2S_16K 15
#define I2S_11_025K 20
#define I2S_8K 31
#else
// for 24.576 MHz audio clock
#define I2S_48K 7
#define I2S_44_1K 7
#define I2S_22_05K 15
#define I2S_16K 23
#define I2S_11_025K 31
#define I2S_8K 47
#endif
#define I2S_SETTING_REG 0x38
#define I2S_CLK_DIV_MASK 0xFFFFFF00
#define I2S_CLK_DIV(v) (v << 0)