From f5397c3ee0a3e2ca0a6d66d079ffcd5386b45b81 Mon Sep 17 00:00:00 2001 From: Nanyong Sun Date: Fri, 30 Apr 2021 16:28:47 +0800 Subject: [PATCH 01/62] riscv: mm: add _PAGE_LEAF macro In riscv, a page table entry is leaf when any bit of read, write, or execute bit is set. So add a macro:_PAGE_LEAF instead of (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC), which is frequently used to determine if it is a leaf page. This make code easier to read, without any functional change. Signed-off-by: Nanyong Sun Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable-64.h | 3 +-- arch/riscv/include/asm/pgtable-bits.h | 5 +++++ arch/riscv/include/asm/pgtable.h | 6 ++---- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index f3b0da64c6c8f2..e3b7c5dd6a8032 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -46,8 +46,7 @@ static inline int pud_bad(pud_t pud) #define pud_leaf pud_leaf static inline int pud_leaf(pud_t pud) { - return pud_present(pud) && - (pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)); + return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF); } static inline void set_pud(pud_t *pudp, pud_t pud) diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index bbaeb5d358420d..2ee41391292606 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -39,5 +39,10 @@ #define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \ _PAGE_WRITE | _PAGE_EXEC | \ _PAGE_USER | _PAGE_GLOBAL)) +/* + * when all of R/W/X are zero, the PTE is a pointer to the next level + * of the page table; otherwise, it is a leaf PTE. + */ +#define _PAGE_LEAF (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) #endif /* _ASM_RISCV_PGTABLE_BITS_H */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 9469f464e71aff..dbced7d37768cc 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -190,8 +190,7 @@ static inline int pmd_bad(pmd_t pmd) #define pmd_leaf pmd_leaf static inline int pmd_leaf(pmd_t pmd) { - return pmd_present(pmd) && - (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)); + return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF); } static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) @@ -267,8 +266,7 @@ static inline int pte_exec(pte_t pte) static inline int pte_huge(pte_t pte) { - return pte_present(pte) - && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)); + return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF); } static inline int pte_dirty(pte_t pte) From 141682f5b9d658b5fba7c33cf8574329a7840cdc Mon Sep 17 00:00:00 2001 From: Nanyong Sun Date: Fri, 30 Apr 2021 16:28:48 +0800 Subject: [PATCH 02/62] riscv: mm: make pmd_bad() check leaf condition In the definition in Documentation/vm/arch_pgtable_helpers.rst, pmd_bad() means test a non-table mapped PMD, so it should also return true when it is a leaf page. 
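As a minimal sketch of the intended semantics (illustrative only, not part of
the patch, and ignoring the PFN bits a real PMD would carry): a PMD with only
the present bit set still points to a next-level page table, while any of
R/W/X marks it as a leaf mapping and hence as "bad" in the pmd_bad() sense:

	/* hedged illustration only */
	pmd_t table_pmd = __pmd(_PAGE_PRESENT);               /* R/W/X clear */
	pmd_t leaf_pmd  = __pmd(_PAGE_PRESENT | _PAGE_READ);  /* leaf mapping */

	pmd_bad(table_pmd);	/* 0: a pointer to a PTE table */
	pmd_bad(leaf_pmd);	/* 1: a non-table (leaf) mapping */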
Signed-off-by: Nanyong Sun Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index dbced7d37768cc..b06eb8394e4ea6 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -184,7 +184,7 @@ static inline int pmd_none(pmd_t pmd) static inline int pmd_bad(pmd_t pmd) { - return !pmd_present(pmd); + return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF); } #define pmd_leaf pmd_leaf From c3b2d67046d236edb45eed5ca561c62ee7baa788 Mon Sep 17 00:00:00 2001 From: Nanyong Sun Date: Fri, 30 Apr 2021 16:28:49 +0800 Subject: [PATCH 03/62] riscv: mm: add param stride for __sbi_tlb_flush_range Add a parameter: stride for __sbi_tlb_flush_range(), represent the page stride between the address of start and end. Normally, the stride is PAGE_SIZE, and when flush huge page address, the stride can be the huge page size such as:PMD_SIZE, then it only need to flush one tlb entry if the address range within PMD_SIZE. Signed-off-by: Nanyong Sun Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/tlbflush.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 720b443c4528f7..382781abffd0dc 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -15,7 +15,7 @@ void flush_tlb_all(void) * Kernel may panic if cmask is NULL. */ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start, - unsigned long size) + unsigned long size, unsigned long stride) { struct cpumask hmask; unsigned int cpuid; @@ -27,7 +27,7 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start, if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) { /* local cpu is the only cpu present in cpumask */ - if (size <= PAGE_SIZE) + if (size <= stride) local_flush_tlb_page(start); else local_flush_tlb_all(); @@ -41,16 +41,16 @@ static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start, void flush_tlb_mm(struct mm_struct *mm) { - __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1); + __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1, PAGE_SIZE); } void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { - __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE); + __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE, PAGE_SIZE); } void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start); + __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PAGE_SIZE); } From e88b333142e4aba7410d6d3292ad97b3a8588bfe Mon Sep 17 00:00:00 2001 From: Nanyong Sun Date: Fri, 30 Apr 2021 16:28:50 +0800 Subject: [PATCH 04/62] riscv: mm: add THP support on 64-bit Bring Transparent HugePage support to riscv. A transparent huge page is always represented as a pmd. 
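A hedged sketch of why pmd_present() gains the _PAGE_LEAF check (illustrative
only; pfn stands for any valid page frame number and the protection value is
chosen just to set some of the R/W/X bits):

	pmd_t pmd = pmd_mkhuge(pfn_pmd(pfn, PAGE_KERNEL));	/* present leaf */

	pmd = pmd_mkinvalid(pmd);	/* split path: clears PRESENT/PROT_NONE */
	pmd_present(pmd);		/* still true: R/W/X identify the THP */
	pmd_trans_huge(pmd);		/* likewise still true via pmd_leaf() */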
Signed-off-by: Nanyong Sun Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 1 + arch/riscv/include/asm/pgtable.h | 156 +++++++++++++++++++++++++++++++ arch/riscv/mm/tlbflush.c | 7 ++ 3 files changed, 164 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index a8ad8eb761206a..a160f60f937835 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -103,6 +103,7 @@ config RISCV select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK select UACCESS_MEMCPY if !MMU + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index b06eb8394e4ea6..4b708ae0891009 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -172,10 +172,23 @@ extern pgd_t swapper_pg_dir[]; #define __S110 PAGE_SHARED_EXEC #define __S111 PAGE_SHARED_EXEC +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_present(pmd_t pmd) +{ + /* + * Checking for _PAGE_LEAF is needed too because: + * When splitting a THP, split_huge_page() will temporarily clear + * the present bit, in this situation, pmd_present() and + * pmd_trans_huge() still needs to return true. + */ + return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF)); +} +#else static inline int pmd_present(pmd_t pmd) { return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); } +#endif static inline int pmd_none(pmd_t pmd) { @@ -369,6 +382,14 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, local_flush_tlb_page(address); } +static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + pte_t *ptep = (pte_t *)pmdp; + + update_mmu_cache(vma, address, ptep); +} + #define __HAVE_ARCH_PTE_SAME static inline int pte_same(pte_t pte_a, pte_t pte_b) { @@ -462,6 +483,141 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, return ptep_test_and_clear_young(vma, address, ptep); } +/* + * THP functions + */ +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + return pmd; +} + +static inline pmd_t pmd_mkinvalid(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE)); +} + +#define __pmd_to_phys(pmd) (pmd_val(pmd) >> _PAGE_PFN_SHIFT << PAGE_SHIFT) + +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT); +} + +static inline pmd_t mk_pmd(struct page *page, pgprot_t prot) +{ + return pfn_pmd(page_to_pfn(page), prot); +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); +} + +#define pmd_write pmd_write +static inline int pmd_write(pmd_t pmd) +{ + return pte_write(pmd_pte(pmd)); +} + +static inline int pmd_dirty(pmd_t pmd) +{ + return pte_dirty(pmd_pte(pmd)); +} + +static inline int pmd_young(pmd_t pmd) +{ + return pte_young(pmd_pte(pmd)); +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + return pte_pmd(pte_mkold(pmd_pte(pmd))); +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + return pte_pmd(pte_mkyoung(pmd_pte(pmd))); +} + +static inline pmd_t pmd_mkwrite(pmd_t pmd) +{ + return pte_pmd(pte_mkwrite(pmd_pte(pmd))); +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + return pte_pmd(pte_wrprotect(pmd_pte(pmd))); +} + +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + return pte_pmd(pte_mkclean(pmd_pte(pmd))); +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + return 
pte_pmd(pte_mkdirty(pmd_pte(pmd))); +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ + return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)); +} + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_huge(pmd_t pmd) +{ + return pmd_leaf(pmd); +} + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +static inline int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); +} + +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); +} + +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp)); +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + ptep_set_wrprotect(mm, address, (pte_t *)pmdp); +} + +#define pmdp_establish pmdp_establish +static inline pmd_t pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) +{ + return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd))); +} + +#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); + +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + /* * Encode and decode a swap entry * diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 382781abffd0dc..fea45af91f5346 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -54,3 +54,10 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, { __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PAGE_SIZE); } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start, PMD_SIZE); +} +#endif From 3332f4190674114e08daaf6859c11a7e464bceff Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Sat, 17 Apr 2021 00:37:22 +0800 Subject: [PATCH 05/62] riscv: mremap speedup - enable HAVE_MOVE_PUD and HAVE_MOVE_PMD HAVE_MOVE_PUD enables remapping pages at the PUD level if both the source and destination addresses are PUD-aligned. HAVE_MOVE_PMD does similar speedup on the PMD level. 
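As an illustration of the kind of remap that benefits, a hedged userspace
sketch (not part of this patch; shown without main() and error handling, and
a real benchmark would make sure source and destination are PMD- or
PUD-aligned):

	#define _GNU_SOURCE		/* for mremap() and MREMAP_MAYMOVE */
	#include <string.h>
	#include <sys/mman.h>

	#define SZ	(512UL << 20)	/* 512 MiB of anonymous memory */

	void *src = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(src, 1, SZ);		/* populate the page tables */
	void *dst = mremap(src, SZ, SZ, MREMAP_MAYMOVE);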
With HAVE_MOVE_PUD enabled, there is about a 143x improvement on qemu With HAVE_MOVE_PMD enabled, there is about a 5x improvement on qemu Signed-off-by: Jisheng Zhang Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 2 ++ arch/riscv/include/asm/pgtable.h | 11 +++++++++++ 2 files changed, 13 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index a160f60f937835..b58596b141fcc5 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -82,6 +82,8 @@ config RISCV select HAVE_KPROBES select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES + select HAVE_MOVE_PMD + select HAVE_MOVE_PUD select HAVE_PCI select HAVE_PERF_EVENTS select HAVE_PERF_REGS diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 4b708ae0891009..3b72862a83fa13 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -241,6 +241,11 @@ static inline pte_t pmd_pte(pmd_t pmd) return __pte(pmd_val(pmd)); } +static inline pte_t pud_pte(pud_t pud) +{ + return __pte(pud_val(pud)); +} + /* Yields the page frame number (PFN) of a page table entry */ static inline unsigned long pte_pfn(pte_t pte) { @@ -570,6 +575,12 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, return set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)); } +static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pud) +{ + return set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud)); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE static inline int pmd_trans_huge(pmd_t pmd) { From 8f3e136ff378a2b22dbc0ca2a6e58022e6df36d2 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Fri, 7 May 2021 22:19:59 +0800 Subject: [PATCH 06/62] riscv: mm: Remove setup_zero_page() The empty_zero_page sits at .bss..page_aligned section, so will be cleared to zero during clearing bss, we don't need to clear it again. Signed-off-by: Jisheng Zhang Reviewed-by: Anup Patel Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/init.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 4faf8bd157eaa9..11b61bea0c4d3a 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -67,11 +67,6 @@ static void __init zone_sizes_init(void) free_area_init(max_zone_pfns); } -static void __init setup_zero_page(void) -{ - memset((void *)empty_zero_page, 0, PAGE_SIZE); -} - #if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM) static inline void print_mlk(char *name, unsigned long b, unsigned long t) { @@ -867,7 +862,6 @@ RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup); void __init paging_init(void) { setup_vm_final(); - setup_zero_page(); } void __init misc_mem_init(void) From db756746807b5cb64bbe2e6ac4ff38d18b7787ed Mon Sep 17 00:00:00 2001 From: Stanislaw Kardach Date: Mon, 12 Apr 2021 13:10:12 +0200 Subject: [PATCH 07/62] riscv: enable generic PCI resource mapping Enable the PCI resource mapping on RISC-V using the generic framework. This allows userspace applications to mmap PCI resources using /sys/devices/pci*/*/resource* interface. The mmap has been tested with Intel x520-DA2 NIC card on a HiFive Unmatched board (SiFive FU740 SoC). 
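A hedged userspace sketch of the interface this enables (the sysfs path and
mapping size are only examples; substitute the resource file of the BAR to be
mapped, error handling omitted):

	#include <fcntl.h>
	#include <sys/mman.h>

	int fd = open("/sys/bus/pci/devices/0000:01:00.0/resource0", O_RDWR);
	void *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* bar now gives direct access to the start of that BAR */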
Signed-off-by: Stanislaw Kardach Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pci.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h index 658e112c3ce73b..7fd52a30e6058e 100644 --- a/arch/riscv/include/asm/pci.h +++ b/arch/riscv/include/asm/pci.h @@ -18,6 +18,8 @@ /* RISC-V shim does not initialize PCI bus */ #define pcibios_assign_all_busses() 1 +#define ARCH_GENERIC_PCI_MMAP_RESOURCE 1 + extern int isa_dma_bridge_buggy; #ifdef CONFIG_PCI From f842f5ff6aafc2752580ed99ee757652c08684e7 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Mon, 10 May 2021 19:42:22 +0800 Subject: [PATCH 08/62] riscv: Move setup_bootmem into paging_init Make setup_bootmem() static. Signed-off-by: Kefeng Wang Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable.h | 1 - arch/riscv/kernel/setup.c | 1 - arch/riscv/mm/init.c | 3 ++- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 3b72862a83fa13..bde8ce3bfe7cd7 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -698,7 +698,6 @@ extern uintptr_t _dtb_early_pa; #define dtb_early_pa _dtb_early_pa #endif /* CONFIG_XIP_KERNEL */ -void setup_bootmem(void); void paging_init(void); void misc_mem_init(void); diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 03901d3a8b0273..4db4d0b5911fd9 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -276,7 +276,6 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); efi_init(); - setup_bootmem(); paging_init(); #if IS_ENABLED(CONFIG_BUILTIN_DTB) unflatten_and_copy_device_tree(); diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 11b61bea0c4d3a..dab317126846d8 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -114,7 +114,7 @@ void __init mem_init(void) print_vm_layout(); } -void __init setup_bootmem(void) +static void __init setup_bootmem(void) { phys_addr_t vmlinux_end = __pa_symbol(&_end); phys_addr_t vmlinux_start = __pa_symbol(&_start); @@ -861,6 +861,7 @@ RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup); void __init paging_init(void) { + setup_bootmem(); setup_vm_final(); } From 50bae95e17c6dd0b7a2a3a92ad8808067234e9ef Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Fri, 14 May 2021 17:49:08 +0800 Subject: [PATCH 09/62] riscv: mm: Drop redundant _sdata and _edata declaration The _sdata/_edata is already in sections.h, drop redundant declaration. Also move _xiprom/_exiprom declarations at the beginning of the file, cleanup one CONFIG_XIP_KERNEL. 
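For reference, the generic header already carries these declarations, which is
what makes the arch-local copy redundant:

	/* include/asm-generic/sections.h (excerpt) */
	extern char _sdata[], _edata[];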
Signed-off-by: Kefeng Wang Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/init.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index dab317126846d8..2d80088f33d565 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -33,6 +33,7 @@ unsigned long kernel_virt_addr = KERNEL_LINK_ADDR; EXPORT_SYMBOL(kernel_virt_addr); #ifdef CONFIG_XIP_KERNEL #define kernel_virt_addr (*((unsigned long *)XIP_FIXUP(&kernel_virt_addr))) +extern char _xiprom[], _exiprom[]; #endif unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] @@ -171,13 +172,6 @@ static void __init setup_bootmem(void) memblock_allow_resize(); } -#ifdef CONFIG_XIP_KERNEL - -extern char _xiprom[], _exiprom[]; -extern char _sdata[], _edata[]; - -#endif /* CONFIG_XIP_KERNEL */ - #ifdef CONFIG_MMU static struct pt_alloc_ops _pt_ops __ro_after_init; From 8237c5243a614d33fe339bc844f90aa2b393c2a8 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Wed, 12 May 2021 01:42:31 +0800 Subject: [PATCH 10/62] riscv: Optimize switch_mm by passing "cpu" to flush_icache_deferred() Directly passing the cpu to flush_icache_deferred() rather than calling smp_processor_id() again. Signed-off-by: Jisheng Zhang [Palmer: drop the QEMU performance numbers, and update the comment] Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/context.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index 68aa312fc35245..83e7ae37675a3e 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -280,11 +280,12 @@ static inline void set_mm(struct mm_struct *mm, unsigned int cpu) * cache flush to be performed before execution resumes on each hart. This * actually performs that local instruction cache flush, which implicitly only * refers to the current hart. + * + * The "cpu" argument must be the current local CPU number. */ -static inline void flush_icache_deferred(struct mm_struct *mm) +static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu) { #ifdef CONFIG_SMP - unsigned int cpu = smp_processor_id(); cpumask_t *mask = &mm->context.icache_stale_mask; if (cpumask_test_cpu(cpu, mask)) { @@ -320,5 +321,5 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, set_mm(next, cpu); - flush_icache_deferred(next); + flush_icache_deferred(next, cpu); } From 37a7a2a10ec525a79d733008bc7fe4ebbca34382 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Wed, 12 May 2021 22:55:45 +0800 Subject: [PATCH 11/62] riscv: Turn has_fpu into a static key if FPU=y The has_fpu check sits at hot code path: switch_to(). Currently, has_fpu is a bool variable if FPU=y, switch_to() checks it each time, we can optimize out this check by turning the has_fpu into a static key. 
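For context, a hedged sketch of the generic pattern this converts has_fpu to
(my_feature and fast_path() are made-up names; the real definitions live in
linux/jump_label.h):

	DEFINE_STATIC_KEY_FALSE(my_feature);	/* key starts disabled */

	if (static_branch_likely(&my_feature))	/* patched jump/nop, not a
						 * variable load and test */
		fast_path();

	static_branch_enable(&my_feature);	/* flip the key at runtime */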
Signed-off-by: Jisheng Zhang Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/switch_to.h | 11 ++++++++--- arch/riscv/kernel/cpufeature.c | 4 ++-- arch/riscv/kernel/process.c | 2 +- arch/riscv/kernel/signal.c | 4 ++-- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index 407bcc96a71093..0a3f4f95c55554 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -6,6 +6,7 @@ #ifndef _ASM_RISCV_SWITCH_TO_H #define _ASM_RISCV_SWITCH_TO_H +#include #include #include #include @@ -55,9 +56,13 @@ static inline void __switch_to_aux(struct task_struct *prev, fstate_restore(next, task_pt_regs(next)); } -extern bool has_fpu; +extern struct static_key_false cpu_hwcap_fpu; +static __always_inline bool has_fpu(void) +{ + return static_branch_likely(&cpu_hwcap_fpu); +} #else -#define has_fpu false +static __always_inline bool has_fpu(void) { return false; } #define fstate_save(task, regs) do { } while (0) #define fstate_restore(task, regs) do { } while (0) #define __switch_to_aux(__prev, __next) do { } while (0) @@ -70,7 +75,7 @@ extern struct task_struct *__switch_to(struct task_struct *, do { \ struct task_struct *__prev = (prev); \ struct task_struct *__next = (next); \ - if (has_fpu) \ + if (has_fpu()) \ __switch_to_aux(__prev, __next); \ ((last) = __switch_to(__prev, __next)); \ } while (0) diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index ac202f44a67024..a2848dc3692723 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -19,7 +19,7 @@ unsigned long elf_hwcap __read_mostly; static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly; #ifdef CONFIG_FPU -bool has_fpu __read_mostly; +__ro_after_init DEFINE_STATIC_KEY_FALSE(cpu_hwcap_fpu); #endif /** @@ -146,6 +146,6 @@ void riscv_fill_hwcap(void) #ifdef CONFIG_FPU if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)) - has_fpu = true; + static_branch_enable(&cpu_hwcap_fpu); #endif } diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index f9cd57c9c67d2d..03ac3aa611f59c 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -87,7 +87,7 @@ void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) { regs->status = SR_PIE; - if (has_fpu) { + if (has_fpu()) { regs->status |= SR_FS_INITIAL; /* * Restore the initial value to the FP register diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 65942b3748b41b..c2d5ecbe552648 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -90,7 +90,7 @@ static long restore_sigcontext(struct pt_regs *regs, /* sc_regs is structured the same as the start of pt_regs */ err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs)); /* Restore the floating-point state. */ - if (has_fpu) + if (has_fpu()) err |= restore_fp_state(regs, &sc->sc_fpregs); return err; } @@ -143,7 +143,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame, /* sc_regs is structured the same as the start of pt_regs */ err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs)); /* Save the floating-point state. 
*/ - if (has_fpu) + if (has_fpu()) err |= save_fp_state(regs, &sc->sc_fpregs); return err; } From 8c9f4940c27dd72ee68ca5af2922e4d83ca9121b Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Mon, 19 Apr 2021 00:29:19 +0800 Subject: [PATCH 12/62] riscv: kprobes: Remove redundant kprobe_step_ctx Inspired by commit ba090f9cafd5 ("arm64: kprobes: Remove redundant kprobe_step_ctx"), the ss_pending and match_addr of kprobe_step_ctx are redundant because those can be replaced by KPROBE_HIT_SS and &cur_kprobe->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) respectively. Remove the kprobe_step_ctx to simplify the code. Signed-off-by: Jisheng Zhang Reviewed-by: Masami Hiramatsu Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/kprobes.h | 7 ------ arch/riscv/kernel/probes/kprobes.c | 40 +++++++----------------------- 2 files changed, 9 insertions(+), 38 deletions(-) diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h index 4647d38018f6a6..9ea9b5ec311392 100644 --- a/arch/riscv/include/asm/kprobes.h +++ b/arch/riscv/include/asm/kprobes.h @@ -29,18 +29,11 @@ struct prev_kprobe { unsigned int status; }; -/* Single step context for kprobe */ -struct kprobe_step_ctx { - unsigned long ss_pending; - unsigned long match_addr; -}; - /* per-cpu kprobe control block */ struct kprobe_ctlblk { unsigned int kprobe_status; unsigned long saved_status; struct prev_kprobe prev_kprobe; - struct kprobe_step_ctx ss_ctx; }; void arch_remove_kprobe(struct kprobe *p); diff --git a/arch/riscv/kernel/probes/kprobes.c b/arch/riscv/kernel/probes/kprobes.c index 10b965c345366a..fbd127d5dc04ac 100644 --- a/arch/riscv/kernel/probes/kprobes.c +++ b/arch/riscv/kernel/probes/kprobes.c @@ -17,7 +17,7 @@ DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes -post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); +post_kprobe_handler(struct kprobe *, struct kprobe_ctlblk *, struct pt_regs *); static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { @@ -43,7 +43,7 @@ static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) p->ainsn.api.handler((u32)p->opcode, (unsigned long)p->addr, regs); - post_kprobe_handler(kcb, regs); + post_kprobe_handler(p, kcb, regs); } int __kprobes arch_prepare_kprobe(struct kprobe *p) @@ -149,21 +149,6 @@ static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, regs->status = kcb->saved_status; } -static void __kprobes -set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr, struct kprobe *p) -{ - unsigned long offset = GET_INSN_LENGTH(p->opcode); - - kcb->ss_ctx.ss_pending = true; - kcb->ss_ctx.match_addr = addr + offset; -} - -static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb) -{ - kcb->ss_ctx.ss_pending = false; - kcb->ss_ctx.match_addr = 0; -} - static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) @@ -182,8 +167,6 @@ static void __kprobes setup_singlestep(struct kprobe *p, /* prepare for single stepping */ slot = (unsigned long)p->ainsn.api.insn; - set_ss_context(kcb, slot, p); /* mark pending ss */ - /* IRQs and single stepping do not mix well. 
*/ kprobes_save_local_irqflag(kcb, regs); @@ -219,13 +202,8 @@ static int __kprobes reenter_kprobe(struct kprobe *p, } static void __kprobes -post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) +post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb, struct pt_regs *regs) { - struct kprobe *cur = kprobe_running(); - - if (!cur) - return; - /* return addr restore if non-branching insn */ if (cur->ainsn.api.restore != 0) regs->epc = cur->ainsn.api.restore; @@ -357,16 +335,16 @@ bool __kprobes kprobe_single_step_handler(struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + unsigned long addr = instruction_pointer(regs); + struct kprobe *cur = kprobe_running(); - if ((kcb->ss_ctx.ss_pending) - && (kcb->ss_ctx.match_addr == instruction_pointer(regs))) { - clear_ss_context(kcb); /* clear pending ss */ - + if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) && + ((unsigned long)&cur->ainsn.api.insn[0] + GET_INSN_LENGTH(cur->opcode) == addr)) { kprobes_restore_local_irqflag(kcb, regs); - - post_kprobe_handler(kcb, regs); + post_kprobe_handler(cur, kcb, regs); return true; } + /* not ours, kprobes should ignore it */ return false; } From 3df952ae2ac81fbc5d44b014e5462b53d1decbb5 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Sun, 16 May 2021 20:59:42 +0800 Subject: [PATCH 13/62] riscv: Add __init section marker to some functions again These functions are not needed after booting, so mark them as __init to move them to the __init section. Signed-off-by: Jisheng Zhang Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/cpufeature.c | 2 +- arch/riscv/mm/context.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index a2848dc3692723..d959d207a40d6c 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -59,7 +59,7 @@ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) } EXPORT_SYMBOL_GPL(__riscv_isa_extension_available); -void riscv_fill_hwcap(void) +void __init riscv_fill_hwcap(void) { struct device_node *node; const char *isa; diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index 83e7ae37675a3e..9bc46ab01c25a6 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -213,7 +213,7 @@ static inline void set_mm(struct mm_struct *mm, unsigned int cpu) set_mm_noasid(mm); } -static int asids_init(void) +static int __init asids_init(void) { unsigned long old; From 010623568222bd144eb73aa9f3b46c79b63d7676 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Sun, 16 May 2021 21:15:56 +0800 Subject: [PATCH 14/62] riscv: mm: init: Consolidate vars, functions Consolidate the following items in init.c Staticize global vars as much as possible; Add __initdata mark if the global var isn't needed after init Add __init mark if the func isn't needed after init Add __ro_after_init if the global var is read only after init Signed-off-by: Jisheng Zhang Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/set_memory.h | 2 +- arch/riscv/mm/init.c | 36 +++++++++++++++-------------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h index 086f757e8ba3cc..9d4d455726d45b 100644 --- a/arch/riscv/include/asm/set_memory.h +++ b/arch/riscv/include/asm/set_memory.h @@ -27,7 +27,7 @@ static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; #endif #if defined(CONFIG_64BIT) && 
defined(CONFIG_STRICT_KERNEL_RWX) -void protect_kernel_linear_mapping_text_rodata(void); +void __init protect_kernel_linear_mapping_text_rodata(void); #else static inline void protect_kernel_linear_mapping_text_rodata(void) {} #endif diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 2d80088f33d565..ae32f78207f0ba 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -54,7 +54,7 @@ struct pt_alloc_ops { #endif }; -static phys_addr_t dma32_phys_limit __ro_after_init; +static phys_addr_t dma32_phys_limit __initdata; static void __init zone_sizes_init(void) { @@ -173,7 +173,7 @@ static void __init setup_bootmem(void) } #ifdef CONFIG_MMU -static struct pt_alloc_ops _pt_ops __ro_after_init; +static struct pt_alloc_ops _pt_ops __initdata; #ifdef CONFIG_XIP_KERNEL #define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops)) @@ -189,13 +189,13 @@ EXPORT_SYMBOL(va_pa_offset); #endif /* Offset between kernel mapping virtual address and kernel load address */ #ifdef CONFIG_64BIT -unsigned long va_kernel_pa_offset; +unsigned long va_kernel_pa_offset __ro_after_init; EXPORT_SYMBOL(va_kernel_pa_offset); #endif #ifdef CONFIG_XIP_KERNEL #define va_kernel_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_pa_offset))) #endif -unsigned long va_kernel_xip_pa_offset; +unsigned long va_kernel_xip_pa_offset __ro_after_init; EXPORT_SYMBOL(va_kernel_xip_pa_offset); #ifdef CONFIG_XIP_KERNEL #define va_kernel_xip_pa_offset (*((unsigned long *)XIP_FIXUP(&va_kernel_xip_pa_offset))) @@ -205,7 +205,7 @@ EXPORT_SYMBOL(pfn_base); pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss; -pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; +static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss; pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); @@ -242,7 +242,7 @@ static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa) return (pte_t *)set_fixmap_offset(FIX_PTE, pa); } -static inline pte_t *get_pte_virt_late(phys_addr_t pa) +static inline pte_t *__init get_pte_virt_late(phys_addr_t pa) { return (pte_t *) __va(pa); } @@ -261,7 +261,7 @@ static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va) return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); } -static phys_addr_t alloc_pte_late(uintptr_t va) +static phys_addr_t __init alloc_pte_late(uintptr_t va) { unsigned long vaddr; @@ -285,10 +285,10 @@ static void __init create_pte_mapping(pte_t *ptep, #ifndef __PAGETABLE_PMD_FOLDED -pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss; -pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; -pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); -pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); +static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss; +static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; +static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); +static pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); #ifdef CONFIG_XIP_KERNEL #define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd)) @@ -308,7 +308,7 @@ static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa) return (pmd_t *)set_fixmap_offset(FIX_PMD, pa); } -static pmd_t *get_pmd_virt_late(phys_addr_t pa) +static pmd_t *__init get_pmd_virt_late(phys_addr_t pa) { return (pmd_t *) __va(pa); } @@ -325,7 +325,7 @@ static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va) return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); } -static phys_addr_t alloc_pmd_late(uintptr_t va) +static phys_addr_t __init 
alloc_pmd_late(uintptr_t va) { unsigned long vaddr; @@ -443,14 +443,16 @@ asmlinkage void __init __copy_data(void) #error "setup_vm() is called from head.S before relocate so it should not use absolute addressing." #endif -uintptr_t load_pa, load_sz; +static uintptr_t load_pa __initdata; +static uintptr_t load_sz __initdata; #ifdef CONFIG_XIP_KERNEL #define load_pa (*((uintptr_t *)XIP_FIXUP(&load_pa))) #define load_sz (*((uintptr_t *)XIP_FIXUP(&load_sz))) #endif #ifdef CONFIG_XIP_KERNEL -uintptr_t xiprom, xiprom_sz; +static uintptr_t xiprom __inidata; +static uintptr_t xiprom_sz __initdata; #define xiprom_sz (*((uintptr_t *)XIP_FIXUP(&xiprom_sz))) #define xiprom (*((uintptr_t *)XIP_FIXUP(&xiprom))) @@ -635,7 +637,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) } #if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX) -void protect_kernel_linear_mapping_text_rodata(void) +void __init protect_kernel_linear_mapping_text_rodata(void) { unsigned long text_start = (unsigned long)lm_alias(_start); unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin); @@ -843,7 +845,7 @@ static void __init reserve_crashkernel(void) * reserved once we call early_init_fdt_scan_reserved_mem() * later on. */ -static int elfcore_hdr_setup(struct reserved_mem *rmem) +static int __init elfcore_hdr_setup(struct reserved_mem *rmem) { elfcorehdr_addr = rmem->base; elfcorehdr_size = rmem->size; From 7fa865f5640a46ed9d3655dd19583fe750e85a8a Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 24 May 2021 17:13:02 -0700 Subject: [PATCH 15/62] riscv: TRANSPARENT_HUGEPAGE: depends on MMU Fix a Kconfig warning and many build errors: WARNING: unmet direct dependencies detected for COMPACTION Depends on [n]: MMU [=n] Selected by [y]: - TRANSPARENT_HUGEPAGE [=y] && HAVE_ARCH_TRANSPARENT_HUGEPAGE [=y] and the subseqent thousands of build errors and warnings. Fixes: e88b333142e4 ("riscv: mm: add THP support on 64-bit") Signed-off-by: Randy Dunlap Acked-by: Mike Rapoport Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index b58596b141fcc5..a71b1d2cab3db8 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -105,7 +105,7 @@ config RISCV select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK select UACCESS_MEMCPY if !MMU - select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT + select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT From cba43c31f14b08f193ebb5b4a72751b0947436c1 Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Wed, 26 May 2021 05:49:20 +0000 Subject: [PATCH 16/62] riscv: Use global mappings for kernel pages We map kernel pages into all addresses spages, so they can be marked as global. This allows hardware to avoid flushing the kernel mappings when moving between address spaces. 
Signed-off-by: Guo Ren Reviewed-by: Anup Patel Reviewed-by: Christoph Hellwig [Palmer: commit text] Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index bde8ce3bfe7cd7..c103f0a278e52b 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -134,7 +134,8 @@ | _PAGE_WRITE \ | _PAGE_PRESENT \ | _PAGE_ACCESSED \ - | _PAGE_DIRTY) + | _PAGE_DIRTY \ + | _PAGE_GLOBAL) #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) #define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) From b59459088e629d847c316dd229b09dea3146d2f4 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 27 May 2021 19:57:35 +0200 Subject: [PATCH 17/62] dt-bindings: usb: cdns,usb3: Fix interrupts order Correct the order of the descriptions for the "interrupts" property to match the order of the "interrupt-names" property. Fixes: 68989fe1c39d9b32 ("dt-bindings: usb: Convert cdns-usb3.txt to YAML schema") Signed-off-by: Geert Uytterhoeven --- Documentation/devicetree/bindings/usb/cdns,usb3.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml index a407e1143cf425..8dedfa16c9929d 100644 --- a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml +++ b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml @@ -28,9 +28,9 @@ properties: interrupts: minItems: 3 items: - - description: OTG/DRD controller interrupt - description: XHCI host controller interrupt - description: Device controller interrupt + - description: OTG/DRD controller interrupt - description: interrupt used to wake up core, e.g when usbcmd.rs is cleared by xhci core, this interrupt is optional From 6535d6fddc46b7ef7600a15f26263e908938ff29 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 27 May 2021 20:03:18 +0200 Subject: [PATCH 18/62] mmc: dw_mmc-pltfm: Remove unused As of commit 4cdc2ec1da322776 ("mmc: dw_mmc: move rockchip related code to a separate file"), dw_mmc-pltfm.c no longer uses the clock API. Signed-off-by: Geert Uytterhoeven --- drivers/mmc/host/dw_mmc-pltfm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c index 73731cd3ba2315..9901208be7973f 100644 --- a/drivers/mmc/host/dw_mmc-pltfm.c +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -17,7 +17,6 @@ #include #include #include -#include #include "dw_mmc.h" #include "dw_mmc-pltfm.h" From c54debc689592d4e4300956ecae349ebe93e3b5d Mon Sep 17 00:00:00 2001 From: Drew Fustini Date: Sun, 30 May 2021 23:42:44 -0700 Subject: [PATCH 19/62] dt-bindings: add StarFive Technology Co. Ltd. Add vendor prefix for StarFive Technology Co. Ltd [1]. StarFive was formed in 2018 and has now produced their first SoC, the JH7100, which contains 64-bit RISC-V cores [2]. It used in the BeagleV Starlight [3]. 
[1] https://starfivetech.com/site/company [2] https://github.com/beagleboard/beaglev-starlight [3] https://github.com/starfive-tech/beaglev_doc Signed-off-by: Drew Fustini --- Documentation/devicetree/bindings/vendor-prefixes.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index b868cefc7c5500..3dd6a42fbbad2b 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -1085,6 +1085,8 @@ patternProperties: (formerly part of MStar Semiconductor, Inc.) "^st,.*": description: STMicroelectronics + "^starfive,.*": + description: StarFive Technology Co. Ltd. "^starry,.*": description: Starry Electronic Technology (ShenZhen) Co., LTD "^startek,.*": From 2c23db84c472d810c2c17744621175ba898cf04f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 1 Jun 2021 16:02:23 +0200 Subject: [PATCH 20/62] [WIP] dt-bindings: clock: starfive: Add preliminary JH7100 bindings Add preliminary Device Tree bindings for the StarFive JH7100 Clock Generator. To be verified against documentation when it becomes available. Signed-off-by: Geert Uytterhoeven --- .../clock/starfive,jh7100-clkgen.yaml | 52 +++++++++++++++++++ 1 file changed, 52 insertions(+) create mode 100644 Documentation/devicetree/bindings/clock/starfive,jh7100-clkgen.yaml diff --git a/Documentation/devicetree/bindings/clock/starfive,jh7100-clkgen.yaml b/Documentation/devicetree/bindings/clock/starfive,jh7100-clkgen.yaml new file mode 100644 index 00000000000000..9e88ac647ae2a7 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/starfive,jh7100-clkgen.yaml @@ -0,0 +1,52 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/clock/starfive,jh7100-clkgen.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: StarFive JH7100 Clock Generator + +maintainers: + - FIXME + - Geert Uytterhoeven + +properties: + compatible: + const: starfive,jh7100-clkgen + + reg: + maxItems: 1 + + clocks: + items: + - description: Main clock source (default 25 MHz) + - description: Application-specific clock source (12-27 MHz) + + clock-names: + items: + - const: osc0 + - const: osc1 + + '#clock-cells': + const: 1 + description: + See for valid indices. + +required: + - compatible + - reg + - clocks + - clock-names + - '#clock-cells' + +additionalProperties: false + +examples: + - | + clkgen: clock-controller@11800000 { + compatible = "starfive,jh7100-clkgen"; + reg = <0x11800000 0x10000>; + clocks = <&osc0_clk>, <&osc1_clk>; + clock-names = "osc0", "osc1"; + #clock-cells = <1>; + }; From e0369eaac760ae5cdc07e434143bcf7239fc9a0e Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 1 Jun 2021 15:57:52 +0200 Subject: [PATCH 21/62] [WIP] dt-bindings: clock: starfive: Add preliminary JH7100 Clock Definitions Add all clock outputs for the StarFive JH7100 Clock Generator, based on the list of fixed-frequency clocks defined in jh7100.dtsi. To be verified against documentation when it becomes available. 
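A hedged consumer sketch (names are illustrative): with a peripheral node
referencing the provider as clocks = <&clkgen JH7100_CLK_UART>, a driver picks
the clock up through the usual clk API:

	#include <linux/clk.h>

	struct clk *clk = devm_clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	return clk_prepare_enable(clk);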
Signed-off-by: Geert Uytterhoeven --- .../clock/starfive-jh7100-clkgen.h | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 include/dt-bindings/clock/starfive-jh7100-clkgen.h diff --git a/include/dt-bindings/clock/starfive-jh7100-clkgen.h b/include/dt-bindings/clock/starfive-jh7100-clkgen.h new file mode 100644 index 00000000000000..5f6807c82ec55a --- /dev/null +++ b/include/dt-bindings/clock/starfive-jh7100-clkgen.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ +/* + * Copyright (C) 2021 Glider bv + */ +#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7100_CLOCK_H__ +#define __DT_BINDINGS_CLOCK_STARFIVE_JH7100_CLOCK_H__ + +/* StarFive JH7100 clocks FIXME PRELIMINARY */ +#define JH7100_CLK_AXI 0 +#define JH7100_CLK_AHB0 1 +#define JH7100_CLK_AHB2 2 +#define JH7100_CLK_APB1 3 +#define JH7100_CLK_APB2 4 +#define JH7100_CLK_VPU 5 +#define JH7100_CLK_JPU 6 +#define JH7100_CLK_PWM 7 +#define JH7100_CLK_DWMMC_BIU 8 +#define JH7100_CLK_DWMMC_CIU 9 +#define JH7100_CLK_UART 10 +#define JH7100_CLK_HS_UART 11 +#define JH7100_CLK_I2C0 12 +#define JH7100_CLK_I2C2 13 +#define JH7100_CLK_QSPI 14 +#define JH7100_CLK_SPI 15 +#define JH7100_CLK_GMAC 16 +#define JH7100_CLK_HF 17 +#define JH7100_CLK_RTC 18 + +#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7100_CLOCK_H__ */ From 09a73a6fbb30a868682b2d6178743d0da3e5e8ae Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 1 Jun 2021 15:57:52 +0200 Subject: [PATCH 22/62] [WIP] clk: starfive: Add preliminary JH7100 Clock Generator Driver Add a preliminary driver for the StarFive JH7100 Clock Generator. For now, all clocks are implemented as fixed-factor clocks relative to osc0, based on the list of fixed-frequency clocks defined in jh7100.dtsi. To be updated when the documentation becomes available. 
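As a worked example of what the fixed-factor table in this driver yields,
assuming the default 25 MHz osc0 from the binding (rate = osc0 * mult / div):
the "axi" entry (mult 20, div 1) registers a 500 MHz clock, "ahb2" (mult 5,
div 1) a 125 MHz clock, and "rtc" (mult 1, div 4) a 6.25 MHz clock.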
Signed-off-by: Geert Uytterhoeven --- drivers/clk/Kconfig | 1 + drivers/clk/Makefile | 1 + drivers/clk/starfive/Kconfig | 9 ++ drivers/clk/starfive/Makefile | 3 + drivers/clk/starfive/clk-starfive-jh7100.c | 124 +++++++++++++++++++++ 5 files changed, 138 insertions(+) create mode 100644 drivers/clk/starfive/Kconfig create mode 100644 drivers/clk/starfive/Makefile create mode 100644 drivers/clk/starfive/clk-starfive-jh7100.c diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index e80918be8e9c45..61b243a14f428b 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -397,6 +397,7 @@ source "drivers/clk/samsung/Kconfig" source "drivers/clk/sifive/Kconfig" source "drivers/clk/socfpga/Kconfig" source "drivers/clk/sprd/Kconfig" +source "drivers/clk/starfive/Kconfig" source "drivers/clk/sunxi/Kconfig" source "drivers/clk/sunxi-ng/Kconfig" source "drivers/clk/tegra/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 5f06879d7fe98c..c154596d1ab5c6 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -109,6 +109,7 @@ obj-y += socfpga/ obj-$(CONFIG_PLAT_SPEAR) += spear/ obj-y += sprd/ obj-$(CONFIG_ARCH_STI) += st/ +obj-$(CONFIG_SOC_STARFIVE_VIC7100) += starfive/ obj-$(CONFIG_ARCH_SUNXI) += sunxi/ obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/ obj-$(CONFIG_ARCH_TEGRA) += tegra/ diff --git a/drivers/clk/starfive/Kconfig b/drivers/clk/starfive/Kconfig new file mode 100644 index 00000000000000..0e23c9a8a66381 --- /dev/null +++ b/drivers/clk/starfive/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 + +config CLK_STARFIVE_JH7100 + bool "StarFive JH7100 clock support" + depends on SOC_STARFIVE_VIC7100 || COMPILE_TEST + default y if SOC_STARFIVE_VIC7100 + help + Say yes here to support the clock controller on the StarFive JH7100 + SoC. 
diff --git a/drivers/clk/starfive/Makefile b/drivers/clk/starfive/Makefile new file mode 100644 index 00000000000000..09759cc735307f --- /dev/null +++ b/drivers/clk/starfive/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +# StarFive Clock +obj-$(CONFIG_CLK_STARFIVE_JH7100) += clk-starfive-jh7100.o diff --git a/drivers/clk/starfive/clk-starfive-jh7100.c b/drivers/clk/starfive/clk-starfive-jh7100.c new file mode 100644 index 00000000000000..a768394c28f0a1 --- /dev/null +++ b/drivers/clk/starfive/clk-starfive-jh7100.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * StarFive JH7100 Clock Generator Driver + * This is part of the PCR (Power/Clock/Reset) Management Unit Driver + * + * FIXME PRELIMINARY + * For now, all clocks are implemented as fixed-factor clocks relative to osc0 + * + * TODO Real clock topology, clock register programming + * PLL0 used for system main logic, including CPU, bus + * PLL1 output to support DDR, DLA and DSP + * PLL2 output to support slow speed peripherals, video input and video output + * + * Copyright (C) 2021 Glider bv + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static const struct jh7100_clk { + const char *name; + unsigned int mult; + unsigned int div; +} jh7100_clks[] = { + [JH7100_CLK_AXI] = { "axi", .mult = 20, .div = 1 }, + [JH7100_CLK_AHB0] = { "ahb0", .mult = 10, .div = 1 }, + [JH7100_CLK_AHB2] = { "ahb2", .mult = 5, .div = 1 }, + [JH7100_CLK_APB1] = { "apb1", .mult = 5, .div = 1 }, + [JH7100_CLK_APB2] = { "apb2", .mult = 5, .div = 1 }, + [JH7100_CLK_VPU] = { "vpu", .mult = 16, .div = 1 }, + [JH7100_CLK_JPU] = { "jpu", .mult = 40, .div = 3 }, + [JH7100_CLK_PWM] = { "pwm", .mult = 5, .div = 1 }, + [JH7100_CLK_DWMMC_BIU] = { "dwmmc-biu", .mult = 4, .div = 1 }, + [JH7100_CLK_DWMMC_CIU] = { "dwmmc-ciu", .mult = 4, .div = 1 }, + [JH7100_CLK_UART] = { "uart", .mult = 4, .div = 1 }, + [JH7100_CLK_HS_UART] = { "hs_uart", .mult = 297, .div = 100 }, + [JH7100_CLK_I2C0] = { "i2c0", .mult = 99, .div = 50 }, + [JH7100_CLK_I2C2] = { "i2c2", .mult = 2, .div = 1 }, + [JH7100_CLK_QSPI] = { "qspi", .mult = 2, .div = 1 }, + [JH7100_CLK_SPI] = { "spi", .mult = 2, .div = 1 }, + [JH7100_CLK_GMAC] = { "gmac", .mult = 1, .div = 1 }, + [JH7100_CLK_HF] = { "hf", .mult = 1, .div = 1 }, + [JH7100_CLK_RTC] = { "rtc", .mult = 1, .div = 4 } +}; + +struct clk_starfive_jh7100_priv { + struct clk_onecell_data data; + void __iomem *base; + struct clk *clks[]; +}; + +static int __init clk_starfive_jh7100_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + unsigned int nclks = ARRAY_SIZE(jh7100_clks); + struct clk_starfive_jh7100_priv *priv; + const char *osc0_name; + struct clk_hw *hw; + struct clk *osc0; + unsigned int i; + + priv = devm_kzalloc(dev, struct_size(priv, clks, nclks), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + osc0 = devm_clk_get(dev, "osc0"); + if (IS_ERR(osc0)) + return PTR_ERR(osc0); + + osc0_name = __clk_get_name(osc0); + + for (i = 0; i < nclks; i++) { + hw = devm_clk_hw_register_fixed_factor(dev, + jh7100_clks[i].name, osc0_name, 0, jh7100_clks[i].mult, + jh7100_clks[i].div); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + priv->clks[i] = hw->clk; + } + + priv->data.clks = priv->clks; + priv->data.clk_num = nclks; + + return of_clk_add_provider(np, of_clk_src_onecell_get, &priv->data); +} + +static const 
struct of_device_id clk_starfive_jh7100_match[] = { + { + .compatible = "starfive,jh7100-clkgen", + }, + { /* sentinel */ } +}; +static struct platform_driver clk_starfive_jh7100_driver = { + .driver = { + .name = "clk-starfive-jh7100", + .of_match_table = clk_starfive_jh7100_match, + }, +}; + +static int __init clk_starfive_jh7100_init(void) +{ + return platform_driver_probe(&clk_starfive_jh7100_driver, + clk_starfive_jh7100_probe); +} + +subsys_initcall(clk_starfive_jh7100_init); + +MODULE_DESCRIPTION("StarFive JH7100 Clock Generator Driver"); +MODULE_AUTHOR("Geert Uytterhoeven "); +MODULE_LICENSE("GPL v2"); From 0fa6dd2ee27cfc9260baa0734e1c18d4fbd2bee7 Mon Sep 17 00:00:00 2001 From: Drew Fustini Date: Mon, 31 May 2021 00:07:38 -0700 Subject: [PATCH 23/62] dt-bindings: gpio: add starfive,jh7100-gpio bindings Add bindings for the GPIO controller in the StarFive JH7100 SoC [1]. [1] https://github.com/starfive-tech/beaglev_doc Signed-off-by: Drew Fustini --- .../bindings/gpio/starfive,jh7100-gpio.yaml | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 Documentation/devicetree/bindings/gpio/starfive,jh7100-gpio.yaml diff --git a/Documentation/devicetree/bindings/gpio/starfive,jh7100-gpio.yaml b/Documentation/devicetree/bindings/gpio/starfive,jh7100-gpio.yaml new file mode 100644 index 00000000000000..8c9d14d9ac3b69 --- /dev/null +++ b/Documentation/devicetree/bindings/gpio/starfive,jh7100-gpio.yaml @@ -0,0 +1,60 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/gpio/starfive,jh7100-gpio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: StarFive JH7100 GPIO controller + +maintainers: + - Huan Feng + - Drew Fustini + +properties: + compatible: + items: + - const: starfive,jh7100-gpio + + reg: + maxItems: 1 + + interrupts: + description: + Interrupt mapping, one per GPIO. Maximum 32 GPIOs. + minItems: 1 + maxItems: 32 + + gpio-controller: true + + "#gpio-cells": + const: 2 + + interrupt-controller: true + + "#interrupt-cells": + const: 2 + +required: + - compatible + - reg + - interrupts + - interrupt-controller + - "#interrupt-cells" + - "#gpio-cells" + - gpio-controller + +additionalProperties: false + +examples: + - | + gpio@11910000 { + compatible = "starfive,jh7100-gpio"; + reg = <0x11910000 0x10000>; + gpio-controller; + #gpio-cells = <2>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <32>; + }; + +... From 02c69530298e01b8a3484883586abbc856cb61c4 Mon Sep 17 00:00:00 2001 From: Huan Feng Date: Fri, 8 Jan 2021 03:19:19 +0800 Subject: [PATCH 24/62] gpio: starfive-jh7100: Add StarFive JH7100 GPIO driver This SoC is used on the BeagleV Starlight JH7100 board [1]. [1] https://github.com/beagleboard/beaglev-starlight Signed-off-by: Emil Renner Berthing Signed-off-by: Drew Fustini --- drivers/gpio/Kconfig | 8 + drivers/gpio/Makefile | 1 + drivers/gpio/gpio-starfive-jh7100.c | 545 ++++++++++++++++++++++++++++ include/linux/gpio-starfive-vic.h | 384 ++++++++++++++++++++ 4 files changed, 938 insertions(+) create mode 100755 drivers/gpio/gpio-starfive-jh7100.c create mode 100644 include/linux/gpio-starfive-vic.h diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 1dd0ec6727fde2..26630e4852c077 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -542,6 +542,14 @@ config GPIO_SIFIVE help Say yes here to support the GPIO device on SiFive SoCs. 
+config GPIO_STARFIVE_JH7100 + bool "StarFive JH7100 GPIO support" + depends on OF_GPIO + select GPIOLIB_IRQCHIP + default y if SOC_STARFIVE_VIC7100 + help + Say yes here to support the GPIO device on StarFive JH7100 SoC. + config GPIO_SIOX tristate "SIOX GPIO support" depends on SIOX diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index d7c81e1611a4d5..939922eaf5f355 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -132,6 +132,7 @@ obj-$(CONFIG_GPIO_SAMA5D2_PIOBU) += gpio-sama5d2-piobu.o obj-$(CONFIG_GPIO_SCH311X) += gpio-sch311x.o obj-$(CONFIG_GPIO_SCH) += gpio-sch.o obj-$(CONFIG_GPIO_SIFIVE) += gpio-sifive.o +obj-$(CONFIG_GPIO_STARFIVE_JH7100) += gpio-starfive-jh7100.o obj-$(CONFIG_GPIO_SIOX) += gpio-siox.o obj-$(CONFIG_GPIO_SL28CPLD) += gpio-sl28cpld.o obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o diff --git a/drivers/gpio/gpio-starfive-jh7100.c b/drivers/gpio/gpio-starfive-jh7100.c new file mode 100755 index 00000000000000..d1a8d1a517b34c --- /dev/null +++ b/drivers/gpio/gpio-starfive-jh7100.c @@ -0,0 +1,545 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * GPIO driver for StarFive JH7100 SoC + * + * Copyright (C) 2020 Shanghai StarFive Technology Co., Ltd. + */ + +#include +#include +#include +#include + +#define GPIO_EN 0x0 +#define GPIO_IS_LOW 0x10 +#define GPIO_IS_HIGH 0x14 +#define GPIO_IBE_LOW 0x18 +#define GPIO_IBE_HIGH 0x1c +#define GPIO_IEV_LOW 0x20 +#define GPIO_IEV_HIGH 0x24 +#define GPIO_IE_LOW 0x28 +#define GPIO_IE_HIGH 0x2c +#define GPIO_IC_LOW 0x30 +#define GPIO_IC_HIGH 0x34 +//read only +#define GPIO_RIS_LOW 0x38 +#define GPIO_RIS_HIGH 0x3c +#define GPIO_MIS_LOW 0x40 +#define GPIO_MIS_HIGH 0x44 +#define GPIO_DIN_LOW 0x48 +#define GPIO_DIN_HIGH 0x4c + +#define GPIO_DOUT_X_REG 0x50 +#define GPIO_DOEN_X_REG 0x54 + +#define MAX_GPIO 64 + +struct starfive_gpio { + raw_spinlock_t lock; + void __iomem *base; + struct gpio_chip gc; + unsigned long enabled; + unsigned int trigger[MAX_GPIO]; + unsigned int irq_parent[MAX_GPIO]; +}; + +static DEFINE_SPINLOCK(sfg_lock); + +static void __iomem *gpio_base; + +static int starfive_direction_input(struct gpio_chip *gc, unsigned int offset) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned long flags; + + if (offset >= gc->ngpio) + return -EINVAL; + + raw_spin_lock_irqsave(&chip->lock, flags); + writel_relaxed(0x1, chip->base + GPIO_DOEN_X_REG + offset * 8); + raw_spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int starfive_direction_output(struct gpio_chip *gc, unsigned int offset, int value) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned long flags; + + if (offset >= gc->ngpio) + return -EINVAL; + + raw_spin_lock_irqsave(&chip->lock, flags); + writel_relaxed(0x0, chip->base + GPIO_DOEN_X_REG + offset * 8); + writel_relaxed(value, chip->base + GPIO_DOUT_X_REG + offset * 8); + raw_spin_unlock_irqrestore(&chip->lock, flags); + + return 0; +} + +static int starfive_get_direction(struct gpio_chip *gc, unsigned int offset) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + + if (offset >= gc->ngpio) + return -EINVAL; + + return readl_relaxed(chip->base + GPIO_DOEN_X_REG + offset * 8) & 0x1; +} + +static int starfive_get_value(struct gpio_chip *gc, unsigned int offset) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + int value; + + if (offset >= gc->ngpio) + return -EINVAL; + + if (offset < 32) { + value = readl_relaxed(chip->base + GPIO_DIN_LOW); + value = (value >> offset) & 0x1; + } else { + value = readl_relaxed(chip->base + 
GPIO_DIN_HIGH); + value = (value >> (offset - 32)) & 0x1; + } + + return value; +} + +static void starfive_set_value(struct gpio_chip *gc, unsigned int offset, int value) +{ + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned long flags; + + if (offset >= gc->ngpio) + return; + + raw_spin_lock_irqsave(&chip->lock, flags); + writel_relaxed(value, chip->base + GPIO_DOUT_X_REG + offset * 8); + raw_spin_unlock_irqrestore(&chip->lock, flags); +} + +static void starfive_set_ie(struct starfive_gpio *chip, int offset) +{ + unsigned long flags; + int old_value, new_value; + int reg_offset, index; + + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + raw_spin_lock_irqsave(&chip->lock, flags); + old_value = readl_relaxed(chip->base + GPIO_IE_LOW + reg_offset); + new_value = old_value | (1 << index); + writel_relaxed(new_value, chip->base + GPIO_IE_LOW + reg_offset); + raw_spin_unlock_irqrestore(&chip->lock, flags); +} + +static int starfive_irq_set_type(struct irq_data *d, unsigned int trigger) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d); + unsigned int reg_is, reg_ibe, reg_iev; + int reg_offset, index; + + if (offset < 0 || offset >= gc->ngpio) + return -EINVAL; + + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + switch (trigger) { + case IRQ_TYPE_LEVEL_HIGH: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is &= ~(1 << index); + reg_ibe &= ~(1 << index); + reg_iev |= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + case IRQ_TYPE_LEVEL_LOW: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is &= ~(1 << index); + reg_ibe &= ~(1 << index); + reg_iev &= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + case IRQ_TYPE_EDGE_BOTH: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + //reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is |= ~(1 << index); + reg_ibe |= ~(1 << index); + //reg_iev |= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + //writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + case IRQ_TYPE_EDGE_RISING: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is |= ~(1 << index); + reg_ibe &= ~(1 << index); + reg_iev |= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + case 
IRQ_TYPE_EDGE_FALLING: + reg_is = readl_relaxed(chip->base + GPIO_IS_LOW + reg_offset); + reg_ibe = readl_relaxed(chip->base + GPIO_IBE_LOW + reg_offset); + reg_iev = readl_relaxed(chip->base + GPIO_IEV_LOW + reg_offset); + reg_is |= ~(1 << index); + reg_ibe &= ~(1 << index); + reg_iev &= (1 << index); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + writel_relaxed(reg_is, chip->base + GPIO_IS_LOW + reg_offset); + break; + } + + chip->trigger[offset] = trigger; + starfive_set_ie(chip, offset); + return 0; +} + +/* chained_irq_{enter,exit} already mask the parent */ +static void starfive_irq_mask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned int value; + int offset = irqd_to_hwirq(d); + int reg_offset, index; + + if (offset < 0 || offset >= gc->ngpio) + return; + + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + + value = readl_relaxed(chip->base + GPIO_IE_LOW + reg_offset); + value &= ~(0x1 << index); + writel_relaxed(value, chip->base + GPIO_IE_LOW + reg_offset); +} + +static void starfive_irq_unmask(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + unsigned int value; + int offset = irqd_to_hwirq(d); + int reg_offset, index; + + if (offset < 0 || offset >= gc->ngpio) + return; + + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + + value = readl_relaxed(chip->base + GPIO_IE_LOW + reg_offset); + value |= (0x1 << index); + writel_relaxed(value, chip->base + GPIO_IE_LOW + reg_offset); +} + +static void starfive_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d); + + starfive_irq_unmask(d); + assign_bit(offset, &chip->enabled, 1); +} + +static void starfive_irq_disable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct starfive_gpio *chip = gpiochip_get_data(gc); + int offset = irqd_to_hwirq(d) % MAX_GPIO; // must not fail + + assign_bit(offset, &chip->enabled, 0); + starfive_set_ie(chip, offset); +} + +static struct irq_chip starfive_irqchip = { + .name = "starfive-jh7100-gpio", + .irq_set_type = starfive_irq_set_type, + .irq_mask = starfive_irq_mask, + .irq_unmask = starfive_irq_unmask, + .irq_enable = starfive_irq_enable, + .irq_disable = starfive_irq_disable, +}; + +static irqreturn_t starfive_irq_handler(int irq, void *gc) +{ + int offset; + int reg_offset, index; + unsigned int value; + unsigned long flags; + struct starfive_gpio *chip = gc; + + for (offset = 0; offset < 64; offset++) { + if (offset < 32) { + reg_offset = 0; + index = offset; + } else { + reg_offset = 4; + index = offset - 32; + } + + raw_spin_lock_irqsave(&chip->lock, flags); + value = readl_relaxed(chip->base + GPIO_MIS_LOW + reg_offset); + if (value & BIT(index)) + writel_relaxed(BIT(index), chip->base + GPIO_IC_LOW + + reg_offset); + + /* generic_handle_irq(irq_find_mapping(chip->gc.irq.domain, offset)); */ + raw_spin_unlock_irqrestore(&chip->lock, flags); + } + + return IRQ_HANDLED; +} + +void sf_vic_gpio_dout_reverse(int gpio, int en) +{ + unsigned int value; + int offset; + + if (!gpio_base) + return; + + offset = gpio * 8 + GPIO_DOUT_X_REG; + + spin_lock(&sfg_lock); 
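+	/*
+	 * Read-modify-write of bit 31 in this GPIO's DOUT configuration
+	 * word; going by the function name, that bit selects output signal
+	 * inversion ("reverse") for the pad.
+	 */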
+ value = ioread32(gpio_base + offset); + value &= ~(0x1 << 31); + value |= (en & 0x1) << 31; + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_dout_reverse); + +void sf_vic_gpio_dout_value(int gpio, int v) +{ + unsigned int value; + int offset; + + if (!gpio_base) + return; + + offset = gpio * 8 + GPIO_DOUT_X_REG; + spin_lock(&sfg_lock); + value = ioread32(gpio_base + offset); + value &= ~(0xFF); + value |= (v&0xFF); + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_dout_value); + +void sf_vic_gpio_dout_low(int gpio) +{ + sf_vic_gpio_dout_value(gpio, 0); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_dout_low); + +void sf_vic_gpio_dout_high(int gpio) +{ + sf_vic_gpio_dout_value(gpio, 1); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_dout_high); + +void sf_vic_gpio_doen_reverse(int gpio, int en) +{ + unsigned int value; + int offset; + + if (!gpio_base) + return; + + offset = gpio * 8 + GPIO_DOEN_X_REG; + + spin_lock(&sfg_lock); + value = ioread32(gpio_base + offset); + value &= ~(0x1 << 31); + value |= (en & 0x1) << 31; + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_doen_reverse); + +void sf_vic_gpio_doen_value(int gpio, int v) +{ + unsigned int value; + int offset; + + if (!gpio_base) + return; + + offset = gpio * 8 + GPIO_DOEN_X_REG; + + spin_lock(&sfg_lock); + value = ioread32(gpio_base + offset); + value &= ~(0xFF); + value |= (v&0xFF); + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_doen_value); + +void sf_vic_gpio_doen_low(int gpio) +{ + sf_vic_gpio_doen_value(gpio, 0); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_doen_low); + +void sf_vic_gpio_doen_high(int gpio) +{ + sf_vic_gpio_doen_value(gpio, 1); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_doen_high); + +void sf_vic_gpio_manual(int offset, int v) +{ + unsigned int value; + + if (!gpio_base) + return; + + spin_lock(&sfg_lock); + value = ioread32(gpio_base + offset); + value &= ~(0xFF); + value |= (v&0xFF); + iowrite32(value, gpio_base + offset); + spin_unlock(&sfg_lock); +} +EXPORT_SYMBOL_GPL(sf_vic_gpio_manual); + +static int starfive_gpio_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct starfive_gpio *chip; + struct gpio_irq_chip *girq; + struct resource *res; + int irq, ret, ngpio; + + chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + chip->base = devm_ioremap_resource(dev, res); + if (IS_ERR(chip->base)) { + dev_err(dev, "failed to allocate device memory\n"); + return PTR_ERR(chip->base); + } + gpio_base = chip->base; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "Cannot get IRQ resource\n"); + return irq; + } + + raw_spin_lock_init(&chip->lock); + chip->gc.direction_input = starfive_direction_input; + chip->gc.direction_output = starfive_direction_output; + chip->gc.get_direction = starfive_get_direction; + chip->gc.get = starfive_get_value; + chip->gc.set = starfive_set_value; + chip->gc.base = 0; + chip->gc.ngpio = 64; + chip->gc.label = dev_name(dev); + chip->gc.parent = dev; + chip->gc.owner = THIS_MODULE; + + girq = &chip->gc.irq; + girq->chip = &starfive_irqchip; + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->parents = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_simple_irq; + + ret = gpiochip_add_data(&chip->gc, chip); + if (ret) { + dev_err(dev, "gpiochip_add_data 
ret=%d!\n", ret); + return ret; + } + + /* Disable all GPIO interrupts before enabling parent interrupts */ + iowrite32(0, chip->base + GPIO_IE_HIGH); + iowrite32(0, chip->base + GPIO_IE_LOW); + chip->enabled = 0; + + ret = devm_request_irq(dev, irq, starfive_irq_handler, IRQF_SHARED, + dev_name(dev), chip); + if (ret) { + dev_err(dev, "IRQ handler registering failed (%d)\n", ret); + return ret; + } + + writel_relaxed(1, chip->base + GPIO_EN); + + dev_info(dev, "StarFive GPIO chip registered %d GPIOs\n", ngpio); + + return 0; +} + +static const struct of_device_id starfive_gpio_match[] = { + { .compatible = "starfive,jh7100-gpio", }, + { }, +}; + +static struct platform_driver starfive_gpio_driver = { + .probe = starfive_gpio_probe, + .driver = { + .name = "gpio_starfive_jh7100", + .of_match_table = of_match_ptr(starfive_gpio_match), + }, +}; + +static int __init starfive_gpio_init(void) +{ + return platform_driver_register(&starfive_gpio_driver); +} +subsys_initcall(starfive_gpio_init); + +static void __exit starfive_gpio_exit(void) +{ + platform_driver_unregister(&starfive_gpio_driver); +} +module_exit(starfive_gpio_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huan Feng "); +MODULE_DESCRIPTION("StarFive JH7100 GPIO driver"); diff --git a/include/linux/gpio-starfive-vic.h b/include/linux/gpio-starfive-vic.h new file mode 100644 index 00000000000000..0afcaf1876fbe5 --- /dev/null +++ b/include/linux/gpio-starfive-vic.h @@ -0,0 +1,384 @@ +#ifndef __GPIO_STARFIVE_VIC_H +#define __GPIO_STARFIVE_VIC_H + +extern void sf_vic_gpio_dout_reverse(int gpio, int en); +/* + * #define SET_GPIO_0_dout_cpu_jtag_tdo { \ + * uint32_t _ezchip_macro_read_value_=MA_INW(gpio_0_dout_REG_ADDR); \ + * _ezchip_macro_read_value_ &= ~(0xFF); \ + * _ezchip_macro_read_value_ |= (0x3&0xFF); \ + * MA_OUTW(gpio_0_dout_REG_ADDR,_ezchip_macro_read_value_); \ + * } + * in this example gpio is: 0, and v is: 0x3 + */ +extern void sf_vic_gpio_dout_value(int gpio, int v); +extern void sf_vic_gpio_dout_low(int gpio); +extern void sf_vic_gpio_dout_high(int gpio); + +extern void sf_vic_gpio_doen_reverse(int gpio, int en); +/* + * the same as sf_vic_gpio_dout_value + */ +extern void sf_vic_gpio_doen_value(int gpio, int v); +extern void sf_vic_gpio_doen_low(int gpio); +extern void sf_vic_gpio_doen_high(int gpio); + +/* + *#define SET_GPIO_uart2_pad_sin(gpio) { \ + * uint32_t _ezchip_macro_read_value_=MA_INW(gpio_uart2_pad_sin_REG_ADDR); \ + * _ezchip_macro_read_value_ &= ~(0xFF); \ + * _ezchip_macro_read_value_ |= ((gpio+2)&0xFF); \ + * MA_OUTW(gpio_uart2_pad_sin_REG_ADDR,_ezchip_macro_read_value_); \ + *} + * in this example offset is: 0x370, the offset of gpio_uart2_pad_sin_REG_ADDR + * and v is: gpio + 2 + */ +extern void sf_vic_gpio_manual(int offset, int v); + +#define SET_GPIO_dout_reverse_(gpionum, en) sf_vic_gpio_dout_reverse(gpionum, en) +#define SET_GPIO_dout_LOW(gpionum) sf_vic_gpio_dout_value(gpionum, 0x0) +#define SET_GPIO_dout_HIGH(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1) +#define SET_GPIO_dout_clk_gmac_tophyref(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2) +#define SET_GPIO_dout_cpu_jtag_tdo(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3) +#define SET_GPIO_dout_cpu_jtag_tdo_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4) +#define SET_GPIO_dout_dmic_clk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5) +#define SET_GPIO_dout_dsp_JTDOEn_pad(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6) +#define SET_GPIO_dout_dsp_JTDO_pad(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7) +#define SET_GPIO_dout_i2c0_pad_sck_oe(gpionum) 
sf_vic_gpio_dout_value(gpionum, 0x8) +#define SET_GPIO_dout_i2c0_pad_sda_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0x9) +#define SET_GPIO_dout_i2c1_pad_sck_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xa) +#define SET_GPIO_dout_i2c1_pad_sda_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xb) +#define SET_GPIO_dout_i2c2_pad_sck_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xc) +#define SET_GPIO_dout_i2c2_pad_sda_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xd) +#define SET_GPIO_dout_i2c3_pad_sck_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xe) +#define SET_GPIO_dout_i2c3_pad_sda_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0xf) +#define SET_GPIO_dout_i2srx_bclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x10) +#define SET_GPIO_dout_i2srx_bclk_out_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x11) +#define SET_GPIO_dout_i2srx_lrck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x12) +#define SET_GPIO_dout_i2srx_lrck_out_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x13) +#define SET_GPIO_dout_i2srx_mclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x14) +#define SET_GPIO_dout_i2stx_bclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x15) +#define SET_GPIO_dout_i2stx_bclk_out_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x16) +#define SET_GPIO_dout_i2stx_lrck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x17) +#define SET_GPIO_dout_i2stx_lrckout_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x18) +#define SET_GPIO_dout_i2stx_mclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x19) +#define SET_GPIO_dout_i2stx_sdout0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1a) +#define SET_GPIO_dout_i2stx_sdout1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1b) +#define SET_GPIO_dout_lcd_pad_csm_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1c) +#define SET_GPIO_dout_pwm_pad_oe_n_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1d) +#define SET_GPIO_dout_pwm_pad_oe_n_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1e) +#define SET_GPIO_dout_pwm_pad_oe_n_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x1f) +#define SET_GPIO_dout_pwm_pad_oe_n_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x20) +#define SET_GPIO_dout_pwm_pad_oe_n_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x21) +#define SET_GPIO_dout_pwm_pad_oe_n_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x22) +#define SET_GPIO_dout_pwm_pad_oe_n_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x23) +#define SET_GPIO_dout_pwm_pad_oe_n_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x24) +#define SET_GPIO_dout_pwm_pad_out_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x25) +#define SET_GPIO_dout_pwm_pad_out_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x26) +#define SET_GPIO_dout_pwm_pad_out_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x27) +#define SET_GPIO_dout_pwm_pad_out_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x28) +#define SET_GPIO_dout_pwm_pad_out_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x29) +#define SET_GPIO_dout_pwm_pad_out_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2a) +#define SET_GPIO_dout_pwm_pad_out_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2b) +#define SET_GPIO_dout_pwm_pad_out_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2c) +#define SET_GPIO_dout_pwmdac_left_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2d) +#define SET_GPIO_dout_pwmdac_right_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2e) +#define SET_GPIO_dout_qspi_csn1_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x2f) +#define SET_GPIO_dout_qspi_csn2_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x30) +#define SET_GPIO_dout_qspi_csn3_out(gpionum) 
sf_vic_gpio_dout_value(gpionum, 0x31) +#define SET_GPIO_dout_register23_SCFG_cmsensor_rst0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x32) +#define SET_GPIO_dout_register23_SCFG_cmsensor_rst1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x33) +#define SET_GPIO_dout_register32_SCFG_gmac_phy_rstn(gpionum) sf_vic_gpio_dout_value(gpionum, 0x34) +#define SET_GPIO_dout_sdio0_pad_card_power_en(gpionum) sf_vic_gpio_dout_value(gpionum, 0x35) +#define SET_GPIO_dout_sdio0_pad_cclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x36) +#define SET_GPIO_dout_sdio0_pad_ccmd_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0x37) +#define SET_GPIO_dout_sdio0_pad_ccmd_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x38) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x39) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3a) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3b) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3c) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3d) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3e) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x3f) +#define SET_GPIO_dout_sdio0_pad_cdata_oe_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x40) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x41) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x42) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x43) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x44) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x45) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x46) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x47) +#define SET_GPIO_dout_sdio0_pad_cdata_out_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x48) +#define SET_GPIO_dout_sdio0_pad_rst_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x49) +#define SET_GPIO_dout_sdio1_pad_card_power_en(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4a) +#define SET_GPIO_dout_sdio1_pad_cclk_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4b) +#define SET_GPIO_dout_sdio1_pad_ccmd_oe(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4c) +#define SET_GPIO_dout_sdio1_pad_ccmd_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4d) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4e) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x4f) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x50) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x51) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x52) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x53) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x54) +#define SET_GPIO_dout_sdio1_pad_cdata_oe_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x55) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x56) +#define 
SET_GPIO_dout_sdio1_pad_cdata_out_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x57) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x58) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x59) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit4(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5a) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit5(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5b) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit6(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5c) +#define SET_GPIO_dout_sdio1_pad_cdata_out_bit7(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5d) +#define SET_GPIO_dout_sdio1_pad_rst_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5e) +#define SET_GPIO_dout_spdif_tx_sdout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x5f) +#define SET_GPIO_dout_spdif_tx_sdout_oen(gpionum) sf_vic_gpio_dout_value(gpionum, 0x60) +#define SET_GPIO_dout_spi0_pad_oe_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x61) +#define SET_GPIO_dout_spi0_pad_sck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x62) +#define SET_GPIO_dout_spi0_pad_ss_0_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x63) +#define SET_GPIO_dout_spi0_pad_ss_1_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x64) +#define SET_GPIO_dout_spi0_pad_txd(gpionum) sf_vic_gpio_dout_value(gpionum, 0x65) +#define SET_GPIO_dout_spi1_pad_oe_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x66) +#define SET_GPIO_dout_spi1_pad_sck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x67) +#define SET_GPIO_dout_spi1_pad_ss_0_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x68) +#define SET_GPIO_dout_spi1_pad_ss_1_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x69) +#define SET_GPIO_dout_spi1_pad_txd(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6a) +#define SET_GPIO_dout_spi2_pad_oe_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6b) +#define SET_GPIO_dout_spi2_pad_sck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6c) +#define SET_GPIO_dout_spi2_pad_ss_0_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6d) +#define SET_GPIO_dout_spi2_pad_ss_1_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6e) +#define SET_GPIO_dout_spi2_pad_txd(gpionum) sf_vic_gpio_dout_value(gpionum, 0x6f) +#define SET_GPIO_dout_spi2ahb_pad_oe_n_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x70) +#define SET_GPIO_dout_spi2ahb_pad_oe_n_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x71) +#define SET_GPIO_dout_spi2ahb_pad_oe_n_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x72) +#define SET_GPIO_dout_spi2ahb_pad_oe_n_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x73) +#define SET_GPIO_dout_spi2ahb_pad_txd_bit0(gpionum) sf_vic_gpio_dout_value(gpionum, 0x74) +#define SET_GPIO_dout_spi2ahb_pad_txd_bit1(gpionum) sf_vic_gpio_dout_value(gpionum, 0x75) +#define SET_GPIO_dout_spi2ahb_pad_txd_bit2(gpionum) sf_vic_gpio_dout_value(gpionum, 0x76) +#define SET_GPIO_dout_spi2ahb_pad_txd_bit3(gpionum) sf_vic_gpio_dout_value(gpionum, 0x77) +#define SET_GPIO_dout_spi3_pad_oe_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x78) +#define SET_GPIO_dout_spi3_pad_sck_out(gpionum) sf_vic_gpio_dout_value(gpionum, 0x79) +#define SET_GPIO_dout_spi3_pad_ss_0_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7a) +#define SET_GPIO_dout_spi3_pad_ss_1_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7b) +#define SET_GPIO_dout_spi3_pad_txd(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7c) +#define SET_GPIO_dout_uart0_pad_dtrn(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7d) +#define SET_GPIO_dout_uart0_pad_rtsn(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7e) +#define 
SET_GPIO_dout_uart0_pad_sout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x7f) +#define SET_GPIO_dout_uart1_pad_sout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x80) +#define SET_GPIO_dout_uart2_pad_dtr_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x81) +#define SET_GPIO_dout_uart2_pad_rts_n(gpionum) sf_vic_gpio_dout_value(gpionum, 0x82) +#define SET_GPIO_dout_uart2_pad_sout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x83) +#define SET_GPIO_dout_uart3_pad_sout(gpionum) sf_vic_gpio_dout_value(gpionum, 0x84) +#define SET_GPIO_dout_usb_drv_bus(gpionum) sf_vic_gpio_dout_value(gpionum, 0x85) +#define SET_GPIO_doen_reverse_(gpionum, en) sf_vic_gpio_doen_reverse(gpionum, en) +#define SET_GPIO_doen_LOW(gpionum) sf_vic_gpio_doen_value(gpionum, 0x0) +#define SET_GPIO_doen_HIGH(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1) +#define SET_GPIO_doen_clk_gmac_tophyref(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2) +#define SET_GPIO_doen_cpu_jtag_tdo(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3) +#define SET_GPIO_doen_cpu_jtag_tdo_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4) +#define SET_GPIO_doen_dmic_clk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5) +#define SET_GPIO_doen_dsp_JTDOEn_pad(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6) +#define SET_GPIO_doen_dsp_JTDO_pad(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7) +#define SET_GPIO_doen_i2c0_pad_sck_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0x8) +#define SET_GPIO_doen_i2c0_pad_sda_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0x9) +#define SET_GPIO_doen_i2c1_pad_sck_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xa) +#define SET_GPIO_doen_i2c1_pad_sda_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xb) +#define SET_GPIO_doen_i2c2_pad_sck_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xc) +#define SET_GPIO_doen_i2c2_pad_sda_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xd) +#define SET_GPIO_doen_i2c3_pad_sck_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xe) +#define SET_GPIO_doen_i2c3_pad_sda_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0xf) +#define SET_GPIO_doen_i2srx_bclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x10) +#define SET_GPIO_doen_i2srx_bclk_out_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x11) +#define SET_GPIO_doen_i2srx_lrck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x12) +#define SET_GPIO_doen_i2srx_lrck_out_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x13) +#define SET_GPIO_doen_i2srx_mclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x14) +#define SET_GPIO_doen_i2stx_bclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x15) +#define SET_GPIO_doen_i2stx_bclk_out_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x16) +#define SET_GPIO_doen_i2stx_lrck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x17) +#define SET_GPIO_doen_i2stx_lrckout_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x18) +#define SET_GPIO_doen_i2stx_mclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x19) +#define SET_GPIO_doen_i2stx_sdout0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1a) +#define SET_GPIO_doen_i2stx_sdout1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1b) +#define SET_GPIO_doen_lcd_pad_csm_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1c) +#define SET_GPIO_doen_pwm_pad_oe_n_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1d) +#define SET_GPIO_doen_pwm_pad_oe_n_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1e) +#define SET_GPIO_doen_pwm_pad_oe_n_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x1f) +#define SET_GPIO_doen_pwm_pad_oe_n_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x20) +#define SET_GPIO_doen_pwm_pad_oe_n_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x21) 
+#define SET_GPIO_doen_pwm_pad_oe_n_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x22) +#define SET_GPIO_doen_pwm_pad_oe_n_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x23) +#define SET_GPIO_doen_pwm_pad_oe_n_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x24) +#define SET_GPIO_doen_pwm_pad_out_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x25) +#define SET_GPIO_doen_pwm_pad_out_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x26) +#define SET_GPIO_doen_pwm_pad_out_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x27) +#define SET_GPIO_doen_pwm_pad_out_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x28) +#define SET_GPIO_doen_pwm_pad_out_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x29) +#define SET_GPIO_doen_pwm_pad_out_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2a) +#define SET_GPIO_doen_pwm_pad_out_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2b) +#define SET_GPIO_doen_pwm_pad_out_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2c) +#define SET_GPIO_doen_pwmdac_left_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2d) +#define SET_GPIO_doen_pwmdac_right_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2e) +#define SET_GPIO_doen_qspi_csn1_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x2f) +#define SET_GPIO_doen_qspi_csn2_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x30) +#define SET_GPIO_doen_qspi_csn3_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x31) +#define SET_GPIO_doen_register23_SCFG_cmsensor_rst0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x32) +#define SET_GPIO_doen_register23_SCFG_cmsensor_rst1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x33) +#define SET_GPIO_doen_register32_SCFG_gmac_phy_rstn(gpionum) sf_vic_gpio_doen_value(gpionum, 0x34) +#define SET_GPIO_doen_sdio0_pad_card_power_en(gpionum) sf_vic_gpio_doen_value(gpionum, 0x35) +#define SET_GPIO_doen_sdio0_pad_cclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x36) +#define SET_GPIO_doen_sdio0_pad_ccmd_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0x37) +#define SET_GPIO_doen_sdio0_pad_ccmd_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x38) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x39) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3a) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3b) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3c) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3d) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3e) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x3f) +#define SET_GPIO_doen_sdio0_pad_cdata_oe_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x40) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x41) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x42) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x43) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x44) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x45) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x46) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x47) +#define SET_GPIO_doen_sdio0_pad_cdata_out_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 
0x48) +#define SET_GPIO_doen_sdio0_pad_rst_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x49) +#define SET_GPIO_doen_sdio1_pad_card_power_en(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4a) +#define SET_GPIO_doen_sdio1_pad_cclk_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4b) +#define SET_GPIO_doen_sdio1_pad_ccmd_oe(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4c) +#define SET_GPIO_doen_sdio1_pad_ccmd_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4d) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4e) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x4f) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x50) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x51) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x52) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x53) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x54) +#define SET_GPIO_doen_sdio1_pad_cdata_oe_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x55) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x56) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x57) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x58) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x59) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit4(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5a) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit5(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5b) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit6(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5c) +#define SET_GPIO_doen_sdio1_pad_cdata_out_bit7(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5d) +#define SET_GPIO_doen_sdio1_pad_rst_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5e) +#define SET_GPIO_doen_spdif_tx_sdout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x5f) +#define SET_GPIO_doen_spdif_tx_sdout_oen(gpionum) sf_vic_gpio_doen_value(gpionum, 0x60) +#define SET_GPIO_doen_spi0_pad_oe_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x61) +#define SET_GPIO_doen_spi0_pad_sck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x62) +#define SET_GPIO_doen_spi0_pad_ss_0_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x63) +#define SET_GPIO_doen_spi0_pad_ss_1_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x64) +#define SET_GPIO_doen_spi0_pad_txd(gpionum) sf_vic_gpio_doen_value(gpionum, 0x65) +#define SET_GPIO_doen_spi1_pad_oe_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x66) +#define SET_GPIO_doen_spi1_pad_sck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x67) +#define SET_GPIO_doen_spi1_pad_ss_0_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x68) +#define SET_GPIO_doen_spi1_pad_ss_1_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x69) +#define SET_GPIO_doen_spi1_pad_txd(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6a) +#define SET_GPIO_doen_spi2_pad_oe_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6b) +#define SET_GPIO_doen_spi2_pad_sck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6c) +#define SET_GPIO_doen_spi2_pad_ss_0_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6d) +#define SET_GPIO_doen_spi2_pad_ss_1_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6e) +#define SET_GPIO_doen_spi2_pad_txd(gpionum) sf_vic_gpio_doen_value(gpionum, 0x6f) +#define 
SET_GPIO_doen_spi2ahb_pad_oe_n_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x70) +#define SET_GPIO_doen_spi2ahb_pad_oe_n_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x71) +#define SET_GPIO_doen_spi2ahb_pad_oe_n_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x72) +#define SET_GPIO_doen_spi2ahb_pad_oe_n_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x73) +#define SET_GPIO_doen_spi2ahb_pad_txd_bit0(gpionum) sf_vic_gpio_doen_value(gpionum, 0x74) +#define SET_GPIO_doen_spi2ahb_pad_txd_bit1(gpionum) sf_vic_gpio_doen_value(gpionum, 0x75) +#define SET_GPIO_doen_spi2ahb_pad_txd_bit2(gpionum) sf_vic_gpio_doen_value(gpionum, 0x76) +#define SET_GPIO_doen_spi2ahb_pad_txd_bit3(gpionum) sf_vic_gpio_doen_value(gpionum, 0x77) +#define SET_GPIO_doen_spi3_pad_oe_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x78) +#define SET_GPIO_doen_spi3_pad_sck_out(gpionum) sf_vic_gpio_doen_value(gpionum, 0x79) +#define SET_GPIO_doen_spi3_pad_ss_0_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7a) +#define SET_GPIO_doen_spi3_pad_ss_1_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7b) +#define SET_GPIO_doen_spi3_pad_txd(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7c) +#define SET_GPIO_doen_uart0_pad_dtrn(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7d) +#define SET_GPIO_doen_uart0_pad_rtsn(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7e) +#define SET_GPIO_doen_uart0_pad_sout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x7f) +#define SET_GPIO_doen_uart1_pad_sout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x80) +#define SET_GPIO_doen_uart2_pad_dtr_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x81) +#define SET_GPIO_doen_uart2_pad_rts_n(gpionum) sf_vic_gpio_doen_value(gpionum, 0x82) +#define SET_GPIO_doen_uart2_pad_sout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x83) +#define SET_GPIO_doen_uart3_pad_sout(gpionum) sf_vic_gpio_doen_value(gpionum, 0x84) +#define SET_GPIO_doen_usb_drv_bus(gpionum) sf_vic_gpio_doen_value(gpionum, 0x85) +#define SET_GPIO_cpu_jtag_tck(gpionum) sf_vic_gpio_manual(0x250, gpionum + 2) +#define SET_GPIO_cpu_jtag_tdi(gpionum) sf_vic_gpio_manual(0x254, gpionum + 2) +#define SET_GPIO_cpu_jtag_tms(gpionum) sf_vic_gpio_manual(0x258, gpionum + 2) +#define SET_GPIO_cpu_jtag_trst(gpionum) sf_vic_gpio_manual(0x25c, gpionum + 2) +#define SET_GPIO_dmic_sdin_bit0(gpionum) sf_vic_gpio_manual(0x260, gpionum + 2) +#define SET_GPIO_dmic_sdin_bit1(gpionum) sf_vic_gpio_manual(0x264, gpionum + 2) +#define SET_GPIO_dsp_JTCK_pad(gpionum) sf_vic_gpio_manual(0x268, gpionum + 2) +#define SET_GPIO_dsp_JTDI_pad(gpionum) sf_vic_gpio_manual(0x26c, gpionum + 2) +#define SET_GPIO_dsp_JTMS_pad(gpionum) sf_vic_gpio_manual(0x270, gpionum + 2) +#define SET_GPIO_dsp_TRST_pad(gpionum) sf_vic_gpio_manual(0x274, gpionum + 2) +#define SET_GPIO_i2c0_pad_sck_in(gpionum) sf_vic_gpio_manual(0x278, gpionum + 2) +#define SET_GPIO_i2c0_pad_sda_in(gpionum) sf_vic_gpio_manual(0x27c, gpionum + 2) +#define SET_GPIO_i2c1_pad_sck_in(gpionum) sf_vic_gpio_manual(0x280, gpionum + 2) +#define SET_GPIO_i2c1_pad_sda_in(gpionum) sf_vic_gpio_manual(0x284, gpionum + 2) +#define SET_GPIO_i2c2_pad_sck_in(gpionum) sf_vic_gpio_manual(0x288, gpionum + 2) +#define SET_GPIO_i2c2_pad_sda_in(gpionum) sf_vic_gpio_manual(0x28c, gpionum + 2) +#define SET_GPIO_i2c3_pad_sck_in(gpionum) sf_vic_gpio_manual(0x290, gpionum + 2) +#define SET_GPIO_i2c3_pad_sda_in(gpionum) sf_vic_gpio_manual(0x294, gpionum + 2) +#define SET_GPIO_i2srx_bclk_in(gpionum) sf_vic_gpio_manual(0x298, gpionum + 2) +#define SET_GPIO_i2srx_lrck_in(gpionum) sf_vic_gpio_manual(0x29c, gpionum + 2) +#define 
SET_GPIO_i2srx_sdin_bit0(gpionum) sf_vic_gpio_manual(0x2a0, gpionum + 2) +#define SET_GPIO_i2srx_sdin_bit1(gpionum) sf_vic_gpio_manual(0x2a4, gpionum + 2) +#define SET_GPIO_i2srx_sdin_bit2(gpionum) sf_vic_gpio_manual(0x2a8, gpionum + 2) +#define SET_GPIO_i2stx_bclk_in(gpionum) sf_vic_gpio_manual(0x2ac, gpionum + 2) +#define SET_GPIO_i2stx_lrck_in(gpionum) sf_vic_gpio_manual(0x2b0, gpionum + 2) +#define SET_GPIO_sdio0_pad_card_detect_n(gpionum) sf_vic_gpio_manual(0x2b4, gpionum + 2) +#define SET_GPIO_sdio0_pad_card_write_prt(gpionum) sf_vic_gpio_manual(0x2b8, gpionum + 2) +#define SET_GPIO_sdio0_pad_ccmd_in(gpionum) sf_vic_gpio_manual(0x2bc, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit0(gpionum) sf_vic_gpio_manual(0x2c0, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit1(gpionum) sf_vic_gpio_manual(0x2c4, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit2(gpionum) sf_vic_gpio_manual(0x2c8, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit3(gpionum) sf_vic_gpio_manual(0x2cc, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit4(gpionum) sf_vic_gpio_manual(0x2d0, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit5(gpionum) sf_vic_gpio_manual(0x2d4, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit6(gpionum) sf_vic_gpio_manual(0x2d8, gpionum + 2) +#define SET_GPIO_sdio0_pad_cdata_in_bit7(gpionum) sf_vic_gpio_manual(0x2dc, gpionum + 2) +#define SET_GPIO_sdio1_pad_card_detect_n(gpionum) sf_vic_gpio_manual(0x2e0, gpionum + 2) +#define SET_GPIO_sdio1_pad_card_write_prt(gpionum) sf_vic_gpio_manual(0x2e4, gpionum + 2) +#define SET_GPIO_sdio1_pad_ccmd_in(gpionum) sf_vic_gpio_manual(0x2e8, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit0(gpionum) sf_vic_gpio_manual(0x2ec, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit1(gpionum) sf_vic_gpio_manual(0x2f0, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit2(gpionum) sf_vic_gpio_manual(0x2f4, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit3(gpionum) sf_vic_gpio_manual(0x2f8, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit4(gpionum) sf_vic_gpio_manual(0x2fc, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit5(gpionum) sf_vic_gpio_manual(0x300, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit6(gpionum) sf_vic_gpio_manual(0x304, gpionum + 2) +#define SET_GPIO_sdio1_pad_cdata_in_bit7(gpionum) sf_vic_gpio_manual(0x308, gpionum + 2) +#define SET_GPIO_spdif_rx_sdin(gpionum) sf_vic_gpio_manual(0x30c, gpionum + 2) +#define SET_GPIO_spi0_pad_rxd(gpionum) sf_vic_gpio_manual(0x310, gpionum + 2) +#define SET_GPIO_spi0_pad_ss_in_n(gpionum) sf_vic_gpio_manual(0x314, gpionum + 2) +#define SET_GPIO_spi1_pad_rxd(gpionum) sf_vic_gpio_manual(0x318, gpionum + 2) +#define SET_GPIO_spi1_pad_ss_in_n(gpionum) sf_vic_gpio_manual(0x31c, gpionum + 2) +#define SET_GPIO_spi2_pad_rxd(gpionum) sf_vic_gpio_manual(0x320, gpionum + 2) +#define SET_GPIO_spi2_pad_ss_in_n(gpionum) sf_vic_gpio_manual(0x324, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_rxd_bit0(gpionum) sf_vic_gpio_manual(0x328, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_rxd_bit1(gpionum) sf_vic_gpio_manual(0x32c, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_rxd_bit2(gpionum) sf_vic_gpio_manual(0x330, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_rxd_bit3(gpionum) sf_vic_gpio_manual(0x334, gpionum + 2) +#define SET_GPIO_spi2ahb_pad_ss_n(gpionum) sf_vic_gpio_manual(0x338, gpionum + 2) +#define SET_GPIO_spi2ahb_slv_sclkin(gpionum) sf_vic_gpio_manual(0x33c, gpionum + 2) +#define SET_GPIO_spi3_pad_rxd(gpionum) sf_vic_gpio_manual(0x340, gpionum + 2) +#define 
SET_GPIO_spi3_pad_ss_in_n(gpionum) sf_vic_gpio_manual(0x344, gpionum + 2) +#define SET_GPIO_uart0_pad_ctsn(gpionum) sf_vic_gpio_manual(0x348, gpionum + 2) +#define SET_GPIO_uart0_pad_dcdn(gpionum) sf_vic_gpio_manual(0x34c, gpionum + 2) +#define SET_GPIO_uart0_pad_dsrn(gpionum) sf_vic_gpio_manual(0x350, gpionum + 2) +#define SET_GPIO_uart0_pad_rin(gpionum) sf_vic_gpio_manual(0x354, gpionum + 2) +#define SET_GPIO_uart0_pad_sin(gpionum) sf_vic_gpio_manual(0x358, gpionum + 2) +#define SET_GPIO_uart1_pad_sin(gpionum) sf_vic_gpio_manual(0x35c, gpionum + 2) +#define SET_GPIO_uart2_pad_cts_n(gpionum) sf_vic_gpio_manual(0x360, gpionum + 2) +#define SET_GPIO_uart2_pad_dcd_n(gpionum) sf_vic_gpio_manual(0x364, gpionum + 2) +#define SET_GPIO_uart2_pad_dsr_n(gpionum) sf_vic_gpio_manual(0x368, gpionum + 2) +#define SET_GPIO_uart2_pad_ri_n(gpionum) sf_vic_gpio_manual(0x36c, gpionum + 2) +#define SET_GPIO_uart2_pad_sin(gpionum) sf_vic_gpio_manual(0x370, gpionum + 2) +#define SET_GPIO_uart3_pad_sin(gpionum) sf_vic_gpio_manual(0x374, gpionum + 2) +#define SET_GPIO_usb_over_current(gpionum) sf_vic_gpio_manual(0x378, gpionum + 2) + +#endif /* __GPIO_PXA_H */ From 173bb4d1a3ecc4d61c97649a509035253d6c7088 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 27 May 2021 20:13:43 +0200 Subject: [PATCH 25/62] [WIP] dt-bindings: dma: dw-axi-dmac: Increase DMA channel limit to 16 The first DMAC instance in the StarFive JH7100 SoC supports 16 DMA channels. FIXME Given there are more changes to the driver than just increasing DMAC_MAX_CHANNELS, we probably need a new compatible value, too. Signed-off-by: Geert Uytterhoeven --- Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml index 79e241498e2532..09d3e9ee3939e1 100644 --- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml +++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml @@ -51,7 +51,7 @@ properties: dma-channels: minimum: 1 - maximum: 8 + maximum: 16 snps,dma-masters: description: | @@ -71,14 +71,14 @@ properties: Channel priority specifier associated with the DMA channels. $ref: /schemas/types.yaml#/definitions/uint32-array minItems: 1 - maxItems: 8 + maxItems: 16 snps,block-size: description: | Channel block size specifier associated with the DMA channels. 
$ref: /schemas/types.yaml#/definitions/uint32-array minItems: 1 - maxItems: 8 + maxItems: 16 snps,axi-max-burst-len: description: | From 3cdd633d974c6ceee9b4b3ac6a68aca9f135b3f2 Mon Sep 17 00:00:00 2001 From: Samin Guo Date: Fri, 8 Jan 2021 03:11:04 +0800 Subject: [PATCH 26/62] drivers/tty/serial/8250: update driver for VIC7100 --- drivers/tty/serial/8250/8250_port.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index fc5ab203228213..4fd09d3dd6df76 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -73,8 +73,16 @@ static const struct serial8250_config uart_config[] = { }, [PORT_16550] = { .name = "16550", +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + .fifo_size = 16, + .tx_loadsz = 16, + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_00, + .rxtrig_bytes = {1, 4, 8, 14}, + .flags = UART_CAP_FIFO, +#else .fifo_size = 1, .tx_loadsz = 1, +#endif }, [PORT_16550A] = { .name = "16550A", From 9220cd3bd6755b2d4e33e4aad6ddacd9755e0d53 Mon Sep 17 00:00:00 2001 From: Huan Feng Date: Fri, 8 Jan 2021 03:35:42 +0800 Subject: [PATCH 27/62] drivers/hw_random: Add Starfive VIC Random Number Generator driver --- drivers/char/hw_random/Kconfig | 13 ++ drivers/char/hw_random/Makefile | 1 + drivers/char/hw_random/starfive-vic-rng.c | 256 ++++++++++++++++++++++ drivers/char/hw_random/starfive-vic-rng.h | 167 ++++++++++++++ 4 files changed, 437 insertions(+) create mode 100644 drivers/char/hw_random/starfive-vic-rng.c create mode 100644 drivers/char/hw_random/starfive-vic-rng.h diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 1fe006f3f12fab..b21b7d33357e24 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -335,6 +335,19 @@ config HW_RANDOM_POWERNV If unsure, say Y. +config HW_RANDOM_STARFIVE_VIC + tristate "Starfive VIC Random Number Generator support" + depends on HW_RANDOM + default y if SOC_STARFIVE_VIC7100 + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Starfive VIC SoC. + + To compile this driver as a module, choose M here: the + module will be called starfive-vic-rng. + + If unsure, say Y. 
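Once the device is registered with the hw_random core, entropy is exposed to userspace through the generic /dev/hwrng character device. A minimal userspace sketch; nothing StarFive-specific is assumed and error handling is kept to the bare minimum:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	unsigned int i;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0 || read(fd, buf, sizeof(buf)) != sizeof(buf)) {
		perror("hwrng");
		return 1;
	}
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x", buf[i]);
	printf("\n");
	close(fd);
	return 0;
}
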
+ config HW_RANDOM_HISI tristate "Hisilicon Random Number Generator support" depends on HW_RANDOM && ARCH_HISI diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile index 8933fada74f2fb..9b959cfc1b3086 100644 --- a/drivers/char/hw_random/Makefile +++ b/drivers/char/hw_random/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o obj-$(CONFIG_HW_RANDOM_POWERNV) += powernv-rng.o +obj-$(CONFIG_HW_RANDOM_STARFIVE_VIC) += starfive-vic-rng.o obj-$(CONFIG_HW_RANDOM_HISI) += hisi-rng.o obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o diff --git a/drivers/char/hw_random/starfive-vic-rng.c b/drivers/char/hw_random/starfive-vic-rng.c new file mode 100644 index 00000000000000..6142b6a7ace6be --- /dev/null +++ b/drivers/char/hw_random/starfive-vic-rng.c @@ -0,0 +1,256 @@ +/* + ****************************************************************************** + * @file starfive-vic-rng.c + * @author StarFive Technology + * @version V1.0 + * @date 08/13/2020 + * @brief + ****************************************************************************** + * @copy + * + * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS + * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE + * TIME. AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY + * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING + * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE + * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. + * + * COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "starfive-vic-rng.h" + +#define to_vic_rng(p) container_of(p, struct vic_rng, rng) + +struct vic_rng { + struct device *dev; + void __iomem *base; + struct hwrng rng; +}; + +static inline void vic_wait_till_idle(struct vic_rng *hrng) +{ + while(readl(hrng->base + VIC_STAT) & VIC_STAT_BUSY) + ; +} + +static inline void vic_rng_irq_mask_clear(struct vic_rng *hrng) +{ + // clear register: ISTAT + u32 data = readl(hrng->base + VIC_ISTAT); + writel(data, hrng->base + VIC_ISTAT); + writel(0, hrng->base + VIC_ALARM); +} + +static int vic_trng_cmd(struct vic_rng *hrng, u32 cmd) { + int res = 0; + // wait till idle + vic_wait_till_idle(hrng); + switch (cmd) { + case VIC_CTRL_CMD_NOP: + case VIC_CTRL_CMD_GEN_NOISE: + case VIC_CTRL_CMD_GEN_NONCE: + case VIC_CTRL_CMD_CREATE_STATE: + case VIC_CTRL_CMD_RENEW_STATE: + case VIC_CTRL_CMD_REFRESH_ADDIN: + case VIC_CTRL_CMD_GEN_RANDOM: + case VIC_CTRL_CMD_ADVANCE_STATE: + case VIC_CTRL_CMD_KAT: + case VIC_CTRL_CMD_ZEROIZE: + writel(cmd, hrng->base + VIC_CTRL); + break; + default: + res = -1; + break; + } + + return res; +} + +static int vic_rng_init(struct hwrng *rng) +{ + struct vic_rng *hrng = to_vic_rng(rng); + + // wait till idle + + // clear register: ISTAT + vic_rng_irq_mask_clear(hrng); + + // set mission mode + writel(VIC_SMODE_SECURE_EN(1), hrng->base + VIC_SMODE); + + vic_trng_cmd(hrng, VIC_CTRL_CMD_GEN_NOISE); + vic_wait_till_idle(hrng); + + // set interrupt + writel(VIC_IE_ALL, hrng->base + VIC_IE); + + // zeroize + vic_trng_cmd(hrng, VIC_CTRL_CMD_ZEROIZE); + + vic_wait_till_idle(hrng); + + return 0; +} + +static irqreturn_t vic_rng_irq(int irq, void *priv) +{ + u32 status, val; + struct vic_rng 
*hrng = (struct vic_rng *)priv; + + /* + * clearing the interrupt will also clear the error register + * read error and status before clearing + */ + status = readl(hrng->base + VIC_ISTAT); + + if (status & VIC_ISTAT_ALARMS) { + writel(VIC_ISTAT_ALARMS, hrng->base + VIC_ISTAT); + val = readl(hrng->base + VIC_ALARM); + if (val & VIC_ALARM_ILLEGAL_CMD_SEQ) { + writel(VIC_ALARM_ILLEGAL_CMD_SEQ, hrng->base + VIC_ALARM); + //dev_info(hrng->dev, "ILLEGAL CMD SEQ: LAST_CMD=0x%x\r\n", + //VIC_STAT_LAST_CMD(readl(hrng->base + VIC_STAT))); + } else { + dev_info(hrng->dev, "Failed test: %x\r\n", val); + } + } + + if (status & VIC_ISTAT_ZEROIZE) { + writel(VIC_ISTAT_ZEROIZE, hrng->base + VIC_ISTAT); + //dev_info(hrng->dev, "zeroized\r\n"); + } + + if (status & VIC_ISTAT_KAT_COMPLETE) { + writel(VIC_ISTAT_KAT_COMPLETE, hrng->base + VIC_ISTAT); + //dev_info(hrng->dev, "kat_completed\r\n"); + } + + if (status & VIC_ISTAT_NOISE_RDY) { + writel(VIC_ISTAT_NOISE_RDY, hrng->base + VIC_ISTAT); + //dev_info(hrng->dev, "noise_rdy\r\n"); + } + + if (status & VIC_ISTAT_DONE) { + writel(VIC_ISTAT_DONE, hrng->base + VIC_ISTAT); + //dev_info(hrng->dev, "done\r\n"); + /* + if (VIC_STAT_LAST_CMD(readl(hrng->base + VIC_STAT)) == + VIC_CTRL_CMD_GEN_RANDOM) { + dev_info(hrng->dev, "Need Update Buffer\r\n"); + } + */ + } + vic_rng_irq_mask_clear(hrng); + + return IRQ_HANDLED; +} + +static void vic_rng_cleanup(struct hwrng *rng) +{ + struct vic_rng *hrng = to_vic_rng(rng); + + writel(0, hrng->base + VIC_CTRL); +} + +static int vic_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) +{ + struct vic_rng *hrng = to_vic_rng(rng); + + vic_trng_cmd(hrng, VIC_CTRL_CMD_ZEROIZE); + vic_trng_cmd(hrng, VIC_CTRL_CMD_GEN_NOISE); + vic_trng_cmd(hrng, VIC_CTRL_CMD_CREATE_STATE); + + vic_wait_till_idle(hrng); + max = min_t(size_t, max, (VIC_RAND_LEN * 4)); + + writel(0x0, hrng->base + VIC_MODE); + vic_trng_cmd(hrng, VIC_CTRL_CMD_GEN_RANDOM); + + vic_wait_till_idle(hrng); + memcpy_fromio(buf, hrng->base + VIC_RAND0, max); + vic_trng_cmd(hrng, VIC_CTRL_CMD_ZEROIZE); + + vic_wait_till_idle(hrng); + return max; +} + +static int vic_rng_probe(struct platform_device *pdev) +{ + int ret; + int irq; + struct vic_rng *rng; + struct resource *res; + + rng = devm_kzalloc(&pdev->dev, sizeof(*rng), GFP_KERNEL); + if (!rng){ + return -ENOMEM; + } + + platform_set_drvdata(pdev, rng); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rng->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rng->base)){ + return PTR_ERR(rng->base); + } + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(&pdev->dev, "Couldn't get irq %d\n", irq); + return irq; + } + + ret = devm_request_irq(&pdev->dev, irq, vic_rng_irq, 0, pdev->name, + (void *)rng); + if (ret) { + dev_err(&pdev->dev, "Can't get interrupt working.\n"); + return ret; + } + + rng->rng.name = pdev->name; + rng->rng.init = vic_rng_init; + rng->rng.cleanup = vic_rng_cleanup; + rng->rng.read = vic_rng_read; + + rng->dev = &pdev->dev; + + ret = devm_hwrng_register(&pdev->dev, &rng->rng); + if (ret) { + dev_err(&pdev->dev, "failed to register hwrng\n"); + return ret; + } + + dev_info(&pdev->dev, "Initialized\n"); + + return 0; +} + +static const struct of_device_id vic_rng_dt_ids[] = { + { .compatible = "starfive,vic-rng" }, + { } +}; +MODULE_DEVICE_TABLE(of, vic_rng_dt_ids); + +static struct platform_driver vic_rng_driver = { + .probe = vic_rng_probe, + .driver = { + .name = "vic-rng", + .of_match_table = of_match_ptr(vic_rng_dt_ids), + }, +}; + 
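+/*
+ * Note on the read path above: each GEN_RANDOM command yields VIC_RAND_LEN
+ * (4) 32-bit words starting at VIC_RAND0, so vic_rng_read() returns at most
+ * 16 bytes per call and relies on the hwrng core to issue further reads for
+ * larger requests.
+ */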
+module_platform_driver(vic_rng_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Huan Feng "); +MODULE_DESCRIPTION("Starfive VIC random number generator driver"); diff --git a/drivers/char/hw_random/starfive-vic-rng.h b/drivers/char/hw_random/starfive-vic-rng.h new file mode 100644 index 00000000000000..b3bbabde0cfb13 --- /dev/null +++ b/drivers/char/hw_random/starfive-vic-rng.h @@ -0,0 +1,167 @@ +/* + ****************************************************************************** + * @file starfive-vic-rng.h + * @author StarFive Technology + * @version V1.0 + * @date 08/13/2020 + * @brief + ****************************************************************************** + * @copy + * + * THE PRESENT SOFTWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS + * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE + * TIME. AS A RESULT, STARFIVE SHALL NOT BE HELD LIABLE FOR ANY + * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING + * FROM THE CONTENT OF SUCH SOFTWARE AND/OR THE USE MADE BY CUSTOMERS OF THE + * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. + * + * COPYRIGHT 2020 Shanghai StarFive Technology Co., Ltd. + */ + +#define VIC_CTRL 0x00 +#define VIC_MODE 0x04 +#define VIC_SMODE 0x08 +#define VIC_STAT 0x0C +#define VIC_IE 0x10 +#define VIC_ISTAT 0x14 +#define VIC_ALARM 0x18 +#define VIC_BUILD_ID 0x1C +#define VIC_FEATURES 0x20 +#define VIC_RAND0 0x24 +#define VIC_NPA_DATA0 0x34 +#define VIC_SEED0 0x74 +#define VIC_IA_RDATA 0xA4 +#define VIC_IA_WDATA 0xA8 +#define VIC_IA_ADDR 0xAC +#define VIC_IA_CMD 0xB0 + +/* CTRL */ +#define VIC_CTRL_CMD_NOP 0 +#define VIC_CTRL_CMD_GEN_NOISE 1 +#define VIC_CTRL_CMD_GEN_NONCE 2 +#define VIC_CTRL_CMD_CREATE_STATE 3 +#define VIC_CTRL_CMD_RENEW_STATE 4 +#define VIC_CTRL_CMD_REFRESH_ADDIN 5 +#define VIC_CTRL_CMD_GEN_RANDOM 6 +#define VIC_CTRL_CMD_ADVANCE_STATE 7 +#define VIC_CTRL_CMD_KAT 8 +#define VIC_CTRL_CMD_ZEROIZE 15 + +/* MODE */ +#define _VIC_MODE_ADDIN_PRESENT 4 +#define _VIC_MODE_PRED_RESIST 3 +#define _VIC_MODE_KAT_SEL 2 +#define _VIC_MODE_KAT_VEC 1 +#define _VIC_MODE_SEC_ALG 0 + +#define VIC_MODE_ADDIN_PRESENT (1UL << _VIC_MODE_ADDIN_PRESENT) +#define VIC_MODE_PRED_RESIST (1UL << _VIC_MODE_PRED_RESIST) +#define VIC_MODE_KAT_SEL (1UL << _VIC_MODE_KAT_SEL) +#define VIC_MODE_KAT_VEC (1UL << _VIC_MODE_KAT_VEC) +#define VIC_MODE_SEC_ALG (1UL << _VIC_MODE_SEC_ALG) + +/* SMODE */ +#define _VIC_SMODE_MAX_REJECTS 2 +#define _VIC_SMODE_SECURE_EN 1 +#define _VIC_SMODE_NONCE 0 + +#define VIC_SMODE_MAX_REJECTS(x) ((x) << _VIC_SMODE_MAX_REJECTS) +#define VIC_SMODE_SECURE_EN(x) ((x) << _VIC_SMODE_SECURE_EN) +#define VIC_SMODE_NONCE (1UL << _VIC_SMODE_NONCE) + +/* STAT */ +#define _VIC_STAT_BUSY 31 +#define _VIC_STAT_DRBG_STATE 7 +#define _VIC_STAT_SECURE 6 +#define _VIC_STAT_NONCE_MODE 5 +#define _VIC_STAT_SEC_ALG 4 +#define _VIC_STAT_LAST_CMD 0 + +#define VIC_STAT_BUSY (1UL << _VIC_STAT_BUSY) +#define VIC_STAT_DRBG_STATE (1UL << _VIC_STAT_DRBG_STATE) +#define VIC_STAT_SECURE (1UL << _VIC_STAT_SECURE) +#define VIC_STAT_NONCE_MODE (1UL << _VIC_STAT_NONCE_MODE) +#define VIC_STAT_SEC_ALG (1UL << _VIC_STAT_SEC_ALG) +#define VIC_STAT_LAST_CMD(x) (((x) >> _VIC_STAT_LAST_CMD) & 0xF) + +/* IE */ +#define _VIC_IE_GLBL 31 +#define _VIC_IE_DONE 4 +#define _VIC_IE_ALARMS 3 +#define _VIC_IE_NOISE_RDY 2 +#define _VIC_IE_KAT_COMPLETE 1 +#define _VIC_IE_ZEROIZE 0 + +#define VIC_IE_GLBL (1UL << _VIC_IE_GLBL) +#define VIC_IE_DONE (1UL << _VIC_IE_DONE) +#define VIC_IE_ALARMS (1UL << 
_VIC_IE_ALARMS) +#define VIC_IE_NOISE_RDY (1UL << _VIC_IE_NOISE_RDY) +#define VIC_IE_KAT_COMPLETE (1UL << _VIC_IE_KAT_COMPLETE) +#define VIC_IE_ZEROIZE (1UL << _VIC_IE_ZEROIZE) +#define VIC_IE_ALL (VIC_IE_GLBL | VIC_IE_DONE | VIC_IE_ALARMS | \ + VIC_IE_NOISE_RDY | VIC_IE_KAT_COMPLETE | VIC_IE_ZEROIZE) + +/* ISTAT */ +#define _VIC_ISTAT_DONE 4 +#define _VIC_ISTAT_ALARMS 3 +#define _VIC_ISTAT_NOISE_RDY 2 +#define _VIC_ISTAT_KAT_COMPLETE 1 +#define _VIC_ISTAT_ZEROIZE 0 + +#define VIC_ISTAT_DONE (1UL << _VIC_ISTAT_DONE) +#define VIC_ISTAT_ALARMS (1UL << _VIC_ISTAT_ALARMS) +#define VIC_ISTAT_NOISE_RDY (1UL << _VIC_ISTAT_NOISE_RDY) +#define VIC_ISTAT_KAT_COMPLETE (1UL << _VIC_ISTAT_KAT_COMPLETE) +#define VIC_ISTAT_ZEROIZE (1UL << _VIC_ISTAT_ZEROIZE) + +/* ALARMS */ +#define VIC_ALARM_ILLEGAL_CMD_SEQ (1UL << 4) +#define VIC_ALARM_FAILED_TEST_ID_OK 0 +#define VIC_ALARM_FAILED_TEST_ID_KAT_STAT 1 +#define VIC_ALARM_FAILED_TEST_ID_KAT 2 +#define VIC_ALARM_FAILED_TEST_ID_MONOBIT 3 +#define VIC_ALARM_FAILED_TEST_ID_RUN 4 +#define VIC_ALARM_FAILED_TEST_ID_LONGRUN 5 +#define VIC_ALARM_FAILED_TEST_ID_AUTOCORRELATION 6 +#define VIC_ALARM_FAILED_TEST_ID_POKER 7 +#define VIC_ALARM_FAILED_TEST_ID_REPETITION_COUNT 8 +#define VIC_ALARM_FAILED_TEST_ID_ADAPATIVE_PROPORTION 9 + +/* BUILD_ID */ +#define VIC_BUILD_ID_STEPPING(x) (((x) >> 28) & 0xF) +#define VIC_BUILD_ID_EPN(x) ((x) & 0xFFFF) + +/* FEATURES */ +#define VIC_FEATURES_AES_256(x) (((x) >> 9) & 1) +#define VIC_FEATURES_EXTRA_PS_PRESENT(x) (((x) >> 8) & 1) +#define VIC_FEATURES_DIAG_LEVEL_NS(x) (((x) >> 7) & 1) +#define VIC_FEATURES_DIAG_LEVEL_CLP800(x) (((x) >> 4) & 7) +#define VIC_FEATURES_DIAG_LEVEL_ST_HLT(x) (((x) >> 1) & 7) +#define VIC_FEATURES_SECURE_RST_STATE(x) ((x) & 1) + +/* IA_CMD */ +#define VIC_IA_CMD_GO (1UL << 31) +#define VIC_IA_CMD_WR (1) + +#define _VIC_SMODE_MAX_REJECTS_MASK 255UL +#define _VIC_SMODE_SECURE_EN_MASK 1UL +#define _VIC_SMODE_NONCE_MASK 1UL +#define _VIC_MODE_SEC_ALG_MASK 1UL +#define _VIC_MODE_ADDIN_PRESENT_MASK 1UL +#define _VIC_MODE_PRED_RESIST_MASK 1UL + +#define VIC_SMODE_SET_MAX_REJECTS(y, x) (((y) & ~(_VIC_SMODE_MAX_REJECTS_MASK << _VIC_SMODE_MAX_REJECTS)) | ((x) << _VIC_SMODE_MAX_REJECTS)) +#define VIC_SMODE_SET_SECURE_EN(y, x) (((y) & ~(_VIC_SMODE_SECURE_EN_MASK << _VIC_SMODE_SECURE_EN)) | ((x) << _VIC_SMODE_SECURE_EN)) +#define VIC_SMODE_SET_NONCE(y, x) (((y) & ~(_VIC_SMODE_NONCE_MASK << _VIC_SMODE_NONCE)) | ((x) << _VIC_SMODE_NONCE)) +#define VIC_SMODE_GET_MAX_REJECTS(x) (((x) >> _VIC_SMODE_MAX_REJECTS) & _VIC_SMODE_MAX_REJECTS_MASK) +#define VIC_SMODE_GET_SECURE_EN(x) (((x) >> _VIC_SMODE_SECURE_EN) & _VIC_SMODE_SECURE_EN_MASK) +#define VIC_SMODE_GET_NONCE(x) (((x) >> _VIC_SMODE_NONCE) & _VIC_SMODE_NONCE_MASK) + +#define VIC_MODE_SET_SEC_ALG(y, x) (((y) & ~(_VIC_MODE_SEC_ALG_MASK << _VIC_MODE_SEC_ALG)) | ((x) << _VIC_MODE_SEC_ALG)) +#define VIC_MODE_SET_PRED_RESIST(y, x) (((y) & ~(_VIC_MODE_PRED_RESIST_MASK << _VIC_MODE_PRED_RESIST)) | ((x) << _VIC_MODE_PRED_RESIST)) +#define VIC_MODE_SET_ADDIN_PRESENT(y, x) (((y) & ~(_VIC_MODE_ADDIN_PRESENT_MASK << _VIC_MODE_ADDIN_PRESENT)) | ((x) << _VIC_MODE_ADDIN_PRESENT)) +#define VIC_MODE_GET_SEC_ALG(x) (((x) >> _VIC_MODE_SEC_ALG) & _VIC_MODE_SEC_ALG_MASK) +#define VIC_MODE_GET_PRED_RESIST(x) (((x) >> _VIC_MODE_PRED_RESIST) & _VIC_MODE_PRED_RESIST_MASK) +#define VIC_MODE_GET_ADDIN_PRESENT(x) (((x) >> _VIC_MODE_ADDIN_PRESENT) & _VIC_MODE_ADDIN_PRESENT_MASK) + +#define VIC_RAND_LEN 4 From e51061c684f5e004a04a4420471bf925c673000a Mon Sep 17 00:00:00 2001 From: Emil Renner 
Berthing Date: Sun, 6 Jun 2021 22:15:22 +0200 Subject: [PATCH 28/62] dt-bindings: hwmon: add starfive,jh7100-temp bindings Add bindings for the temperature sensor on the Starfive JH7100 SoC. Signed-off-by: Emil Renner Berthing --- .../bindings/hwmon/starfive,jh7100-temp.yaml | 43 +++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 Documentation/devicetree/bindings/hwmon/starfive,jh7100-temp.yaml diff --git a/Documentation/devicetree/bindings/hwmon/starfive,jh7100-temp.yaml b/Documentation/devicetree/bindings/hwmon/starfive,jh7100-temp.yaml new file mode 100644 index 00000000000000..5ca52c08d142e5 --- /dev/null +++ b/Documentation/devicetree/bindings/hwmon/starfive,jh7100-temp.yaml @@ -0,0 +1,43 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/hwmon/starfive,jh7100-temp.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: StarFive JH7100 Temperature Sensor + +maintainers: + - Emil Renner Berthing + +description: | + StarFive Technology Co. JH7100 embedded temperature sensor + +properties: + compatible: + enum: + - starfive,jh7100-temp + + reg: + maxItems: 1 + + '#thermal-sensor-cells': + const: 0 + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + tmon: tmon@124a0000 { + compatible = "starfive,jh7100-temp"; + reg = <0x124a0000 0x10000>; + #thermal-sensor-cells = <0>; + interrupts = <122>; + }; From 057ab4512bfacf0fcd8c98ddbba140ff207009c4 Mon Sep 17 00:00:00 2001 From: Emil Renner Berthing Date: Sun, 6 Jun 2021 22:31:18 +0200 Subject: [PATCH 29/62] hwmon: (sfctemp) Add StarFive JH7100 temperature sensor Register definitions based on sfctemp driver in the StarFive 5.10 kernel by Samin Guo . Signed-off-by: Emil Renner Berthing --- drivers/hwmon/Kconfig | 9 ++ drivers/hwmon/Makefile | 1 + drivers/hwmon/sfctemp.c | 309 ++++++++++++++++++++++++++++++++++++++++ 3 files changed, 319 insertions(+) create mode 100644 drivers/hwmon/sfctemp.c diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 87624902ea8090..fa7562920dfac0 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1751,6 +1751,15 @@ config SENSORS_STTS751 This driver can also be built as a module. If so, the module will be called stts751. +config SENSORS_SFCTEMP + tristate "Starfive JH7100 temperature sensor" + help + If you say yes here you get support for temperature sensor + on the Starfive JH7100 SoC. + + This driver can also be built as a module. If so, the module + will be called sfctemp. 
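+	  The measured temperature is reported in millidegrees Celsius
+	  through the standard hwmon sysfs interface.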
+ config SENSORS_SMM665 tristate "Summit Microelectronics SMM665" depends on I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 59e78bc212cf3c..3723eb580bf3e7 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -167,6 +167,7 @@ obj-$(CONFIG_SENSORS_SBTSI) += sbtsi_temp.o obj-$(CONFIG_SENSORS_SCH56XX_COMMON)+= sch56xx-common.o obj-$(CONFIG_SENSORS_SCH5627) += sch5627.o obj-$(CONFIG_SENSORS_SCH5636) += sch5636.o +obj-$(CONFIG_SENSORS_SFCTEMP) += sfctemp.o obj-$(CONFIG_SENSORS_SL28CPLD) += sl28cpld-hwmon.o obj-$(CONFIG_SENSORS_SHT15) += sht15.o obj-$(CONFIG_SENSORS_SHT21) += sht21.o diff --git a/drivers/hwmon/sfctemp.c b/drivers/hwmon/sfctemp.c new file mode 100644 index 00000000000000..62a838063e4e75 --- /dev/null +++ b/drivers/hwmon/sfctemp.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Emil Renner Berthing + * Copyright (C) 2021 Samin Guo + */ +#include +#include +#include +#include +#include +#include +#include +#include + +/* TempSensor reset. The RSTN can be de-asserted once the analog core has + * powered up. Trst(min 100ns) + * 0:reset 1:de-assert */ +#define SFCTEMP_RSTN BIT(0) + +/* TempSensor analog core power down. The analog core will be powered up + * Tpu(min 50us) after PD is de-asserted. RSTN should be held low until the + * analog core is powered up. + * 0:power up 1:power down */ +#define SFCTEMP_PD BIT(1) + +/* TempSensor start conversion enable. + * 0:disable 1:enable */ +#define SFCTEMP_RUN BIT(2) + +/* TempSensor calibration mode enable. + * 0:disable 1:enable */ +#define SFCTEMP_CAL BIT(4) + +/* TempSensor signature enable. Generate a toggle value outputting on DOUT for + * test purpose. + * 0:disable 1:enable */ +#define SFCTEMP_SGN BIT(5) + +/* TempSensor test access control. + * 0000:normal 0001:Test1 0010:Test2 0011:Test3 + * 0100:Test4 1000:Test8 1001:Test9 */ +#define SFCTEMP_TM_Pos 12 +#define SFCTEMP_TM_Msk GENMASK(15, 12) + +/* TempSensor conversion value output. + * Temp(c)=DOUT*Y/4094 - K */ +#define SFCTEMP_DOUT_Pos 16 +#define SFCTEMP_DOUT_Msk GENMASK(27, 16) + +/* TempSensor digital test output. 
*/ +#define SFCTEMP_DIGO BIT(31) + +/* DOUT to Celcius conversion constants */ +#define SFCTEMP_Y1000 237500L +#define SFCTEMP_Z 4094L +#define SFCTEMP_K1000 81100L + +struct sfctemp { + struct mutex lock; + struct completion conversion_done; + void __iomem *regs; + bool enabled; +}; + +static irqreturn_t sfctemp_isr(int irq, void *data) +{ + struct sfctemp *sfctemp = data; + + complete(&sfctemp->conversion_done); + return IRQ_HANDLED; +} + +static void sfctemp_power_up(struct sfctemp *sfctemp) +{ + /* make sure we're powered down first */ + writel(SFCTEMP_PD, sfctemp->regs); + udelay(1); + + writel(0, sfctemp->regs); + /* wait t_pu(50us) + t_rst(100ns) */ + usleep_range(60, 200); + + /* de-assert reset */ + writel(SFCTEMP_RSTN, sfctemp->regs); + udelay(1); /* wait t_su(500ps) */ +} + +static void sfctemp_power_down(struct sfctemp *sfctemp) +{ + writel(SFCTEMP_PD, sfctemp->regs); +} + +static void sfctemp_run_single(struct sfctemp *sfctemp) +{ + writel(SFCTEMP_RSTN | SFCTEMP_RUN, sfctemp->regs); + udelay(1); + writel(SFCTEMP_RSTN, sfctemp->regs); +} + +static int sfctemp_enable(struct sfctemp *sfctemp) +{ + mutex_lock(&sfctemp->lock); + if (sfctemp->enabled) + goto done; + + sfctemp_power_up(sfctemp); + sfctemp->enabled = true; +done: + mutex_unlock(&sfctemp->lock); + return 0; +} + +static int sfctemp_disable(struct sfctemp *sfctemp) +{ + mutex_lock(&sfctemp->lock); + if (!sfctemp->enabled) + goto done; + + sfctemp_power_down(sfctemp); + sfctemp->enabled = false; +done: + mutex_unlock(&sfctemp->lock); + return 0; +} + +static int sfctemp_convert(struct sfctemp *sfctemp, long *val) +{ + long ret; + + mutex_lock(&sfctemp->lock); + if (!sfctemp->enabled) { + ret = -ENODATA; + goto out; + } + + sfctemp_run_single(sfctemp); + + ret = wait_for_completion_interruptible_timeout(&sfctemp->conversion_done, + msecs_to_jiffies(10)); + if (ret < 0) + goto out; + + /* calculate temperature in milli Celcius */ + *val = (long)((readl(sfctemp->regs) & SFCTEMP_DOUT_Msk) >> SFCTEMP_DOUT_Pos) + * SFCTEMP_Y1000 / SFCTEMP_Z - SFCTEMP_K1000; + + ret = 0; +out: + mutex_unlock(&sfctemp->lock); + return ret; +} + +static umode_t sfctemp_is_visible(const void *data, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_enable: + return 0644; + case hwmon_temp_input: + return 0444; + } + return 0; + default: + return 0; + } +} + +static int sfctemp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct sfctemp *sfctemp = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_enable: + *val = sfctemp->enabled; + return 0; + case hwmon_temp_input: + return sfctemp_convert(sfctemp, val); + } + return -EINVAL; + default: + return -EINVAL; + } +} + +static int sfctemp_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct sfctemp *sfctemp = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_enable: + if (val == 0) + return sfctemp_disable(sfctemp); + if (val == 1) + return sfctemp_enable(sfctemp); + break; + } + return -EINVAL; + default: + return -EINVAL; + } +} + +static const struct hwmon_channel_info *sfctemp_info[] = { + HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, HWMON_T_ENABLE | HWMON_T_INPUT), + NULL +}; + +static const struct hwmon_ops sfctemp_hwmon_ops = { + .is_visible = sfctemp_is_visible, + .read = sfctemp_read, + .write = 
sfctemp_write, +}; + +static const struct hwmon_chip_info sfctemp_chip_info = { + .ops = &sfctemp_hwmon_ops, + .info = sfctemp_info, +}; + +static int sfctemp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device *hwmon_dev; + struct resource *mem; + struct sfctemp *sfctemp; + long val; + int ret; + + sfctemp = devm_kzalloc(dev, sizeof(*sfctemp), GFP_KERNEL); + if (!sfctemp) + return -ENOMEM; + + dev_set_drvdata(dev, sfctemp); + mutex_init(&sfctemp->lock); + init_completion(&sfctemp->conversion_done); + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sfctemp->regs = devm_ioremap_resource(dev, mem); + if (IS_ERR(sfctemp->regs)) + return PTR_ERR(sfctemp->regs); + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + + ret = devm_request_irq(dev, ret, sfctemp_isr, + IRQF_SHARED, pdev->name, sfctemp); + if (ret) { + dev_err(dev, "request irq failed: %d\n", ret); + return ret; + } + + ret = sfctemp_enable(sfctemp); + if (ret) + return ret; + + hwmon_dev = hwmon_device_register_with_info(dev, pdev->name, sfctemp, + &sfctemp_chip_info, NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + /* do a conversion to check everything works */ + ret = sfctemp_convert(sfctemp, &val); + if (ret) { + hwmon_device_unregister(hwmon_dev); + return ret; + } + + dev_info(dev, "%ld.%03ld C\n", val / 1000, val % 1000); + return 0; +} + +static int sfctemp_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sfctemp *sfctemp = dev_get_drvdata(dev); + + hwmon_device_unregister(dev); + return sfctemp_disable(sfctemp); +} + +static const struct of_device_id sfctemp_of_match[] = { + { .compatible = "starfive,jh7100-temp" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, sfctemp_of_match); + +static struct platform_driver sfctemp_driver = { + .driver = { + .name = "sfctemp", + .of_match_table = of_match_ptr(sfctemp_of_match), + }, + .probe = sfctemp_probe, + .remove = sfctemp_remove, +}; +module_platform_driver(sfctemp_driver); + +MODULE_AUTHOR("Emil Renner Berthing"); +MODULE_DESCRIPTION("StarFive JH7100 temperature sensor driver"); +MODULE_LICENSE("GPL"); From 45f9d2bdcc6fc19bc923100e9dc524bab6246fea Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 02:54:51 +0800 Subject: [PATCH 30/62] sifive/sifive_l2_cache: Add sifive_l2_flush64_range function --- drivers/soc/sifive/Kconfig | 15 ++++++++++ drivers/soc/sifive/sifive_l2_cache.c | 41 +++++++++++++++++++++++++++- include/soc/sifive/sifive_l2_cache.h | 4 +++ 3 files changed, 59 insertions(+), 1 deletion(-) diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig index 58cf8c40d08d53..4d0fdab56e81a6 100644 --- a/drivers/soc/sifive/Kconfig +++ b/drivers/soc/sifive/Kconfig @@ -7,4 +7,19 @@ config SIFIVE_L2 help Support for the L2 cache controller on SiFive platforms. 
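+# The flush options below back sifive_l2_flush64_range(): it flushes a
+# physical range by writing one address per 64-byte cache line to the
+# controller's FLUSH64 register, and FLUSH_START/FLUSH_SIZE bound the
+# range the helper will accept.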
+config SIFIVE_L2_FLUSH + bool "Support Level 2 Cache Controller Flush operation of SiFive Soc" + +if SIFIVE_L2_FLUSH + +config SIFIVE_L2_FLUSH_START + hex "Level 2 Cache Flush operation start" + default 0x80000000 + +config SIFIVE_L2_FLUSH_SIZE + hex "Level 2 Cache Flush operation size" + default 0x800000000 + +endif # SIFIVE_L2_FLUSH + endif diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index 59640a1d0b28a1..0b9e9e852ee44d 100644 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -29,13 +29,17 @@ #define SIFIVE_L2_DATECCFAIL_HIGH 0x164 #define SIFIVE_L2_DATECCFAIL_COUNT 0x168 +#define SIFIVE_L2_FLUSH64 0x200 + #define SIFIVE_L2_CONFIG 0x00 #define SIFIVE_L2_WAYENABLE 0x08 #define SIFIVE_L2_ECCINJECTERR 0x40 #define SIFIVE_L2_MAX_ECCINTR 4 -static void __iomem *l2_base; +#define SIFIVE_L2_FLUSH64_LINE_LEN 64 + +static void __iomem *l2_base = NULL; static int g_irq[SIFIVE_L2_MAX_ECCINTR]; static struct riscv_cacheinfo_ops l2_cache_ops; @@ -116,6 +120,41 @@ int unregister_sifive_l2_error_notifier(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(unregister_sifive_l2_error_notifier); +#ifdef CONFIG_SIFIVE_L2_FLUSH +void sifive_l2_flush64_range(unsigned long start, unsigned long len) +{ + unsigned long line; + + if(!l2_base) { + pr_warn("L2CACHE: base addr invalid, skipping flush\n"); + return; + } + + /* TODO: if (len == 0), skipping flush or going on? */ + if(!len) { + pr_debug("L2CACHE: flush64 range @ 0x%lx(len:0)\n", start); + return; + } + + /* make sure the address is in the range */ + if(start < CONFIG_SIFIVE_L2_FLUSH_START || + (start + len) > (CONFIG_SIFIVE_L2_FLUSH_START + + CONFIG_SIFIVE_L2_FLUSH_SIZE)) { + pr_warn("L2CACHE: flush64 out of range: %lx(%lx), skip flush\n", + start, len); + return; + } + + mb(); /* sync */ + for (line = start; line < start + len; + line += SIFIVE_L2_FLUSH64_LINE_LEN) { + writeq(line, l2_base + SIFIVE_L2_FLUSH64); + mb(); + } +} +EXPORT_SYMBOL_GPL(sifive_l2_flush64_range); +#endif + static int l2_largest_wayenabled(void) { return readl(l2_base + SIFIVE_L2_WAYENABLE) & 0xFF; diff --git a/include/soc/sifive/sifive_l2_cache.h b/include/soc/sifive/sifive_l2_cache.h index 92ade10ed67e94..dd3e56787d3167 100644 --- a/include/soc/sifive/sifive_l2_cache.h +++ b/include/soc/sifive/sifive_l2_cache.h @@ -7,6 +7,10 @@ #ifndef __SOC_SIFIVE_L2_CACHE_H #define __SOC_SIFIVE_L2_CACHE_H +#ifdef CONFIG_SIFIVE_L2_FLUSH +extern void sifive_l2_flush64_range(unsigned long start, unsigned long len); +#endif + extern int register_sifive_l2_error_notifier(struct notifier_block *nb); extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb); From f1c032d6c0ce86525b4655e22eb0d15db067ec3b Mon Sep 17 00:00:00 2001 From: Tom Date: Mon, 15 Feb 2021 23:59:46 +0800 Subject: [PATCH 31/62] sifive/sifive_l2_cache: Add Starfive support --- drivers/soc/sifive/sifive_l2_cache.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index 0b9e9e852ee44d..5f2b295fc5efdf 100644 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -103,6 +103,7 @@ static void l2_config_read(void) static const struct of_device_id sifive_l2_ids[] = { { .compatible = "sifive,fu540-c000-ccache" }, { .compatible = "sifive,fu740-c000-ccache" }, + { .compatible = "starfive,ccache0" }, { /* end of table */ }, }; From cfa6ef69ae308f119c6c5763aa29569622d1ea1c Mon Sep 17 00:00:00 2001 From: Tom Date: Sat, 13 Feb 2021 22:25:17 
+0800 Subject: [PATCH 32/62] sifive/sifive_l2_cache: Add disabling IRQ option (workaround) --- drivers/irqchip/irq-sifive-plic.c | 41 ++++++++++++++++++++++++++++ drivers/soc/sifive/Kconfig | 4 +++ drivers/soc/sifive/sifive_l2_cache.c | 8 ++++++ 3 files changed, 53 insertions(+) diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 97d4d04b0a80eb..63a8dea3fae5da 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -273,6 +273,44 @@ static int plic_starting_cpu(unsigned int cpu) return 0; } +#if IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) +#ifdef CONFIG_SOC_STARFIVE_VIC7100 +#define SIFIVE_L2_MAX_ECCINTR 4 +#else +#define SIFIVE_L2_MAX_ECCINTR 3 +#endif +static const struct of_device_id sifive_l2_ids[] = { + { .compatible = "sifive,fu540-c000-ccache" }, + { .compatible = "starfive,ccache0" }, + { /* end of table */ }, +}; + +static void sifive_l2_irq_disable(struct plic_handler *handler) +{ + int i, irq; + struct of_phandle_args oirq; + + struct device_node *np = of_find_matching_node(NULL, sifive_l2_ids); + if (!np) { + pr_err("Can't get L2 cache device node.\n"); + return; + } + + for (i = 0; i < SIFIVE_L2_MAX_ECCINTR; i++) { + if (!of_irq_parse_one(np, i, &oirq)) { + irq = *oirq.args; + if (irq) { + pr_info("disable L2 cache irq %d in plic\n", irq); + plic_toggle(handler, irq, 0); + continue; + } + } + pr_err("Can't get L2 cache irq(#%d).\n", i); + } +} +#endif + + static int __init plic_init(struct device_node *node, struct device_node *parent) { @@ -366,6 +404,9 @@ static int __init plic_init(struct device_node *node, done: for (hwirq = 1; hwirq <= nr_irqs; hwirq++) plic_toggle(handler, hwirq, 0); +#if IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) + sifive_l2_irq_disable(handler); +#endif nr_handlers++; } diff --git a/drivers/soc/sifive/Kconfig b/drivers/soc/sifive/Kconfig index 4d0fdab56e81a6..4cccaad9e943b8 100644 --- a/drivers/soc/sifive/Kconfig +++ b/drivers/soc/sifive/Kconfig @@ -22,4 +22,8 @@ config SIFIVE_L2_FLUSH_SIZE endif # SIFIVE_L2_FLUSH +config SIFIVE_L2_IRQ_DISABLE + bool "Disable Level 2 Cache Controller interrupts" + default y if SOC_STARFIVE_VIC7100 + endif diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index 5f2b295fc5efdf..be4e141f5a0ea9 100644 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -40,7 +40,9 @@ #define SIFIVE_L2_FLUSH64_LINE_LEN 64 static void __iomem *l2_base = NULL; +#if !IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) static int g_irq[SIFIVE_L2_MAX_ECCINTR]; +#endif static struct riscv_cacheinfo_ops l2_cache_ops; enum { @@ -188,6 +190,7 @@ static const struct attribute_group *l2_get_priv_group(struct cacheinfo *this_le return NULL; } +#if !IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) static irqreturn_t l2_int_handler(int irq, void *device) { unsigned int add_h, add_l; @@ -231,12 +234,15 @@ static irqreturn_t l2_int_handler(int irq, void *device) return IRQ_HANDLED; } +#endif static int __init sifive_l2_init(void) { struct device_node *np; struct resource res; +#if !IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) int i, rc, intr_num; +#endif np = of_find_matching_node(NULL, sifive_l2_ids); if (!np) @@ -249,6 +255,7 @@ static int __init sifive_l2_init(void) if (!l2_base) return -ENOMEM; +#if !IS_ENABLED(CONFIG_SIFIVE_L2_IRQ_DISABLE) intr_num = of_property_count_u32_elems(np, "interrupts"); if (!intr_num) { pr_err("L2CACHE: no interrupts property\n"); @@ -263,6 +270,7 @@ static int __init sifive_l2_init(void) return rc; } } 
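+	/*
+	 * With SIFIVE_L2_IRQ_DISABLE set, the ECC interrupts are not
+	 * requested here; the PLIC keeps the cache's interrupt lines
+	 * masked instead (see sifive_l2_irq_disable() in
+	 * irq-sifive-plic.c).
+	 */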
+#endif l2_config_read(); From e6cca640970d024d8acd924a64f5b334395c493a Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 21 May 2021 08:35:33 +0200 Subject: [PATCH 33/62] sifive/sifive_l2_cache: Print a backtrace on out-of-range flushes This makes it easier to find out which driver passes a wrong address range. Signed-off-by: Geert Uytterhoeven --- drivers/soc/sifive/sifive_l2_cache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/soc/sifive/sifive_l2_cache.c b/drivers/soc/sifive/sifive_l2_cache.c index be4e141f5a0ea9..626b664547e51e 100644 --- a/drivers/soc/sifive/sifive_l2_cache.c +++ b/drivers/soc/sifive/sifive_l2_cache.c @@ -143,8 +143,8 @@ void sifive_l2_flush64_range(unsigned long start, unsigned long len) if(start < CONFIG_SIFIVE_L2_FLUSH_START || (start + len) > (CONFIG_SIFIVE_L2_FLUSH_START + CONFIG_SIFIVE_L2_FLUSH_SIZE)) { - pr_warn("L2CACHE: flush64 out of range: %lx(%lx), skip flush\n", - start, len); + WARN(1, "L2CACHE: flush64 out of range: %lx(%lx), skip flush\n", + start, len); return; } From d313c5cb394a838272d5f6b7ca17d8aae92eeeb6 Mon Sep 17 00:00:00 2001 From: Chenjieqin Date: Fri, 8 Jan 2021 03:56:54 +0800 Subject: [PATCH 34/62] drivers/pwm: Add SiFive PWM PTC driver --- drivers/pwm/Kconfig | 10 ++ drivers/pwm/Makefile | 1 + drivers/pwm/pwm-sifive-ptc.c | 290 +++++++++++++++++++++++++++++++++++ 3 files changed, 301 insertions(+) create mode 100644 drivers/pwm/pwm-sifive-ptc.c diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index c76adedd58c9f0..e25c22b3ff1955 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -491,6 +491,16 @@ config PWM_SIFIVE To compile this driver as a module, choose M here: the module will be called pwm-sifive. +config PWM_SIFIVE_PTC + tristate "SiFive PWM PTC support" + depends on OF + depends on COMMON_CLK + help + Generic PWM framework driver for SiFive SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-sifive-ptc. + config PWM_SL28CPLD tristate "Kontron sl28cpld PWM support" depends on MFD_SL28CPLD || COMPILE_TEST diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 708840b7fba8d8..3ad7903fd7fe21 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_PWM_RENESAS_TPU) += pwm-renesas-tpu.o obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o obj-$(CONFIG_PWM_SIFIVE) += pwm-sifive.o +obj-$(CONFIG_PWM_SIFIVE_PTC) += pwm-sifive-ptc.o obj-$(CONFIG_PWM_SL28CPLD) += pwm-sl28cpld.o obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o obj-$(CONFIG_PWM_SPRD) += pwm-sprd.o diff --git a/drivers/pwm/pwm-sifive-ptc.c b/drivers/pwm/pwm-sifive-ptc.c new file mode 100644 index 00000000000000..e510181b32bb20 --- /dev/null +++ b/drivers/pwm/pwm-sifive-ptc.c @@ -0,0 +1,290 @@ +/* + * Copyright (C) 2018 SiFive, Inc + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2, as published by + * the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#define PTC_DEBUG 0 + +/* max channel of pwm */ +#define MAX_PWM 8 + +/* PTC Register offsets */ +#define REG_RPTC_CNTR 0x0 +#define REG_RPTC_HRC 0x4 +#define REG_RPTC_LRC 0x8 +#define REG_RPTC_CTRL 0xC + +/* Bit for PWM clock */ +#define BIT_PWM_CLOCK_EN 31 + +/* Bit for clock gen soft reset */ +#define BIT_CLK_GEN_SOFT_RESET 13 + +#define NS_1 1000000000 + +/* Access PTC register (cntr hrc lrc and ctrl) ,need to replace PWM_BASE_ADDR */ +#define REG_PTC_BASE_ADDR_SUB(base, N) ((base) + ((N>3)?((N-4)*0x10+(1<<15)):(N*0x10))) +#define REG_PTC_RPTC_CNTR(base,N) (REG_PTC_BASE_ADDR_SUB(base,N)) +#define REG_PTC_RPTC_HRC(base,N) (REG_PTC_BASE_ADDR_SUB(base,N) + 0x4) +#define REG_PTC_RPTC_LRC(base,N) (REG_PTC_BASE_ADDR_SUB(base,N) + 0x8) +#define REG_PTC_RPTC_CTRL(base,N) (REG_PTC_BASE_ADDR_SUB(base,N) + 0xC) + +/* pwm ptc device */ +struct sifive_pwm_ptc_device { + struct pwm_chip chip; + struct clk *clk; + void __iomem *regs; + int irq; + /* apb clock frequency , from dts */ + unsigned int approx_period; +}; + +static inline struct sifive_pwm_ptc_device *chip_to_sifive_ptc(struct pwm_chip *c) +{ + return container_of(c, struct sifive_pwm_ptc_device, chip); +} + + +static void sifive_pwm_ptc_get_state(struct pwm_chip *chip, struct pwm_device *dev, struct pwm_state *state) +{ + struct sifive_pwm_ptc_device *pwm = chip_to_sifive_ptc(chip); + uint32_t data_lrc; + uint32_t data_hrc; + uint32_t pwm_clk_ns = 0; + + /* get lrc and hrc data from registe*/ + data_lrc = ioread32(REG_PTC_RPTC_LRC(pwm->regs, dev->hwpwm)); + data_hrc = ioread32(REG_PTC_RPTC_HRC(pwm->regs, dev->hwpwm)); + + /* how many ns does apb clock elapse */ + pwm_clk_ns = NS_1 / pwm->approx_period; + + /* pwm period(ns) */ + state->period = data_lrc*pwm_clk_ns; + + /* duty cycle(ns) ,means high level eclapse ns if it is normal polarity */ + state->duty_cycle = data_hrc*pwm_clk_ns; + + /* polarity,we don't use it now because it is not in dts */ + state->polarity = PWM_POLARITY_NORMAL; + + /* enabled or not */ + state->enabled = 1; +#ifdef PTC_DEBUG + printk("sifive_pwm_ptc_get_state in,no:%d....\r\n",dev->hwpwm); + printk("data_hrc:0x%x 0x%x \n", data_hrc, data_lrc); + printk("period:%llu\r\n",state->period); + printk("duty_cycle:%llu\r\n",state->duty_cycle); + printk("polarity:%d\r\n",state->polarity); + printk("enabled:%d\r\n",state->enabled); +#endif +} + + +static int sifive_pwm_ptc_apply(struct pwm_chip *chip, struct pwm_device *dev, struct pwm_state *state) +{ + struct sifive_pwm_ptc_device *pwm = chip_to_sifive_ptc(chip); + uint32_t pwm_clk_ns = 0; + uint32_t data_hrc = 0; + uint32_t data_lrc = 0; + uint32_t period_data = 0; + uint32_t duty_data = 0; + void __iomem* reg_addr; + +#if PTC_DEBUG + printk("sifive_pwm_ptc_apply in,no:%d....\r\n",dev->hwpwm); + printk("set parameter......\r\n"); + printk("period:%d\r\n",state->period); + printk("duty_cycle:%d\r\n",state->duty_cycle); + printk("polarity:%d\r\n",state->polarity); + printk("enabled:%d\r\n",state->enabled); +#endif + /* duty_cycle should be less or equal than period */ + if(state->duty_cycle > state->period) + state->duty_cycle = state->period; + + /* calculate pwm real period (ns) */ + pwm_clk_ns = NS_1 / pwm->approx_period; + +#if PTC_DEBUG + printk("approx_period,:%d,pwm_clk_ns:%d\r\n",pwm->approx_period,pwm_clk_ns); +#endif + + /* calculate period count */ + period_data = state->period / pwm_clk_ns; + + if (!state->enabled) + /* if is unenable,just set duty_dat to 0 , means low level 
always */ + duty_data = 0; + else + /* calculate duty count*/ + duty_data = state->duty_cycle / pwm_clk_ns; + +#if PTC_DEBUG + printk("period_data:%d,duty_data:%d\r\n",period_data,duty_data); +#endif + + if(state->polarity == PWM_POLARITY_NORMAL) + /* calculate data_hrc */ + data_hrc = period_data - duty_data; + else + /* calculate data_hrc */ + data_hrc = duty_data; + + data_lrc = period_data; + + /* set hrc */ + reg_addr = REG_PTC_RPTC_HRC(pwm->regs, dev->hwpwm); +#if PTC_DEBUG + printk("[sifive_pwm_ptc_config]reg_addr:0x%lx,data:%d....\n",reg_addr,data_hrc); +#endif + iowrite32(data_hrc, reg_addr); + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_config]hrc ok....\n"); +#endif + + /* set lrc */ + reg_addr = REG_PTC_RPTC_LRC(pwm->regs, dev->hwpwm); +#if PTC_DEBUG + printk("[sifive_pwm_ptc_config]reg_addr:0x%lx,data:%d....\n",reg_addr,data_lrc); +#endif + + iowrite32(data_lrc, reg_addr); + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_config]lrc ok....\n"); +#endif + + return 0; +} + + + +static const struct pwm_ops sifive_pwm_ptc_ops = { + .get_state = sifive_pwm_ptc_get_state, + .apply = (void *)sifive_pwm_ptc_apply, + .owner = THIS_MODULE, +}; + + + + +static int sifive_pwm_ptc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = pdev->dev.of_node; + struct sifive_pwm_ptc_device *pwm; + struct pwm_chip *chip; + struct resource *res; + int ret; + +#if PTC_DEBUG + printk("sifive_pwm_ptc_probe in....\r\n"); +#endif + pwm = devm_kzalloc(dev, sizeof(*pwm), GFP_KERNEL); + if (!pwm) { + dev_err(dev, "Out of memory\n"); + return -ENOMEM; + } + + chip = &pwm->chip; + chip->dev = dev; + chip->ops = &sifive_pwm_ptc_ops; + + /* how many parameters can be transfered to ptc,need to fix */ + chip->of_pwm_n_cells = 3; + chip->base = -1; + + /* get pwm channels count, max value is 8 */ + ret = of_property_read_u32(node, "starfive,npwm", &chip->npwm); + if (ret < 0 || chip->npwm > MAX_PWM) + chip->npwm = MAX_PWM; + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_probe] npwm:0x%lx....\r\n",chip->npwm); +#endif + /* get apb clock frequency */ + ret = of_property_read_u32(node, "sifive,approx-period", &pwm->approx_period); + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_probe] approx_period:%d....\r\n",pwm->approx_period); +#endif + /* get IO base address*/ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_probe] res start:0x%lx,end:0x%lx....\r\n",res->start,res->end); +#endif + pwm->regs = devm_ioremap_resource(dev, res); + if (IS_ERR(pwm->regs)) + { + dev_err(dev, "Unable to map IO resources\n"); + return PTR_ERR(pwm->regs); + } + +#if PTC_DEBUG + printk("[sifive_pwm_ptc_probe] regs:0x%lx....\r\n",pwm->regs); +#endif + + pwm->clk = devm_clk_get(dev, NULL); + if (IS_ERR(pwm->clk)) { + dev_err(dev, "Unable to find controller clock\n"); + return PTR_ERR(pwm->clk); + } + + /* after add,it will display as /sys/class/pwm/pwmchip0,0 is chip->base + * after execute echo 0 > export in , pwm0 can be seen */ + ret = pwmchip_add(chip); + if (ret < 0) { + dev_err(dev, "cannot register PTC: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, pwm); + +#if PTC_DEBUG + printk("SiFive PWM PTC chip registered %d PWMs\n", chip->npwm); +#endif + + return 0; +} + +static int sifive_pwm_ptc_remove(struct platform_device *dev) +{ + struct sifive_pwm_ptc_device *pwm = platform_get_drvdata(dev); + struct pwm_chip *chip = &pwm->chip; + + return pwmchip_remove(chip); +} + +static const struct of_device_id sifive_pwm_ptc_of_match[] = { + { 
.compatible = "sifive,pwm0" }, + { .compatible = "starfive,pwm0" }, + { }, +}; +MODULE_DEVICE_TABLE(of, sifive_pwm_ptc_of_match); + +static struct platform_driver sifive_pwm_ptc_driver = { + .probe = sifive_pwm_ptc_probe, + .remove = sifive_pwm_ptc_remove, + .driver = { + .name = "pwm-sifive-ptc", + .of_match_table = of_match_ptr(sifive_pwm_ptc_of_match), + }, +}; +module_platform_driver(sifive_pwm_ptc_driver); + +MODULE_DESCRIPTION("SiFive PWM PTC driver"); +MODULE_LICENSE("GPL v2"); From 338e5edcb7b1baf43843f490220efe32b6140f81 Mon Sep 17 00:00:00 2001 From: "yiming.li" Date: Tue, 16 Mar 2021 01:45:19 +0800 Subject: [PATCH 35/62] drivers/pwm/pwm-sifive-ptc: Clear PWM CNTR Clear CNTR of PWM after setting period & duty_cycle --- drivers/pwm/pwm-sifive-ptc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/pwm/pwm-sifive-ptc.c b/drivers/pwm/pwm-sifive-ptc.c index e510181b32bb20..9f149a064ed866 100644 --- a/drivers/pwm/pwm-sifive-ptc.c +++ b/drivers/pwm/pwm-sifive-ptc.c @@ -167,6 +167,10 @@ static int sifive_pwm_ptc_apply(struct pwm_chip *chip, struct pwm_device *dev, s printk("[sifive_pwm_ptc_config]lrc ok....\n"); #endif + /* Clear REG_RPTC_CNTR after setting period & duty_cycle*/ + reg_addr = REG_PTC_RPTC_CNTR(pwm->regs, dev->hwpwm); + iowrite32(0, reg_addr); + return 0; } From 1911a8a065573fa1d2a369a4f22808249ab4d0f6 Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 02:57:50 +0800 Subject: [PATCH 36/62] drivers/dma: Add dw-axi-dmac-starfive driver for VIC7100 --- drivers/dma/Kconfig | 7 + drivers/dma/Makefile | 1 + drivers/dma/dw-axi-dmac-starfive/Makefile | 2 + .../dw-axi-dmac-starfive-misc.c | 322 ++++++++++++++++++ .../starfive_dmaengine_memcpy.c | 287 ++++++++++++++++ .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 103 +++++- drivers/dma/dw-axi-dmac/dw-axi-dmac.h | 36 +- 7 files changed, 738 insertions(+), 20 deletions(-) create mode 100644 drivers/dma/dw-axi-dmac-starfive/Makefile create mode 100644 drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c create mode 100644 drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6ab9d9a488a6ed..60f4e80b23f484 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -180,6 +180,13 @@ config DW_AXI_DMAC NOTE: This driver wasn't tested on 64 bit platform because of lack 64 bit platform with Synopsys DW AXI DMAC. +config DW_AXI_DMAC_STARFIVE + tristate "Synopsys DesignWare AXI DMA support for StarFive SOC" + depends on SOC_STARFIVE_VIC7100 + help + Enable support for Synopsys DesignWare AXI DMA controller. + NOTE: It's for StarFive SOC. 
+ config EP93XX_DMA bool "Cirrus Logic EP93xx DMA support" depends on ARCH_EP93XX || COMPILE_TEST diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index aa69094e35470c..7d332af8b96c6f 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ +obj-$(CONFIG_DW_AXI_DMAC_STARFIVE) += dw-axi-dmac-starfive/ obj-$(CONFIG_DW_DMAC_CORE) += dw/ obj-$(CONFIG_DW_EDMA) += dw-edma/ obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o diff --git a/drivers/dma/dw-axi-dmac-starfive/Makefile b/drivers/dma/dw-axi-dmac-starfive/Makefile new file mode 100644 index 00000000000000..c30fd928982f9e --- /dev/null +++ b/drivers/dma/dw-axi-dmac-starfive/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_DW_AXI_DMAC_STARFIVE) += starfive_dmaengine_memcpy.o dw-axi-dmac-starfive-misc.o \ No newline at end of file diff --git a/drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c b/drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c new file mode 100644 index 00000000000000..a1189bbe1e5bae --- /dev/null +++ b/drivers/dma/dw-axi-dmac-starfive/dw-axi-dmac-starfive-misc.c @@ -0,0 +1,322 @@ +/* + * Copyright 2020 StarFive, Inc + * + * DW AXI dma driver for StarFive SoC VIC7100. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define DRIVER_NAME "dwaxidma" +#define AXIDMA_IOC_MAGIC 'A' +#define AXIDMA_IOCGETCHN _IO(AXIDMA_IOC_MAGIC, 0) +#define AXIDMA_IOCCFGANDSTART _IO(AXIDMA_IOC_MAGIC, 1) +#define AXIDMA_IOCGETSTATUS _IO(AXIDMA_IOC_MAGIC, 2) +#define AXIDMA_IOCRELEASECHN _IO(AXIDMA_IOC_MAGIC, 3) + +#define AXI_DMA_MAX_CHANS 20 + +#define DMA_CHN_UNUSED 0 +#define DMA_CHN_USED 1 +#define DMA_STATUS_UNFINISHED 0 +#define DMA_STATUS_FINISHED 1 + +/* for DEBUG*/ +//#define DW_DMA_CHECK_RESULTS +//#define DW_DMA_PRINT_MEM +//#define DW_DMA_FLUSH_DESC + +struct axidma_chncfg { + unsigned long src_addr; /*dma addr*/ + unsigned long dst_addr; /*dma addr*/ + unsigned long virt_src; /*mmap src addr*/ + unsigned long virt_dst; /*mmap dst addr*/ + unsigned long phys; /*desc phys addr*/ + unsigned int len; /*transport lenth*/ + int mem_fd; /*fd*/ + unsigned char chn_num; /*dma channels number*/ + unsigned char status; /*dma transport status*/ +}; + +struct axidma_chns { + struct dma_chan *dma_chan; + unsigned char used; + unsigned char status; + unsigned char reserve[2]; +}; + +struct axidma_chns channels[AXI_DMA_MAX_CHANS]; +#ifdef DW_DMA_PRINT_MEM +void print_in_line_u64(u8 *p_name, u64 *p_buf, u32 len) +{ + u32 i, j; + u32 line; + u32* ptmp; + u32 len_tmp; + u32 rest = len / 4; + + printk("%s: 0x%#llx, 0x%x\n", + p_name, dw_virt_to_phys((void *)p_buf), len); + + if(len >= 0x1000) + len_tmp = 0x1000 / 32; //print 128 size of memory. + else + len_tmp = len / 8; //print real 100% size of memory. 
+ + rest = len / 4; //one line print 8 u32 + + for (i = 0; i < len_tmp; i += 4, rest -= line) { + if (!(i % 4)) + printk(KERN_CONT KERN_INFO" %#llx: ", + dw_virt_to_phys((void *)(p_buf + i))); + + ptmp = (u32*)(p_buf + i); + line = (rest > 8) ? 8 : rest; + + for (j = 0; j < line; j++) + printk(KERN_CONT KERN_INFO "%08x ", *(ptmp + j)); + + printk(KERN_CONT KERN_INFO"\n"); + } +} +#endif + +static int axidma_open(struct inode *inode, struct file *file) +{ + /*Open: do nothing*/ + return 0; +} + +static int axidma_release(struct inode *inode, struct file *file) +{ + /* Release: do nothing */ + return 0; +} + +static ssize_t axidma_write(struct file *file, const char __user *data, + size_t len, loff_t *ppos) +{ + /* Write: do nothing */ + return 0; +} + +static void dma_complete_func(void *status) +{ + *(char *)status = DMA_STATUS_FINISHED; +} + +static long axidma_unlocked_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + int i, ret; + dma_cap_mask_t mask; + dma_cookie_t cookie; + struct dma_device *dma_dev; + struct axidma_chncfg chncfg; + struct dma_async_tx_descriptor *tx; + +#ifdef DW_DMA_FLUSH_DESC + void *des_chncfg = &chncfg; + chncfg.phys = dw_virt_to_phys(des_chncfg); +#endif + memset(&chncfg, 0, sizeof(struct axidma_chncfg)); + + switch(cmd) { + case AXIDMA_IOCGETCHN: + for(i = 0; i < AXI_DMA_MAX_CHANS; i++) { + if(DMA_CHN_UNUSED == channels[i].used) + break; + } + if(AXI_DMA_MAX_CHANS == i) { + printk("Get dma chn failed, because no idle channel\n"); + goto error; + } else { + channels[i].used = DMA_CHN_USED; + channels[i].status = DMA_STATUS_UNFINISHED; + chncfg.status = DMA_STATUS_UNFINISHED; + chncfg.chn_num = i; + } + dma_cap_zero(mask); + dma_cap_set(DMA_MEMCPY, mask); + channels[i].dma_chan = dma_request_channel(mask, NULL, NULL); + if(!channels[i].dma_chan) { + printk("dma request channel failed\n"); + channels[i].used = DMA_CHN_UNUSED; + goto error; + } + ret = copy_to_user((void __user *)arg, &chncfg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy to user failed\n"); + goto error; + } + break; + case AXIDMA_IOCCFGANDSTART: +#ifdef DW_DMA_CHECK_RESULTS + void *src,*dst; +#endif + ret = copy_from_user(&chncfg, (void __user *)arg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy from user failed\n"); + goto error; + } + + if((chncfg.chn_num >= AXI_DMA_MAX_CHANS) || + (!channels[chncfg.chn_num].dma_chan)) { + printk("chn_num[%d] is invalid\n", chncfg.chn_num); + goto error; + } + dma_dev = channels[chncfg.chn_num].dma_chan->device; +#ifdef DW_DMA_FLUSH_DESC + starfive_flush_dcache(chncfg.phys,sizeof(chncfg)); +#endif +#ifdef DW_DMA_CHECK_RESULTS + src = dw_phys_to_virt(chncfg.src_addr); + dst = dw_phys_to_virt(chncfg.dst_addr); +#endif + starfive_flush_dcache(chncfg.src_addr, chncfg.len); + + tx = dma_dev->device_prep_dma_memcpy( + channels[chncfg.chn_num].dma_chan, + chncfg.dst_addr, chncfg.src_addr, chncfg.len, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if(!tx){ + printk("Failed to prepare DMA memcpy\n"); + goto error; + } + channels[chncfg.chn_num].status = DMA_STATUS_UNFINISHED; + tx->callback_param = &channels[chncfg.chn_num].status; + tx->callback = dma_complete_func; + cookie = tx->tx_submit(tx); + if(dma_submit_error(cookie)) { + printk("Failed to dma tx_submit\n"); + goto error; + } + dma_async_issue_pending(channels[chncfg.chn_num].dma_chan); + /*flush dcache*/ + starfive_flush_dcache(chncfg.dst_addr, chncfg.len); +#ifdef DW_DMA_PRINT_MEM + print_in_line_u64((u8 *)"src", (u64 *)src, chncfg.len); + print_in_line_u64((u8 
*)"dst", (u64 *)dst, chncfg.len); +#endif +#ifdef DW_DMA_CHECK_RESULTS + if(memcmp(src, dst, chncfg.len)) + printk("check data faild.\n"); + else + printk("check data ok.\n"); +#endif + break; + + case AXIDMA_IOCGETSTATUS: + ret = copy_from_user(&chncfg, (void __user *)arg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy from user failed\n"); + goto error; + } + + if(chncfg.chn_num >= AXI_DMA_MAX_CHANS) { + printk("chn_num[%d] is invalid\n", chncfg.chn_num); + goto error; + } + + chncfg.status = channels[chncfg.chn_num].status; + + ret = copy_to_user((void __user *)arg, &chncfg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy to user failed\n"); + goto error; + } + break; + + case AXIDMA_IOCRELEASECHN: + ret = copy_from_user(&chncfg, (void __user *)arg, + sizeof(struct axidma_chncfg)); + if(ret) { + printk("Copy from user failed\n"); + goto error; + } + + if((chncfg.chn_num >= AXI_DMA_MAX_CHANS) || + (!channels[chncfg.chn_num].dma_chan)) { + printk("chn_num[%d] is invalid\n", chncfg.chn_num); + goto error; + } + + dma_release_channel(channels[chncfg.chn_num].dma_chan); + channels[chncfg.chn_num].used = DMA_CHN_UNUSED; + channels[chncfg.chn_num].status = DMA_STATUS_UNFINISHED; + break; + + default: + printk("Don't support cmd [%d]\n", cmd); + break; + } + return 0; + +error: + return -EFAULT; +} + +/* + * Kernel Interfaces + */ +static struct file_operations axidma_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = axidma_write, + .unlocked_ioctl = axidma_unlocked_ioctl, + .open = axidma_open, + .release = axidma_release, +}; + +static struct miscdevice axidma_miscdev = { + .minor = MISC_DYNAMIC_MINOR, + .name = DRIVER_NAME, + .fops = &axidma_fops, +}; + +static int __init axidma_init(void) +{ + int ret = misc_register(&axidma_miscdev); + if(ret) { + printk (KERN_ERR "cannot register miscdev (err=%d)\n", ret); + return ret; + } + + memset(&channels, 0, sizeof(channels)); + + return 0; +} + +static void __exit axidma_exit(void) +{ + misc_deregister(&axidma_miscdev); +} + +module_init(axidma_init); +module_exit(axidma_exit); + +MODULE_AUTHOR("samin.guo"); +MODULE_DESCRIPTION("DW Axi Dmac Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c b/drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c new file mode 100644 index 00000000000000..aee72c10d77fa7 --- /dev/null +++ b/drivers/dma/dw-axi-dmac-starfive/starfive_dmaengine_memcpy.c @@ -0,0 +1,287 @@ +/* + * Copyright 2020 StarFive, Inc + * + * API for dma mem2mem. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +static volatile int dma_finished = 0; +static DECLARE_WAIT_QUEUE_HEAD(wq); + +u64 dw_virt_to_phys(void *vaddr) +{ + u64 pfn_offset = ((u64)vaddr) & 0xfff; + + return _dw_virt_to_phys((u64 *)vaddr) + pfn_offset; +} +EXPORT_SYMBOL(dw_virt_to_phys); + +void *dw_phys_to_virt(u64 phys) +{ + u64 pfn_offset = phys & 0xfff; + + return (void *)(_dw_phys_to_virt(phys) + pfn_offset); +} +EXPORT_SYMBOL(dw_phys_to_virt); + +static void tx_callback(void *dma_async_param) +{ + dma_finished = 1; + wake_up_interruptible(&wq); +} + +static int _dma_async_alloc_buf(struct device *dma_dev, + void **src, void **dst, size_t size, + dma_addr_t *src_dma, dma_addr_t *dst_dma) +{ + *src = dma_alloc_coherent(dma_dev, size, src_dma, GFP_KERNEL); + if(!(*src)) { + DMA_DEBUG("src alloc err.\n"); + goto _FAILED_ALLOC_SRC; + } + + *dst = dma_alloc_coherent(dma_dev, size, dst_dma, GFP_KERNEL); + if(!(*dst)) { + DMA_DEBUG("dst alloc err.\n"); + goto _FAILED_ALLOC_DST; + } + + return 0; + +_FAILED_ALLOC_DST: + dma_free_coherent(dma_dev, size, *src, *src_dma); + +_FAILED_ALLOC_SRC: + dma_free_coherent(dma_dev, size, *dst, *dst_dma); + + return -1; +} + +static int _dma_async_prebuf(void *src, void *dst, size_t size) +{ + memset((u8 *)src, 0xff, size); + memset((u8 *)dst, 0x00, size); + return 0; +} + +static int _dma_async_check_data(void *src, void *dst, size_t size) +{ + return memcmp(src, dst, size); +} + +static void _dma_async_release(struct dma_chan *chan) +{ + dma_release_channel(chan); +} + +static struct dma_chan *_dma_get_channel(enum dma_transaction_type tx_type) +{ + dma_cap_mask_t dma_mask; + + dma_cap_zero(dma_mask); + dma_cap_set(tx_type, dma_mask); + + return dma_request_channel(dma_mask, NULL, NULL); +} + +static struct dma_async_tx_descriptor *_dma_async_get_desc( + struct dma_chan *chan, + dma_addr_t src_dma, dma_addr_t dst_dma, + size_t size) +{ + dma_finished = 0; + return dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, size, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +} + +static void _dma_async_do_start(struct dma_async_tx_descriptor *desc, + struct dma_chan *chan) +{ + dma_cookie_t dma_cookie = dmaengine_submit(desc); + if (dma_submit_error(dma_cookie)) + DMA_DEBUG("Failed to do DMA tx_submit\n"); + + dma_async_issue_pending(chan); + wait_event_interruptible(wq, dma_finished); +} + +int dw_dma_async_do_memcpy(void *src, void *dst, size_t size) +{ + int ret; + struct device *dma_dev; + struct dma_chan *chan; + dma_addr_t src_dma, dst_dma; + struct dma_async_tx_descriptor *desc; + + const struct iommu_ops *iommu; + u64 dma_addr = 0, dma_size = 0; + + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL); + if(!dma_dev){ + dev_err(dma_dev, "kmalloc error.\n"); + return -ENOMEM; + } + + dma_dev->bus = NULL; + dma_dev->coherent_dma_mask = 0xffffffff; + + iort_dma_setup(dma_dev, &dma_addr, &dma_size); + iommu = iort_iommu_configure_id(dma_dev, NULL); + if (PTR_ERR(iommu) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + arch_setup_dma_ops(dma_dev, dst_dma, dma_size, iommu, true); + + if(_dma_async_alloc_buf(dma_dev, &src, &dst, size, &src_dma, &dst_dma)) { + dev_err(dma_dev, "Err alloc.\n"); + return -ENOMEM; + } + + DMA_DEBUG("src=%#llx, dst=%#llx\n", (u64)src, (u64)dst); + DMA_DEBUG("dma_src=%#x dma_dst=%#x\n", (u32)src_dma, (u32)dst_dma); + + _dma_async_prebuf(src, dst, size); + + chan = _dma_get_channel(DMA_MEMCPY); + if(!chan ){ + DMA_PRINTK("Err get chan.\n"); + 
return -EBUSY; + } + DMA_DEBUG("get chan ok.\n"); + + desc = _dma_async_get_desc(chan, src_dma, dst_dma, size); + if(!desc){ + DMA_PRINTK("Err get desc.\n"); + dma_release_channel(chan); + return -ENOMEM; + } + DMA_DEBUG("get desc ok.\n"); + + desc->callback = tx_callback; + + starfive_flush_dcache(src_dma, size); + starfive_flush_dcache(dst_dma, size); + + _dma_async_do_start(desc, chan); + _dma_async_release(chan); + + ret = _dma_async_check_data(src, dst, size); + + dma_free_coherent(dma_dev, size, src, src_dma); + dma_free_coherent(dma_dev, size, dst, dst_dma); + + return ret; +} +EXPORT_SYMBOL(dw_dma_async_do_memcpy); + +/* +* phys addr for dma. +*/ +int dw_dma_memcpy_raw(dma_addr_t src_dma, dma_addr_t dst_dma, size_t size) +{ + struct dma_chan *chan; + struct device *dma_dev; + struct dma_async_tx_descriptor *desc; + + const struct iommu_ops *iommu; + u64 dma_addr = 0, dma_size = 0; + + dma_dev = kzalloc(sizeof(*dma_dev), GFP_KERNEL); + if(!dma_dev){ + DMA_PRINTK("kmalloc error.\n"); + return -ENOMEM; + } + + dma_dev->bus = NULL; + dma_dev->coherent_dma_mask = 0xffffffff; + + iort_dma_setup(dma_dev, &dma_addr, &dma_size); + iommu = iort_iommu_configure_id(dma_dev, NULL); + if (PTR_ERR(iommu) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + arch_setup_dma_ops(dma_dev, dst_dma, dma_size, iommu, true); + + chan = _dma_get_channel(DMA_MEMCPY); + if(!chan){ + DMA_PRINTK("Error get chan.\n"); + return -EBUSY; + } + DMA_DEBUG("get chan ok.\n"); + + DMA_DEBUG("src_dma=%#llx, dst_dma=%#llx \n", src_dma, dst_dma); + desc = _dma_async_get_desc(chan, src_dma, dst_dma, size); + if(!desc){ + DMA_PRINTK("Error get desc.\n"); + dma_release_channel(chan); + return -ENOMEM; + } + DMA_DEBUG("get desc ok.\n"); + + desc->callback = tx_callback; + + starfive_flush_dcache(src_dma, size); + starfive_flush_dcache(dst_dma, size); + + _dma_async_do_start(desc, chan); + _dma_async_release(chan); + + return 0; +} +EXPORT_SYMBOL(dw_dma_memcpy_raw); + +/* +*virtl addr for cpu. +*/ +int dw_dma_memcpy(void *src, void *dst, size_t size) +{ + dma_addr_t src_dma, dst_dma; + + src_dma = dw_virt_to_phys(src); + dst_dma = dw_virt_to_phys(dst); + + dw_dma_memcpy_raw(src_dma, dst_dma, size); + return 0; +} +EXPORT_SYMBOL(dw_dma_memcpy); + +int dw_dma_mem2mem_test(void) +{ + int ret; + void *src = NULL; + void *dst = NULL; + size_t size = 256; + + ret = dw_dma_async_do_memcpy(src, dst, size); + if(ret){ + DMA_PRINTK("memcpy failed.\n"); + } else { + DMA_PRINTK("memcpy ok.\n"); + } + + return ret; +} diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index d9e4ac3edb4ea1..fd1939eb821ec8 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -32,6 +32,8 @@ #include "../dmaengine.h" #include "../virt-dma.h" +#include + /* * The set of bus widths supported by the DMA controller. 
DW AXI DMAC supports * master data bus width up to 512 bits (for both AXI master interfaces), but @@ -148,24 +150,43 @@ static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan) return axi_chan_ioread32(chan, CH_INTSTATUS); } +static inline bool axi_chan_get_nr8(struct axi_dma_chan *chan) +{ + return chan->chip->flag->nr_chan_8; +} + static inline void axi_chan_disable(struct axi_dma_chan *chan) { u32 val; - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); - val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + if(axi_chan_get_nr8(chan)) { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN_8); + val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT_8); + val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT_8; + axi_dma_iowrite32(chan->chip, DMAC_CHEN_8, val); + } else { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); + val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT); + val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; + axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + } } static inline void axi_chan_enable(struct axi_dma_chan *chan) { u32 val; - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | - BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + if(axi_chan_get_nr8(chan)) { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN_8); + val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT_8 | + BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT_8; + axi_dma_iowrite32(chan->chip, DMAC_CHEN_8, val); + } else { + val = axi_dma_ioread32(chan->chip, DMAC_CHEN); + val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT | + BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT; + axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + } } static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan) @@ -335,6 +356,7 @@ static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set) static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, struct axi_dma_desc *first) { + struct axi_dma_desc *desc; u32 priority = chan->chip->dw->hdata->priority[chan->id]; u32 reg, irq_mask; u8 lms = 0; /* Select AXI0 master for LLI fetching */ @@ -384,6 +406,23 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, irq_mask |= DWAXIDMAC_IRQ_SUSPENDED; axi_chan_irq_set(chan, irq_mask); + /*flush all the desc */ +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + if(chan->chip->flag->need_flush) { + /*flush fisrt desc*/ + starfive_flush_dcache(first->vd.tx.phys, sizeof(*first)); + + list_for_each_entry(desc, &first->xfer_list, xfer_list) { + starfive_flush_dcache(desc->vd.tx.phys, sizeof(*desc)); + + dev_dbg(chan->chip->dev, + "sar:%#llx dar:%#llx llp:%#llx ctl:0x%x:%08x\n", + desc->lli.sar, desc->lli.dar, desc->lli.llp, + desc->lli.ctl_hi, desc->lli.ctl_lo); + } + } +#endif + axi_chan_enable(chan); } @@ -1070,8 +1109,10 @@ static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id) if (status & DWAXIDMAC_IRQ_ALL_ERR) axi_chan_handle_err(chan, status); - else if (status & DWAXIDMAC_IRQ_DMA_TRF) + else if (status & DWAXIDMAC_IRQ_DMA_TRF) { axi_chan_block_xfer_complete(chan); + dev_dbg(chip->dev, "axi_chan_block_xfer_complete.\n"); + } } /* Re-enable interrupts */ @@ -1126,10 +1167,17 @@ static int dma_chan_pause(struct dma_chan *dchan) spin_lock_irqsave(&chan->vc.lock, flags); - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT | - BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); + if(axi_chan_get_nr8(chan)){ + val = 
axi_dma_ioread32(chan->chip, DMAC_CHSUSP_8); + val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT_8 | + BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT_8; + axi_dma_iowrite32(chan->chip, DMAC_CHSUSP_8, val); + } else { + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSP); + val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT | + BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT; + axi_dma_iowrite32(chan->chip, DMAC_CHSUSP, val); + } do { if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED) @@ -1152,11 +1200,17 @@ static inline void axi_chan_resume(struct axi_dma_chan *chan) { u32 val; - val = axi_dma_ioread32(chan->chip, DMAC_CHEN); - val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); - val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); - axi_dma_iowrite32(chan->chip, DMAC_CHEN, val); - + if(axi_chan_get_nr8(chan)){ + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSP_8); + val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT_8); + val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT_8); + axi_dma_iowrite32(chan->chip, DMAC_CHSUSP_8, val); + } else { + val = axi_dma_ioread32(chan->chip, DMAC_CHSUSP); + val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT); + val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT); + axi_dma_iowrite32(chan->chip, DMAC_CHSUSP, val); + } chan->is_paused = false; } @@ -1248,6 +1302,13 @@ static int parse_device_properties(struct axi_dma_chip *chip) chip->dw->hdata->nr_channels = tmp; + if(chip->dw->hdata->nr_channels > 8){ + chip->flag->nr_chan_8 = true; +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + chip->flag->need_flush = true; +#endif + } + ret = device_property_read_u32(dev, "snps,dma-masters", &tmp); if (ret) return ret; @@ -1309,6 +1370,7 @@ static int dw_probe(struct platform_device *pdev) struct resource *mem; struct dw_axi_dma *dw; struct dw_axi_dma_hcfg *hdata; + struct dw_dma_flag *flag; u32 i; int ret; @@ -1324,9 +1386,14 @@ static int dw_probe(struct platform_device *pdev) if (!hdata) return -ENOMEM; + flag = devm_kzalloc(&pdev->dev, sizeof(*flag), GFP_KERNEL); + if (!flag) + return -ENOMEM; + chip->dw = dw; chip->dev = &pdev->dev; chip->dw->hdata = hdata; + chip->flag = flag; chip->irq = platform_get_irq(pdev, 0); if (chip->irq < 0) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h index b69897887c7654..0e454a926a82c8 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h @@ -5,6 +5,8 @@ * Synopsys DesignWare AXI DMA Controller driver. * * Author: Eugeniy Paltsev + * Samin.guo + * add support for (channels > 8). 2020. 
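+ * When more than 8 channels are present, the enable/suspend/abort
+ * controls use the separate *_8 register offsets and bit positions
+ * defined below.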
*/ #ifndef _AXI_DMA_PLATFORM_H @@ -18,10 +20,17 @@ #include "../virt-dma.h" -#define DMAC_MAX_CHANNELS 8 +#define DMAC_MAX_CHANNELS 16 #define DMAC_MAX_MASTERS 2 #define DMAC_MAX_BLK_SIZE 0x200000 +struct dw_dma_flag { + bool nr_chan_8; +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + bool need_flush; +#endif +}; + struct dw_axi_dma_hcfg { u32 nr_channels; u32 nr_masters; @@ -68,6 +77,7 @@ struct axi_dma_chip { struct clk *core_clk; struct clk *cfgr_clk; struct dw_axi_dma *dw; + struct dw_dma_flag *flag; }; /* LLI == Linked List Item */ @@ -139,6 +149,15 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) #define DMAC_CHEN 0x018 /* R/W DMAC Channel Enable */ #define DMAC_CHEN_L 0x018 /* R/W DMAC Channel Enable 00-31 */ #define DMAC_CHEN_H 0x01C /* R/W DMAC Channel Enable 32-63 */ +#define DMAC_CHSUSP 0x018 /* R/W DMAC Channel suspend */ +#define DMAC_CHABORT 0x018 /* R/W DMAC Channel Abort */ + +#define DMAC_CHEN_8 0x018 /* R/W DMAC Channel Enable */ +#define DMAC_CHEN_L_8 0x018 /* R/W DMAC Channel Enable */ +#define DMAC_CHEN_H_8 0x01C /* R/W DMAC Channel Enable */ +#define DMAC_CHSUSP_8 0x020 /* R/W DMAC Channel Suspend */ +#define DMAC_CHABORT_8 0x028 /* R/W DMAC Channel Abort */ + #define DMAC_INTSTATUS 0x030 /* R DMAC Interrupt Status */ #define DMAC_COMMON_INTCLEAR 0x038 /* W DMAC Interrupt Clear */ #define DMAC_COMMON_INTSTATUS_ENA 0x040 /* R DMAC Interrupt Status Enable */ @@ -199,6 +218,19 @@ static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan) #define DMAC_CHAN_SUSP_SHIFT 16 #define DMAC_CHAN_SUSP_WE_SHIFT 24 +#define DMAC_CHAN_ABORT_SHIFT 32 +#define DMAC_CHAN_ABORT_WE_SHIFT 40 + + +#define DMAC_CHAN_EN_SHIFT_8 0 +#define DMAC_CHAN_EN_WE_SHIFT_8 16 + +#define DMAC_CHAN_SUSP_SHIFT_8 0 +#define DMAC_CHAN_SUSP_WE_SHIFT_8 16 + +#define DMAC_CHAN_ABORT_SHIFT_8 0 +#define DMAC_CHAN_ABORT_WE_SHIFT_8 16 + /* CH_CTL_H */ #define CH_CTL_H_ARLEN_EN BIT(6) #define CH_CTL_H_ARLEN_POS 7 @@ -255,7 +287,7 @@ enum { #define CH_CTL_L_SRC_MAST BIT(0) /* CH_CFG_H */ -#define CH_CFG_H_PRIORITY_POS 17 +#define CH_CFG_H_PRIORITY_POS 15 #define CH_CFG_H_HS_SEL_DST_POS 4 #define CH_CFG_H_HS_SEL_SRC_POS 3 enum { From 62a62eb36f6a26c3ca221a6c83bace8e7e534921 Mon Sep 17 00:00:00 2001 From: Michael Scott Date: Fri, 30 Apr 2021 18:06:40 -0700 Subject: [PATCH 37/62] drivers/dma: Fix VIC7100 dw-axi-dmac-platform driver addition Descriptor management was simplified with commit: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ef6fb2d6f1abd56cc067c694253ea362159b5ac3 The code added to the dw-axi-dmac-platform driver due to VIC7100 cache coherency issues needed to follow those changes.
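With that change a transfer no longer carries its own xfer_list; the first descriptor owns an hw_desc[] array whose entries each point at one hardware LLI. The VIC7100 flush therefore walks that array instead of the old list, roughly as follows (matching the hunk below; starfive_flush_dcache() is the SoC helper used throughout this series):

	/* push every LLI the CPU just built out of the D-cache
	 * before the DMAC fetches the chain
	 */
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		starfive_flush_dcache(first->hw_desc[i].llp,
				      sizeof(*first->hw_desc[i].lli));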
Signed-off-by: Michael Scott --- .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index fd1939eb821ec8..76a6585088463d 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -356,7 +356,6 @@ static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set) static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, struct axi_dma_desc *first) { - struct axi_dma_desc *desc; u32 priority = chan->chip->dw->hdata->priority[chan->id]; u32 reg, irq_mask; u8 lms = 0; /* Select AXI0 master for LLI fetching */ @@ -406,19 +405,23 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan, irq_mask |= DWAXIDMAC_IRQ_SUSPENDED; axi_chan_irq_set(chan, irq_mask); - /*flush all the desc */ + /* flush all the desc */ #ifdef CONFIG_SOC_STARFIVE_VIC7100 if(chan->chip->flag->need_flush) { - /*flush fisrt desc*/ - starfive_flush_dcache(first->vd.tx.phys, sizeof(*first)); + int count = atomic_read(&chan->descs_allocated); + int i; - list_for_each_entry(desc, &first->xfer_list, xfer_list) { - starfive_flush_dcache(desc->vd.tx.phys, sizeof(*desc)); + for (i = 0; i < count; i++) { + starfive_flush_dcache(first->hw_desc[i].llp, + sizeof(*first->hw_desc[i].lli)); dev_dbg(chan->chip->dev, "sar:%#llx dar:%#llx llp:%#llx ctl:0x%x:%08x\n", - desc->lli.sar, desc->lli.dar, desc->lli.llp, - desc->lli.ctl_hi, desc->lli.ctl_lo); + first->hw_desc[i].lli->sar, + first->hw_desc[i].lli->dar, + first->hw_desc[i].lli->llp, + first->hw_desc[i].lli->ctl_hi, + first->hw_desc[i].lli->ctl_lo); } } #endif From 2eab70491f515aa085148be84ef37594fd43806f Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 03:25:24 +0800 Subject: [PATCH 38/62] drivers/i2c: Improve Synopsys DesignWare I2C adapter driver for StarFive VIC7100 --- drivers/i2c/busses/i2c-designware-platdrv.c | 27 +++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 4b37f28ec0c6c1..904694a19e5124 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -39,6 +39,21 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) return clk_get_rate(dev->clk)/1000; } +#ifdef CONFIG_SOC_STARFIVE_VIC7100 +static u32 starfive_i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) +{ + u32 val; + + if(!device_property_read_u32(dev->dev, "clocks", &val)) { + dev_info(dev->dev, "Using 'clocks' : %u / 1000", val); + return (val / 1000); + } else { + dev_info(dev->dev, "Using the static setting value: 49500"); + return 49500; + } +} +#endif + #ifdef CONFIG_ACPI static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT33C2", 0 }, @@ -271,6 +286,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) if (!dev->sda_hold_time && t->sda_hold_ns) dev->sda_hold_time = div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000); +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + } else { + u64 clk_khz; + + dev->get_clk_rate_khz = starfive_i2c_dw_get_clk_rate_khz; + clk_khz = dev->get_clk_rate_khz(dev); + + if (!dev->sda_hold_time && t->sda_hold_ns) + dev->sda_hold_time = + div_u64(clk_khz * t->sda_hold_ns + 500000, + 1000000); +#endif } adap = &dev->adapter; From 0914fe919ff311365d78e6e0f798a0f446a78c22 Mon Sep 17 00:00:00 2001 From: Tom Date: Sat, 13 Mar 2021 15:22:38 +0800 Subject: 
[PATCH 39/62] drivers/i2c: Add GPIO configuration for VIC7100. [FIXME] why we can not do it in U-boot? [geert: Rebase to v5.13-rc1] --- drivers/i2c/busses/i2c-designware-core.h | 2 + drivers/i2c/busses/i2c-designware-master.c | 44 +++++++++++++++++++++ drivers/i2c/busses/i2c-designware-platdrv.c | 5 +++ 3 files changed, 51 insertions(+) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 6a53f75abf7c79..c8e2c59bc29d50 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -286,6 +286,8 @@ struct dw_i2c_dev { int (*init)(struct dw_i2c_dev *dev); int (*set_sda_hold_time)(struct dw_i2c_dev *dev); int mode; + int scl_gpio; + int sda_gpio; struct i2c_bus_recovery_info rinfo; bool suspended; }; diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 9b08bb5df38d29..85d2cddfcdf6c8 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -20,6 +20,7 @@ #include #include #include +#include #include "i2c-designware-core.h" @@ -164,6 +165,48 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev) return 0; } +static void i2c_dw_configure_gpio(struct dw_i2c_dev *dev) +{ +#ifdef CONFIG_SOC_STARFIVE_VIC7100_I2C_GPIO + if((dev->scl_gpio > 0) && (dev->sda_gpio > 0)) { + SET_GPIO_dout_LOW(dev->scl_gpio); + SET_GPIO_dout_LOW(dev->sda_gpio); + SET_GPIO_doen_reverse_(dev->scl_gpio,1); + SET_GPIO_doen_reverse_(dev->sda_gpio,1); + switch(dev->adapter.nr) { + case 0: + SET_GPIO_doen_i2c0_pad_sck_oe(dev->scl_gpio); + SET_GPIO_doen_i2c0_pad_sda_oe(dev->sda_gpio); + SET_GPIO_i2c0_pad_sck_in(dev->scl_gpio); + SET_GPIO_i2c0_pad_sda_in(dev->sda_gpio); + break; + case 1: + SET_GPIO_doen_i2c1_pad_sck_oe(dev->scl_gpio); + SET_GPIO_doen_i2c1_pad_sda_oe(dev->sda_gpio); + SET_GPIO_i2c1_pad_sck_in(dev->scl_gpio); + SET_GPIO_i2c1_pad_sda_in(dev->sda_gpio); + break; + case 2: + SET_GPIO_doen_i2c2_pad_sck_oe(dev->scl_gpio); + SET_GPIO_doen_i2c2_pad_sda_oe(dev->sda_gpio); + SET_GPIO_i2c2_pad_sck_in(dev->scl_gpio); + SET_GPIO_i2c2_pad_sda_in(dev->sda_gpio); + break; + case 3: + SET_GPIO_doen_i2c3_pad_sck_oe(dev->scl_gpio); + SET_GPIO_doen_i2c3_pad_sda_oe(dev->sda_gpio); + SET_GPIO_i2c3_pad_sck_in(dev->scl_gpio); + SET_GPIO_i2c3_pad_sda_in(dev->sda_gpio); + break; + default: + dev_err(dev->dev, "i2c adapter number is invalid\n"); + } + } else + dev_err(dev->dev, "scl/sda gpio number is invalid !\n"); +#endif + return; +} + /** * i2c_dw_init_master() - Initialize the designware I2C master hardware * @dev: device private data @@ -927,6 +970,7 @@ int i2c_dw_probe_master(struct dw_i2c_dev *dev) dev_err(dev->dev, "failure adding adapter: %d\n", ret); pm_runtime_put_noidle(dev->dev); + i2c_dw_configure_gpio(dev); return ret; } EXPORT_SYMBOL_GPL(i2c_dw_probe_master); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 904694a19e5124..1388e79bd9ef73 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include #include +#include #include "i2c-designware-core.h" @@ -220,6 +222,7 @@ static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = { static int dw_i2c_plat_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; struct i2c_adapter *adap; struct dw_i2c_dev *dev; struct i2c_timings 
*t; @@ -236,6 +239,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) dev->flags = (uintptr_t)device_get_match_data(&pdev->dev); dev->dev = &pdev->dev; dev->irq = irq; + dev->scl_gpio = of_get_named_gpio(np, "scl-gpio", 0); + dev->sda_gpio = of_get_named_gpio(np, "sda-gpio", 0); platform_set_drvdata(pdev, dev); ret = dw_i2c_plat_request_regs(dev); From f4d89844d4f265f309eae6c0331b4a4ddc6aa951 Mon Sep 17 00:00:00 2001 From: Matteo Croce Date: Fri, 21 May 2021 03:26:38 +0200 Subject: [PATCH 40/62] net: stmmac: use GFP_DMA32 Signed-off-by: Matteo Croce --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c87202cbd3d6d3..bba94e8f2a08a0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1431,14 +1431,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; if (!buf->page) { - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->page = page_pool_alloc_pages(rx_q->page_pool, + GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32); if (!buf->page) return -ENOMEM; buf->page_offset = stmmac_rx_offset(priv); } if (priv->sph && !buf->sec_page) { - buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, + GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32); if (!buf->sec_page) return -ENOMEM; @@ -4452,13 +4454,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) p = rx_q->dma_rx + entry; if (!buf->page) { - buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->page = page_pool_alloc_pages(rx_q->page_pool, + GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32); if (!buf->page) break; } if (priv->sph && !buf->sec_page) { - buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); + buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, + GFP_ATOMIC | __GFP_NOWARN | GFP_DMA32); if (!buf->sec_page) break; From d4c4044c08134dca8e5eaaeb6d3faf97dc453b6d Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 04:01:19 +0800 Subject: [PATCH 41/62] net: stmmac: Add dcache flush functions for JH7100 Note: including uSDK v0.9->v1.0 patch [geert: Rebase to v5.13-rc1] Warnings fixed by Matteo. 
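All of the flushes funnel through one wrapper that rounds the start address down to the 64-byte cache line and grows the length by the offset that was dropped, so whatever (start, len) range a caller passes stays covered. A worked example with made-up values (starfive_flush_dcache() comes from the StarFive SoC support elsewhere in this series; _ALIGN_DOWN() rounds down to a multiple of its second argument):

	/* e.g. start = 0x80040046, len = 32:
	 *   _ALIGN_DOWN(start, 64) = 0x80040040
	 *   len + start % 64       = 32 + 6 = 38
	 * so the flush spans 0x80040040..0x80040065, a superset of the
	 * caller's 0x80040046..0x80040065 range.
	 */
	static inline void stmmac_flush_dcache(unsigned long start, unsigned long len)
	{
		starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + start % 64);
	}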
Signed-off-by: Matteo Croce --- drivers/net/ethernet/stmicro/stmmac/descs.h | 3 + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 239 +++++++++++++++++- 2 files changed, 238 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 49d6a866244f47..1bf6506ff77883 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -169,6 +169,9 @@ struct dma_extended_desc { __le32 des5; /* Reserved */ __le32 des6; /* Tx/Rx Timestamp Low */ __le32 des7; /* Tx/Rx Timestamp High */ +#if defined(CONFIG_FPGA_GMAC_FLUSH_DDR) + __le32 pad[8]; +#endif }; /* Enhanced descriptor for TBS */ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index bba94e8f2a08a0..f5af8a8de7356b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -166,6 +166,20 @@ int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled) } EXPORT_SYMBOL_GPL(stmmac_bus_clks_config); +#ifdef CONFIG_FPGA_GMAC_FLUSH_DDR +#define FLUSH_RX_DESC_ENABLE +#define FLUSH_RX_BUF_ENABLE + +#define FLUSH_TX_DESC_ENABLE +#define FLUSH_TX_BUF_ENABLE + +#include +static inline void stmmac_flush_dcache(unsigned long start, unsigned long len) +{ + starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + start % 64); +} +#endif + /** * stmmac_verify_args - verify the driver parameters. * Description: it checks the driver parameters and set a default in case of @@ -1363,6 +1377,19 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue) priv->use_riwt, priv->mode, (i == priv->dma_rx_size - 1), priv->dma_buf_sz); + +#ifdef FLUSH_RX_DESC_ENABLE + { + unsigned long len; + + if (priv->extend_desc) + len = DMA_DEFAULT_RX_SIZE * sizeof(struct dma_extended_desc); + else + len = DMA_DEFAULT_RX_SIZE * sizeof(struct dma_desc); + + stmmac_flush_dcache(rx_q->dma_rx_phy, len); + } +#endif } /** @@ -1391,6 +1418,19 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue) stmmac_init_tx_desc(priv, p, priv->mode, last); } + +#ifdef FLUSH_TX_DESC_ENABLE + { + unsigned long len; + + if (priv->extend_desc) + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_extended_desc); + else + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_desc); + + stmmac_flush_dcache(tx_q->dma_tx_phy, len); + } +#endif } /** @@ -1454,6 +1494,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; stmmac_set_desc_addr(priv, p, buf->addr); +#ifdef FLUSH_RX_BUF_ENABLE + stmmac_flush_dcache(buf->addr, priv->dma_buf_sz); +#endif if (priv->dma_buf_sz == BUF_SIZE_16KiB) stmmac_init_desc3(priv, p); @@ -1780,6 +1823,18 @@ static int __init_dma_tx_desc_rings(struct stmmac_priv *priv, u32 queue) tx_q->tx_skbuff_dma[i].last_segment = false; tx_q->tx_skbuff[i] = NULL; } +#ifdef FLUSH_TX_DESC_ENABLE + { + unsigned long len; + + if (priv->extend_desc) + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_extended_desc); + else + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_desc); + + stmmac_flush_dcache(tx_q->dma_tx_phy, len); + } +#endif tx_q->dirty_tx = 0; tx_q->cur_tx = 0; @@ -2494,8 +2549,22 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) status = stmmac_tx_status(priv, &priv->dev->stats, &priv->xstats, p, priv->ioaddr); /* Check if the descriptor is owned by the DMA */ - if (unlikely(status & 
tx_dma_own)) + if (unlikely(status & tx_dma_own)) { +#ifdef FLUSH_TX_DESC_ENABLE + unsigned long start, len; + + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif break; + } count++; @@ -2564,6 +2633,22 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) } stmmac_release_tx_desc(priv, p, priv->mode); +#ifdef FLUSH_TX_DESC_ENABLE + { + /* wangyh for test,flush description */ + unsigned long start, len; + + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); + } +#endif entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); } @@ -2637,6 +2722,19 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan) stmmac_stop_tx_dma(priv, chan); dma_free_tx_skbufs(priv, chan); stmmac_clear_tx_descriptors(priv, chan); + +#ifdef FLUSH_TX_DESC_ENABLE + { + unsigned long len; + + if (priv->extend_desc) + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_extended_desc); + else + len = DMA_DEFAULT_TX_SIZE * sizeof(struct dma_desc); + + stmmac_flush_dcache(tx_q->dma_tx_phy, len); + } +#endif tx_q->dirty_tx = 0; tx_q->cur_tx = 0; tx_q->mss = 0; @@ -3882,6 +3980,21 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des, (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), 0, 0); +#ifdef FLUSH_TX_DESC_ENABLE + { + unsigned long start, len; + + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + tx_q->cur_tx * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + tx_q->cur_tx * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); + } +#endif tmp_len -= TSO_MAX_BUFF_SIZE; } } @@ -3949,6 +4062,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) u32 pay_len, mss; dma_addr_t des; int i; +#ifdef FLUSH_TX_DESC_ENABLE + unsigned int mss_entry; + unsigned long start, len; +#endif tx_q = &priv->tx_queue[queue]; first_tx = tx_q->cur_tx; @@ -3988,6 +4105,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; stmmac_set_mss(priv, mss_desc, mss); +#ifdef FLUSH_TX_DESC_ENABLE + mss_entry = tx_q->cur_tx; +#endif tx_q->mss = mss; tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); @@ -4022,6 +4142,10 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; +#ifdef FLUSH_TX_BUF_ENABLE + stmmac_flush_dcache(des, skb_headlen(skb)); +#endif + tx_q->tx_skbuff_dma[first_entry].buf = des; tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); tx_q->tx_skbuff_dma[first_entry].map_as_page = false; @@ -4055,6 +4179,9 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; +#ifdef FLUSH_TX_BUF_ENABLE + stmmac_flush_dcache(des, skb_frag_size(frag)); +#endif stmmac_tso_allocator(priv, des, skb_frag_size(frag), (i == nfrags - 1), queue); @@ -4102,7 +4229,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device 
*dev) * ndo_start_xmit will fill this descriptor the next time it's * called and stmmac_tx_clean may clean up to this descriptor. */ - tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", @@ -4133,6 +4259,17 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) 1, tx_q->tx_skbuff_dma[first_entry].last_segment, hdr / 4, (skb->len - proto_hdr_len)); +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + first_entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + first_entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif /* If context desc is used to change MSS */ if (mss_desc) { /* Make sure that first descriptor has been completely @@ -4142,6 +4279,17 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) */ dma_wmb(); stmmac_set_tx_owner(priv, mss_desc); +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + mss_entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + mss_entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif } if (netif_msg_pktdata(priv)) { @@ -4189,6 +4337,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) bool has_vlan, set_ic; int entry, first_tx; dma_addr_t des; +#ifdef FLUSH_TX_DESC_ENABLE + unsigned long start, len; +#endif tx_q = &priv->tx_queue[queue]; first_tx = tx_q->cur_tx; @@ -4268,6 +4419,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) if (dma_mapping_error(priv->device, des)) goto dma_map_err; /* should reuse desc w/o issues */ +#ifdef FLUSH_TX_BUF_ENABLE + stmmac_flush_dcache(des, len); +#endif tx_q->tx_skbuff_dma[entry].buf = des; stmmac_set_desc_addr(priv, desc, des); @@ -4280,6 +4434,17 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) /* Prepare the descriptor and set the own bit too */ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion, priv->mode, 1, last_segment, skb->len); +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif } /* Only the last descriptor gets to point to the skb. 
*/ @@ -4316,6 +4481,17 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) tx_q->tx_count_frames = 0; stmmac_set_tx_ic(priv, desc); +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif priv->xstats.tx_set_ic_bit++; } @@ -4395,6 +4571,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); +#ifdef FLUSH_TX_BUF_ENABLE + stmmac_flush_dcache(des, nopaged_len); +#endif + +#ifdef FLUSH_TX_DESC_ENABLE + if (priv->extend_desc) { + start = tx_q->dma_tx_phy + first_entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = tx_q->dma_tx_phy + first_entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif + stmmac_enable_dma_transmission(priv, priv->ioaddr); stmmac_flush_tx_descriptors(priv, queue); @@ -4488,9 +4680,25 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) if (!priv->use_riwt) use_rx_wd = false; - dma_wmb(); stmmac_set_rx_owner(priv, p, use_rx_wd); +#ifdef FLUSH_RX_DESC_ENABLE + { + unsigned long start, len; + + if (priv->extend_desc) { + start = rx_q->dma_rx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = rx_q->dma_rx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); + } +#endif + dma_wmb(); + entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); } rx_q->dirty_rx = entry; @@ -5106,8 +5314,22 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) status = stmmac_rx_status(priv, &priv->dev->stats, &priv->xstats, p); /* check if managed by the DMA otherwise go ahead */ - if (unlikely(status & dma_own)) + if (unlikely(status & dma_own)) { +#ifdef FLUSH_RX_DESC_ENABLE + unsigned long start, len; + + if (priv->extend_desc) { + start = rx_q->dma_rx_phy + entry * sizeof(struct dma_extended_desc); + len = sizeof(struct dma_extended_desc); + } else { + start = rx_q->dma_rx_phy + entry * sizeof(struct dma_desc); + len = sizeof(struct dma_desc); + } + + stmmac_flush_dcache(start, len); +#endif break; + } rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, priv->dma_rx_size); @@ -5174,6 +5396,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) dma_sync_single_for_cpu(priv->device, buf->addr, buf1_len, dma_dir); +#ifdef FLUSH_RX_BUF_ENABLE + stmmac_flush_dcache(buf->addr, buf1_len); +#endif xdp.data = page_address(buf->page) + buf->page_offset; xdp.data_end = xdp.data + buf1_len; @@ -5245,6 +5470,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) } else if (buf1_len) { dma_sync_single_for_cpu(priv->device, buf->addr, buf1_len, dma_dir); +#ifdef FLUSH_RX_BUF_ENABLE + stmmac_flush_dcache(buf->addr, buf1_len); +#endif skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, buf->page, buf->page_offset, buf1_len, priv->dma_buf_sz); @@ -5257,6 +5485,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) if (buf2_len) { dma_sync_single_for_cpu(priv->device, buf->sec_addr, buf2_len, dma_dir); +#ifdef FLUSH_RX_BUF_ENABLE + stmmac_flush_dcache(buf->sec_addr, buf2_len); +#endif skb_add_rx_frag(skb, 
skb_shinfo(skb)->nr_frags, buf->sec_page, 0, buf2_len, priv->dma_buf_sz); From 3511b5eee6723fa34a2e067f432fc367a2c9796d Mon Sep 17 00:00:00 2001 From: Tom Date: Tue, 6 Apr 2021 13:30:26 +0800 Subject: [PATCH 42/62] net: stmmac: Configure gtxclk based on speed --- .../ethernet/stmicro/stmmac/dwmac-generic.c | 47 +++++++++++++++++++ drivers/net/phy/micrel.c | 0 2 files changed, 47 insertions(+) mode change 100644 => 100755 drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c mode change 100644 => 100755 drivers/net/phy/micrel.c diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c old mode 100644 new mode 100755 index fbfda55b4c5263..8b6b1bfad35e64 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c @@ -16,6 +16,50 @@ #include "stmmac.h" #include "stmmac_platform.h" +/* + * GMAC_GTXCLK is the gmac clock-divider register; its low 8 bits hold the divide value. + * bit name access default description + * [31] clk_gmac_gtxclk enable RW 0x0 "1:enable; 0:disable" + * [30] reserved - 0x0 reserved + * [29:8] reserved - 0x0 reserved + * [7:0] clk_gmac_gtxclk divide ratio RW 0x4 divide value + * + * The gmac root clock is 500M; gtxclk needs the following rates: + * 1000M: gtxclk is 125M, divide value 500/125 = 0x4 + * 100M: gtxclk is 25M, divide value 500/25 = 0x14 + * 10M: gtxclk is 2.5M, divide value 500/2.5 = 0xc8 + */ +#ifdef CONFIG_SOC_STARFIVE_VIC7100 +#define CLKGEN_BASE 0x11800000 +#define CLKGEN_GMAC_GTXCLK_OFFSET 0x1EC +#define CLKGEN_GMAC_GTXCLK_ADDR (CLKGEN_BASE + CLKGEN_GMAC_GTXCLK_OFFSET) + +#define CLKGEN_125M_DIV 0x4 +#define CLKGEN_25M_DIV 0x14 +#define CLKGEN_2_5M_DIV 0xc8 + +static void dwmac_fixed_speed(void *priv, unsigned int speed) +{ + u32 value; + void *addr = ioremap(CLKGEN_GMAC_GTXCLK_ADDR, sizeof(value)); + if (!addr) { + pr_err("%s can't remap CLKGEN_GMAC_GTXCLK_ADDR\n", __func__); + return; + } + + value = readl(addr) & (~0x000000FF); + + switch (speed) { + case SPEED_1000: value |= CLKGEN_125M_DIV; break; + case SPEED_100: value |= CLKGEN_25M_DIV; break; + case SPEED_10: value |= CLKGEN_2_5M_DIV; break; + default: iounmap(addr); return; + } + writel(value, addr); /* set gmac gtxclk */ + iounmap(addr); +} +#endif + static int dwmac_generic_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -52,6 +96,9 @@ static int dwmac_generic_probe(struct platform_device *pdev) if (ret) goto err_remove_config_dt; } +#ifdef CONFIG_SOC_STARFIVE_VIC7100 + plat_dat->fix_mac_speed = dwmac_fixed_speed; +#endif ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c old mode 100644 new mode 100755 From f97d4ab347f409f76019a6116181ab01043b5ccb Mon Sep 17 00:00:00 2001 From: Tom Date: Wed, 6 Jan 2021 20:31:08 +0800 Subject: [PATCH 43/62] drivers/mmc/host/dw_mmc: Add dcache flush(VIC7100 ONLY).
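Same cache-coherency workaround as the other VIC7100 patches in this series: the IDMAC descriptor ring and the buffers it points at are flushed out of the D-cache so the controller and the CPU see the same data. Roughly, in the 32-bit descriptor path touched below (dw_mci_flush_dcache() is the wrapper this patch adds, gated by CONFIG_MMC_DW_FLUSH_DDR):

	/* after des2 has been filled with the buffer address of this segment */
	dw_mci_flush_dcache((unsigned long)mem_addr, (unsigned long)desc_len);

	/* and once the chain is terminated, flush the descriptor ring itself */
	dw_mci_flush_dcache((unsigned long)host->sg_dma,
			    (unsigned long)(sg_len * sizeof(struct idmac_desc)));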
--- drivers/mmc/host/dw_mmc.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index d333130d15315a..360373de4e074a 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -145,6 +145,14 @@ static int dw_mci_req_show(struct seq_file *s, void *v) } DEFINE_SHOW_ATTRIBUTE(dw_mci_req); +#ifdef CONFIG_MMC_DW_FLUSH_DDR +#include +static inline void dw_mci_flush_dcache(unsigned long start, unsigned long len) +{ + starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + start % 64); +} +#endif + static int dw_mci_regs_show(struct seq_file *s, void *v) { struct dw_mci *host = s->private; @@ -691,6 +699,10 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host, /* Physical address to DMA to/from */ desc->des2 = cpu_to_le32(mem_addr); +#ifdef CONFIG_MMC_DW_FLUSH_DDR + dw_mci_flush_dcache((unsigned long)mem_addr, + (unsigned long)desc_len); +#endif /* Update physical address for the next desc */ mem_addr += desc_len; @@ -707,6 +719,10 @@ static inline int dw_mci_prepare_desc32(struct dw_mci *host, IDMAC_DES0_DIC)); desc_last->des0 |= cpu_to_le32(IDMAC_DES0_LD); +#ifdef CONFIG_MMC_DW_FLUSH_DDR + dw_mci_flush_dcache((unsigned long)(host->sg_dma), + (unsigned long)(sg_len * sizeof(struct idmac_desc))); +#endif return 0; err_own_bit: /* restore the descriptor chain as it's polluted */ From 920613f4f0ec29f6ac05a581b03e8e4b263fb20e Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 19:51:05 +0800 Subject: [PATCH 44/62] drivers/usb: Add dcache flush(VIC7100 ONLY) drivers/usb/cdns3/ drivers/usb/core/ drivers/usb/host/ include/linux/usb.h Geert: Rebase to v5.13-rc1 Stafford: Don't flush NULL values Signed-off-by: Stafford Horne --- drivers/usb/cdns3/cdns3-debug.h | 3 + drivers/usb/cdns3/cdns3-ep0.c | 129 ++++++++++-- drivers/usb/cdns3/cdns3-gadget.c | 204 +++++++++++++++++- drivers/usb/cdns3/cdns3-gadget.h | 8 + drivers/usb/cdns3/cdns3-trace.h | 7 + drivers/usb/core/devio.c | 22 ++ drivers/usb/core/hcd.c | 81 +++++++- drivers/usb/core/urb.c | 4 + drivers/usb/core/usb.c | 10 + drivers/usb/host/xhci-dbg.c | 3 + drivers/usb/host/xhci-dbgcap.c | 28 +++ drivers/usb/host/xhci-debugfs.c | 12 ++ drivers/usb/host/xhci-hub.c | 9 +- drivers/usb/host/xhci-mem.c | 141 ++++++++++++- drivers/usb/host/xhci-ring.c | 346 +++++++++++++++++++++++++++++-- drivers/usb/host/xhci.c | 230 +++++++++++++++++++- include/linux/usb.h | 22 ++ 17 files changed, 1209 insertions(+), 50 deletions(-) diff --git a/drivers/usb/cdns3/cdns3-debug.h b/drivers/usb/cdns3/cdns3-debug.h index a5c6a29e134067..ba4143280a2312 100644 --- a/drivers/usb/cdns3/cdns3-debug.h +++ b/drivers/usb/cdns3/cdns3-debug.h @@ -152,6 +152,9 @@ static inline char *cdns3_dbg_ring(struct cdns3_endpoint *priv_ep, le32_to_cpu(trb->buffer), le32_to_cpu(trb->length), le32_to_cpu(trb->control)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(struct cdns3_trb)); +#endif addr += sizeof(*trb); } diff --git a/drivers/usb/cdns3/cdns3-ep0.c b/drivers/usb/cdns3/cdns3-ep0.c index 9a17802275d51b..2e0ad2df53d3e3 100644 --- a/drivers/usb/cdns3/cdns3-ep0.c +++ b/drivers/usb/cdns3/cdns3-ep0.c @@ -53,6 +53,11 @@ static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev, priv_ep->trb_pool[1].control = 0; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), + 2 * TRB_SIZE); +#endif + trace_cdns3_prepare_trb(priv_ep, priv_ep->trb_pool); cdns3_select_ep(priv_dev, priv_dev->ep0_data_dir); @@ -88,6 +93,9 @@ static int 
cdns3_ep0_delegate_req(struct cdns3_device *priv_dev, spin_unlock(&priv_dev->lock); priv_dev->setup_pending = 1; ret = priv_dev->gadget_driver->setup(&priv_dev->gadget, ctrl_req); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif priv_dev->setup_pending = 0; spin_lock(&priv_dev->lock); return ret; @@ -97,6 +105,12 @@ static void cdns3_prepare_setup_packet(struct cdns3_device *priv_dev) { priv_dev->ep0_data_dir = 0; priv_dev->ep0_stage = CDNS3_SETUP_STAGE; + +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(priv_dev->setup_dma, + sizeof(struct usb_ctrlrequest)); +#endif + cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, sizeof(struct usb_ctrlrequest), 0, 0); } @@ -140,6 +154,9 @@ static int cdns3_req_ep0_set_configuration(struct cdns3_device *priv_dev, u32 config = le16_to_cpu(ctrl_req->wValue); int result = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif switch (device_state) { case USB_STATE_ADDRESS: result = cdns3_ep0_delegate_req(priv_dev, ctrl_req); @@ -185,7 +202,9 @@ static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev, u32 addr; addr = le16_to_cpu(ctrl_req->wValue); - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif if (addr > USB_DEVICE_MAX_ADDRESS) { dev_err(priv_dev->dev, "Device address (%d) cannot be greater than %d\n", @@ -225,9 +244,14 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, u16 usb_status = 0; u32 recip; u8 index; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u32 tmp_ind; +#endif recip = ctrl->bRequestType & USB_RECIP_MASK; - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif switch (recip) { case USB_RECIP_DEVICE: /* self powered */ @@ -253,8 +277,17 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + tmp_ind = ctrl->wIndex; + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); + + /* check if endpoint is stalled or stall is pending */ + cdns3_select_ep(priv_dev, tmp_ind); +#else + /* check if endpoint is stalled or stall is pending */ cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); +#endif if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) || (priv_ep->flags & EP_STALL_PENDING)) usb_status = BIT(USB_ENDPOINT_HALT); @@ -266,6 +299,10 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev, response_pkt = (__le16 *)priv_dev->setup_buf; *response_pkt = cpu_to_le16(usb_status); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(priv_dev->setup_dma, sizeof(*response_pkt)); +#endif + cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, sizeof(*response_pkt), 1, 0); return 0; @@ -282,6 +319,9 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, u16 tmode; wValue = le16_to_cpu(ctrl->wValue); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif state = priv_dev->gadget.state; speed = priv_dev->gadget.speed; @@ -309,7 +349,9 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev, return -EINVAL; tmode = le16_to_cpu(ctrl->wIndex); - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif if (!set || (tmode & 0xff) != 0) return 
-EINVAL; @@ -342,7 +384,9 @@ static int cdns3_ep0_feature_handle_intf(struct cdns3_device *priv_dev, int ret = 0; wValue = le16_to_cpu(ctrl->wValue); - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif switch (wValue) { case USB_INTRF_FUNC_SUSPEND: break; @@ -360,17 +404,38 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep; int ret = 0; u8 index; - - if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT) +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u32 tmp_ind; +#endif + + if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif return -EINVAL; + } - if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN)) + if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif return 0; + } + +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex)); priv_ep = priv_dev->eps[index]; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + tmp_ind = ctrl->wIndex; + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); + cdns3_select_ep(priv_dev, tmp_ind); +#else cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex)); +#endif if (set) __cdns3_gadget_ep_set_halt(priv_ep); @@ -400,7 +465,9 @@ static int cdns3_req_ep0_handle_feature(struct cdns3_device *priv_dev, u32 recip; recip = ctrl->bRequestType & USB_RECIP_MASK; - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif switch (recip) { case USB_RECIP_DEVICE: ret = cdns3_ep0_feature_handle_device(priv_dev, ctrl, set); @@ -434,9 +501,17 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev, if (le16_to_cpu(ctrl_req->wLength) != 6) { dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n", ctrl_req->wLength); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif return -EINVAL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); + cdns_flush_dcache(priv_dev->setup_dma, 6); +#endif + cdns3_ep0_run_transfer(priv_dev, priv_dev->setup_dma, 6, 1, 0); return 0; } @@ -452,11 +527,19 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev, static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev, struct usb_ctrlrequest *ctrl_req) { - if (ctrl_req->wIndex || ctrl_req->wLength) + if (ctrl_req->wIndex || ctrl_req->wLength) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif return -EINVAL; + } priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); +#endif + return 0; } @@ -472,7 +555,13 @@ static int cdns3_ep0_standard_request(struct cdns3_device *priv_dev, { int ret; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u8 bReq = ctrl_req->bRequest; + cdns_virt_flush_dcache(ctrl_req, sizeof(struct usb_ctrlrequest)); + switch (bReq) { +#else switch (ctrl_req->bRequest) { +#endif case USB_REQ_SET_ADDRESS: ret = cdns3_req_ep0_set_address(priv_dev, ctrl_req); break; @@ -535,7 +624,9 @@ static void cdns3_ep0_setup_phase(struct cdns3_device *priv_dev) int result; 
priv_dev->ep0_data_dir = ctrl->bRequestType & USB_DIR_IN; - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif trace_cdns3_ctrl_req(ctrl); if (!list_empty(&priv_ep->pending_req_list)) { @@ -552,10 +643,17 @@ static void cdns3_ep0_setup_phase(struct cdns3_device *priv_dev) else priv_dev->ep0_stage = CDNS3_STATUS_STAGE; - if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) + if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif result = cdns3_ep0_standard_request(priv_dev, ctrl); - else + } else { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif result = cdns3_ep0_delegate_req(priv_dev, ctrl); + } if (result == USB_GADGET_DELAYED_STATUS) return; @@ -579,6 +677,10 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev) request->actual = TRB_LEN(le32_to_cpu(priv_ep->trb_pool->length)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), + sizeof(struct cdns3_trb)); +#endif priv_ep->dir = priv_dev->ep0_data_dir; cdns3_gadget_giveback(priv_ep, to_cdns3_request(request), 0); } @@ -764,6 +866,9 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, (request->length % ep->maxpacket == 0)) zlp = 1; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(request->dma, request->length); +#endif cdns3_ep0_run_transfer(priv_dev, request->dma, request->length, 1, zlp); spin_unlock_irqrestore(&priv_dev->lock, flags); diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index a8b7b50abf6451..9d254f2a2b945c 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -230,6 +230,9 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) } memset(priv_ep->trb_pool, 0, ring_size); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(priv_ep->trb_pool_dma, ring_size); +#endif priv_ep->num_trbs = num_trbs; @@ -249,6 +252,11 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma)); link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, link_trb)), + TRB_SIZE); +#endif return 0; } @@ -464,6 +472,11 @@ static void __cdns3_descmiss_copy_data(struct usb_request *request, memcpy(&((u8 *)request->buf)[request->actual], descmiss_req->buf, descmiss_req->actual); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache( + &((u8 *)request->buf)[request->actual], + descmiss_req->actual); +#endif request->actual = length; } else { /* It should never occures */ @@ -827,6 +840,10 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, priv_req->aligned_buf->dir); memcpy(request->buf, priv_req->aligned_buf->buf, request->length); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(priv_req->aligned_buf->buf, + request->length); +#endif } priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED); @@ -930,6 +947,10 @@ static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req) return -ENOMEM; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(buf->dma, buf->size); +#endif + if (priv_req->aligned_buf) { trace_cdns3_free_aligned_request(priv_req); priv_req->aligned_buf->in_use = 0; 
@@ -950,6 +971,10 @@ static int cdns3_prepare_aligned_request_buf(struct cdns3_request *priv_req) buf->dma, buf->size, buf->dir); memcpy(buf->buf, priv_req->request.buf, priv_req->request.length); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(buf->dma, buf->size); + cdns_virt_flush_dcache(priv_req->request.buf, buf->size); +#endif } /* Transfer DMA buffer ownership back to device */ @@ -1016,10 +1041,18 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, priv_ep->flags |= EP_PENDING_REQUEST; /* must allocate buffer aligned to 8 */ - if (priv_req->flags & REQUEST_UNALIGNED) + if (priv_req->flags & REQUEST_UNALIGNED){ trb_dma = priv_req->aligned_buf->dma; - else +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(priv_req->aligned_buf->dma, + priv_req->aligned_buf->size); +#endif + }else{ trb_dma = request->dma; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(request->dma, request->length); +#endif + } /* For stream capable endpoints driver use only single TD. */ trb = priv_ep->trb_pool + priv_ep->enqueue; @@ -1035,15 +1068,34 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, if (!request->num_sgs) { trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma)); length = request->length; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif } else { trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address)); length = request->sg[sg_idx].length; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(TRB_BUFFER(request->sg[sg_idx].dma_address), + request->sg[sg_idx].length); +#endif +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(TRB_BUFFER(request->sg[sg_idx].dma_address), + request->sg[sg_idx].length); +#endif } tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif + /* * For DEV_VER_V2 controller version we have enabled * USB_CONF2_EN_TDL_TRB in DMULT configuration. @@ -1056,6 +1108,11 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, priv_req->flags |= REQUEST_PENDING; trb->control = cpu_to_le32(control); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif trace_cdns3_prepare_trb(priv_ep, priv_req->trb); @@ -1063,6 +1120,10 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, * Memory barrier - Cycle Bit must be set before trb->length and * trb->buffer fields. 
*/ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(cdns3_trb_virt_to_dma(priv_ep, trb), + sizeof(struct cdns3_trb)); +#endif wmb(); /* always first element */ @@ -1124,6 +1185,9 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, u32 control; int pcs; u16 total_tdl = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + int number = 0; +#endif struct scatterlist *s = NULL; bool sg_supported = !!(request->num_mapped_sgs); @@ -1143,10 +1207,18 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, priv_ep->flags |= EP_PENDING_REQUEST; /* must allocate buffer aligned to 8 */ - if (priv_req->flags & REQUEST_UNALIGNED) + if (priv_req->flags & REQUEST_UNALIGNED){ trb_dma = priv_req->aligned_buf->dma; - else +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(priv_req->aligned_buf->dma, + priv_req->aligned_buf->size); +#endif + }else{ trb_dma = request->dma; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(request->dma, request->length); +#endif + } trb = priv_ep->trb_pool + priv_ep->enqueue; priv_req->start_trb = priv_ep->enqueue; @@ -1184,6 +1256,12 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) | TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + link_trb)), + sizeof(struct cdns3_trb)); +#endif } if (priv_dev->dev_ver <= DEV_VER_V2) @@ -1219,12 +1297,26 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, length = request->length; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); + if(request->num_sgs) + gadget_flush_dcache(request->sg[sg_iter].dma_address, + request->sg[sg_iter].length); +#endif + if (priv_ep->flags & EP_TDLCHK_EN) total_tdl += DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); trb->length |= cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) | TRB_LEN(length)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif pcs = priv_ep->pcs ? 
TRB_CYCLE : 0; /* @@ -1256,12 +1348,23 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, s = sg_next(s); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif + control = 0; ++sg_iter; priv_req->end_trb = priv_ep->enqueue; cdns3_ep_inc_enq(priv_ep); trb = priv_ep->trb_pool + priv_ep->enqueue; trb->length = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif } while (sg_iter < num_trb); trb = priv_req->trb; @@ -1271,6 +1374,11 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, if (sg_iter == 1) trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif if (priv_dev->dev_ver < DEV_VER_V2 && (priv_ep->flags & EP_TDLCHK_EN)) { @@ -1295,8 +1403,14 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, wmb(); /* give the TD to the consumer*/ - if (togle_pcs) + if (togle_pcs) { trb->control = trb->control ^ cpu_to_le32(1); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif + } if (priv_dev->dev_ver <= DEV_VER_V2) cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep); @@ -1324,6 +1438,22 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, */ wmb(); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + if((priv_req->start_trb + num_trb) > (priv_ep->num_trbs - 1)) { + number = priv_ep->num_trbs - 1 - priv_req->start_trb; + gadget_flush_dcache(priv_ep->trb_pool_dma + + (priv_req->start_trb * TRB_SIZE), + (number + 1) * TRB_SIZE); + gadget_flush_dcache(priv_ep->trb_pool_dma, + (num_trb - number)* TRB_SIZE); + } else { + gadget_flush_dcache(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma + + priv_req->start_trb * + TRB_SIZE), + num_trb * TRB_SIZE); + } +#endif + /* * For DMULT mode we can set address to transfer ring only once after * enabling endpoint. @@ -1508,9 +1638,18 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, /* Request was dequeued and TRB was changed to TRB_LINK. */ if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { trace_cdns3_complete_trb(priv_ep, trb); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + gadget_flush_dcache(EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif cdns3_move_deq_to_next_trb(priv_req); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif if (!request->stream_id) { /* Re-select endpoint. It could be changed by other CPU * during handling usb_gadget_giveback_request. 
@@ -1554,6 +1693,11 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, cdns3_select_ep(priv_dev, priv_ep->endpoint.address); trb = priv_ep->trb_pool; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif trace_cdns3_complete_trb(priv_ep, trb); if (trb != priv_req->trb) @@ -1562,6 +1706,12 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, priv_req->trb, trb); request->actual += TRB_LEN(le32_to_cpu(trb->length)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif if (!request->num_sgs || (request->num_sgs == (priv_ep->stream_sg_idx + 1))) { @@ -1769,6 +1919,10 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, __must_hold(&priv_dev->lock) { int speed = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + int i; + struct cdns3_endpoint *priv_ep; +#endif trace_cdns3_usb_irq(priv_dev, usb_ists); if (usb_ists & USB_ISTS_L1ENTI) { @@ -1797,6 +1951,18 @@ __must_hold(&priv_dev->lock) priv_dev->gadget.speed = USB_SPEED_UNKNOWN; usb_gadget_set_state(&priv_dev->gadget, USB_STATE_NOTATTACHED); cdns3_hw_reset_eps_config(priv_dev); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + /* clean TRB*/ + for(i = 0;i < CDNS3_ENDPOINTS_MAX_COUNT; i++){ + priv_ep = priv_dev->eps[i]; + if(priv_ep && priv_ep->trb_pool){ + memset(priv_ep->trb_pool, 0, + priv_ep->alloc_ring_size); + gadget_flush_dcache(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), + priv_ep->alloc_ring_size); + } + } +#endif } if (usb_ists & (USB_ISTS_L2ENTI | USB_ISTS_U3ENTI)) { @@ -2642,6 +2808,12 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep, ((priv_req->end_trb + 1) * TRB_SIZE))); link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) | TRB_TYPE(TRB_LINK) | TRB_CHAIN); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + link_trb)), + sizeof(struct cdns3_trb)); +#endif if (priv_ep->wa1_trb == priv_req->trb) cdns3_wa1_restore_cycle_bit(priv_ep); @@ -2695,8 +2867,15 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) if (request) { priv_req = to_cdns3_request(request); trb = priv_req->trb; - if (trb) + if (trb) { trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif + } } writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); @@ -2710,9 +2889,16 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING); if (request) { - if (trb) + if (trb) { trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache( + EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, + trb)), + sizeof(struct cdns3_trb)); +#endif + } cdns3_rearm_transfer(priv_ep, 1); } @@ -3210,7 +3396,9 @@ static int cdns3_gadget_start(struct cdns *cdns) ret = -ENOMEM; goto err2; } - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(priv_dev->setup_dma, 8); +#endif priv_dev->dev_ver = readl(&priv_dev->regs->usb_cap6); dev_dbg(priv_dev->dev, "Device Controller version: %08x\n", diff --git a/drivers/usb/cdns3/cdns3-gadget.h b/drivers/usb/cdns3/cdns3-gadget.h index c5660f2c4293ff..a1805ba41d38b6 100644 --- a/drivers/usb/cdns3/cdns3-gadget.h +++ 
b/drivers/usb/cdns3/cdns3-gadget.h @@ -1368,4 +1368,12 @@ int cdns3_ep_config(struct cdns3_endpoint *priv_ep, bool enable); void cdns3_check_ep0_interrupt_proceed(struct cdns3_device *priv_dev, int dir); int __cdns3_gadget_wakeup(struct cdns3_device *priv_dev); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA +#include +static inline void gadget_flush_dcache(unsigned long start, unsigned long len) +{ + starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + start % 64); +} +#endif + #endif /* __LINUX_CDNS3_GADGET */ diff --git a/drivers/usb/cdns3/cdns3-trace.h b/drivers/usb/cdns3/cdns3-trace.h index 7574b4a6281320..45b95b545515e5 100644 --- a/drivers/usb/cdns3/cdns3-trace.h +++ b/drivers/usb/cdns3/cdns3-trace.h @@ -187,6 +187,9 @@ DECLARE_EVENT_CLASS(cdns3_log_ctrl, __entry->wIndex = le16_to_cpu(ctrl->wIndex); __entry->wLength = le16_to_cpu(ctrl->wLength); ), +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl, sizeof(struct usb_ctrlrequest)); +#endif TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNS3_MSG_MAX, __entry->bRequestType, __entry->bRequest, __entry->wValue, @@ -407,6 +410,10 @@ DECLARE_EVENT_CLASS(cdns3_log_trb, __entry->type = usb_endpoint_type(priv_ep->endpoint.desc); __entry->last_stream_id = priv_ep->last_stream_id; ), +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(EP_TRADDR_TRADDR(cdns3_trb_virt_to_dma(priv_ep, trb)), + sizeof(struct cdns3_trb)); +#endif TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s) SID:%lu LAST_SID:%u", __get_str(name), __entry->trb, __entry->buffer, TRB_LEN(__entry->length), diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 2218941d35a3fb..24f6d20fd7d49c 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -251,6 +251,10 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) usbm->vma_use_count = 1; INIT_LIST_HEAD(&usbm->memlist); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma_handle, size); +#endif + if (hcd->localmem_pool || !hcd_uses_dma(hcd)) { if (remap_pfn_range(vma, vma->vm_start, virt_to_phys(usbm->mem) >> PAGE_SHIFT, @@ -262,6 +266,9 @@ static int usbdev_mmap(struct file *file, struct vm_area_struct *vma) if (dma_mmap_coherent(hcd->self.sysdev, vma, mem, dma_handle, size)) { dec_usb_memory_use_count(usbm, &usbm->vma_use_count); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma_handle, size); +#endif return -EAGAIN; } } @@ -542,6 +549,9 @@ static int copy_urb_data_to_user(u8 __user *userbuffer, struct urb *urb) if (urb->num_sgs == 0) { if (copy_to_user(userbuffer, urb->transfer_buffer, len)) return -EFAULT; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(urb->transfer_buffer, len); +#endif return 0; } @@ -1734,6 +1744,12 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb as->urb->transfer_buffer = as->usbm->mem + (uurb_start - as->usbm->vm_start); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(as->usbm->dma_handle + + (uurb_start - as->usbm->vm_start), + as->usbm->size - + (uurb_start - as->usbm->vm_start)); +#endif } else { as->urb->transfer_buffer = kmalloc(uurb->buffer_length, GFP_KERNEL | __GFP_NOWARN); @@ -1820,6 +1836,12 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb as->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; as->urb->transfer_dma = as->usbm->dma_handle + (uurb_start - as->usbm->vm_start); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(as->usbm->dma_handle + + 
(uurb_start - as->usbm->vm_start), + as->usbm->size - + (uurb_start - as->usbm->vm_start)); +#endif } else if (is_in && uurb->buffer_length > 0) as->userbuffer = uurb->buffer; as->signr = uurb->signr; diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 6119fb41d73653..4cd2f3376dfb5c 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -419,6 +419,9 @@ ascii2desc(char const *s, u8 *buf, unsigned len) *buf++ = t >> 8; t = (unsigned char)*s++; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(buf, len); +#endif return len; } @@ -450,6 +453,9 @@ rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len) if (len > 4) len = 4; memcpy(data, langids, len); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(data, len); +#endif return len; case 1: /* Serial number */ @@ -502,6 +508,9 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) wValue = le16_to_cpu (cmd->wValue); wIndex = le16_to_cpu (cmd->wIndex); wLength = le16_to_cpu (cmd->wLength); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd, sizeof(struct usb_ctrlrequest)); +#endif if (wLength > urb->transfer_buffer_length) goto error; @@ -727,6 +736,9 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb) bDeviceProtocol)) ((struct usb_device_descriptor *) ubuf)-> bDeviceProtocol = USB_HUB_PR_HS_SINGLE_TT; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ubuf, len); +#endif } kfree(tbuf); @@ -773,6 +785,9 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd) urb->actual_length = length; memcpy(urb->transfer_buffer, buffer, length); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(urb->transfer_buffer, length); +#endif usb_hcd_unlink_urb_from_ep(hcd, urb); usb_hcd_giveback_urb(hcd, urb, 0); } else { @@ -1301,6 +1316,9 @@ static int hcd_alloc_coherent(struct usb_bus *bus, memcpy(vaddr, *vaddr_handle, size); *vaddr_handle = vaddr; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(*dma_handle, size + sizeof(vaddr)); +#endif return 0; } @@ -1312,9 +1330,13 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle, vaddr = (void *)get_unaligned((unsigned long *)(vaddr + size)); - if (dir == DMA_FROM_DEVICE) + if (dir == DMA_FROM_DEVICE) { memcpy(vaddr, *vaddr_handle, size); - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(vaddr, size); + cdns_virt_flush_dcache(*vaddr_handle, size); +#endif + } hcd_buffer_free(bus, size + sizeof(vaddr), *vaddr_handle, *dma_handle); *vaddr_handle = vaddr; @@ -1324,12 +1346,16 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle, void usb_hcd_unmap_urb_setup_for_dma(struct usb_hcd *hcd, struct urb *urb) { if (IS_ENABLED(CONFIG_HAS_DMA) && - (urb->transfer_flags & URB_SETUP_MAP_SINGLE)) + (urb->transfer_flags & URB_SETUP_MAP_SINGLE)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->setup_dma, + sizeof(struct usb_ctrlrequest)); +#endif dma_unmap_single(hcd->self.sysdev, urb->setup_dma, sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE); - else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL) + } else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL) hcd_free_coherent(urb->dev->bus, &urb->setup_dma, (void **) &urb->setup_packet, @@ -1363,23 +1389,36 @@ void usb_hcd_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) urb->num_sgs, dir); else if (IS_ENABLED(CONFIG_HAS_DMA) && - (urb->transfer_flags & URB_DMA_MAP_PAGE)) + (urb->transfer_flags & URB_DMA_MAP_PAGE)) { +#ifdef 
CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif dma_unmap_page(hcd->self.sysdev, urb->transfer_dma, urb->transfer_buffer_length, dir); - else if (IS_ENABLED(CONFIG_HAS_DMA) && - (urb->transfer_flags & URB_DMA_MAP_SINGLE)) + } else if (IS_ENABLED(CONFIG_HAS_DMA) && + (urb->transfer_flags & URB_DMA_MAP_SINGLE)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif dma_unmap_single(hcd->self.sysdev, urb->transfer_dma, urb->transfer_buffer_length, dir); - else if (urb->transfer_flags & URB_MAP_LOCAL) + } else if (urb->transfer_flags & URB_MAP_LOCAL) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif hcd_free_coherent(urb->dev->bus, &urb->transfer_dma, &urb->transfer_buffer, urb->transfer_buffer_length, dir); + } /* Make it safe to call this routine more than once */ urb->transfer_flags &= ~(URB_DMA_MAP_SG | URB_DMA_MAP_PAGE | @@ -1418,6 +1457,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, (void **)&urb->setup_packet, sizeof(struct usb_ctrlrequest), DMA_TO_DEVICE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->setup_dma, + sizeof(struct usb_ctrlrequest)); +#endif if (ret) return ret; urb->transfer_flags |= URB_SETUP_MAP_LOCAL; @@ -1435,6 +1478,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, if (dma_mapping_error(hcd->self.sysdev, urb->setup_dma)) return -EAGAIN; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->setup_dma, + sizeof(struct usb_ctrlrequest)); +#endif urb->transfer_flags |= URB_SETUP_MAP_SINGLE; } } @@ -1449,6 +1496,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, &urb->transfer_buffer, urb->transfer_buffer_length, dir); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length + 8); +#endif if (ret == 0) urb->transfer_flags |= URB_MAP_LOCAL; } else if (hcd_uses_dma(hcd)) { @@ -1487,6 +1538,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, ret = -EAGAIN; else urb->transfer_flags |= URB_DMA_MAP_PAGE; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif } else if (object_is_on_stack(urb->transfer_buffer)) { WARN_ONCE(1, "transfer buffer is on stack\n"); ret = -EAGAIN; @@ -1501,6 +1556,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, ret = -EAGAIN; else urb->transfer_flags |= URB_DMA_MAP_SINGLE; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(urb->transfer_dma, + urb->transfer_buffer_length); +#endif } } if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE | @@ -2949,6 +3008,9 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, if (IS_ERR(local_mem)) return PTR_ERR(local_mem); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(phys_addr,size); +#endif /* * Here we pass a dma_addr_t but the arg type is a phys_addr_t. 
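/*
 * Sketch, not part of the patch: the range-widening that the
 * gadget_flush_dcache() helper added in cdns3-gadget.h performs before
 * calling starfive_flush_dcache() can be modelled in plain C.  The start
 * is aligned down to a 64-byte cache line and the length grown by the
 * distance moved so the original [start, start + len) range stays
 * covered; rounding the end up as well, shown here, is an extra
 * assumption on top of what the patch does.  The helpers also use a
 * single "flush" (clean + invalidate) for both DMA directions, which is
 * safe but does more work than a targeted clean or invalidate would.
 */
#include <stdint.h>
#include <stdio.h>

#define CACHE_LINE 64UL

static void widen_flush_range(uint64_t start, uint64_t len,
			      uint64_t *out_start, uint64_t *out_len)
{
	uint64_t aligned_start = start & ~(CACHE_LINE - 1);
	uint64_t aligned_end = (start + len + CACHE_LINE - 1) &
			       ~(CACHE_LINE - 1);

	*out_start = aligned_start;
	*out_len = aligned_end - aligned_start;
}

int main(void)
{
	uint64_t s, l;

	widen_flush_range(0x1010, 24, &s, &l);
	printf("flush 0x%llx, %llu bytes\n",
	       (unsigned long long)s, (unsigned long long)l); /* 0x1000, 64 */
	return 0;
}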
* It's not backed by system memory and thus there's no kernel mapping @@ -2962,6 +3024,9 @@ int usb_hcd_setup_local_mem(struct usb_hcd *hcd, phys_addr_t phys_addr, return err; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma,size); +#endif return 0; } EXPORT_SYMBOL_GPL(usb_hcd_setup_local_mem); diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 357b149b20d3a1..3df361946a1430 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -407,6 +407,10 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) return -ENOEXEC; is_out = !(setup->bRequestType & USB_DIR_IN) || !setup->wLength; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, + sizeof(struct usb_ctrlrequest)); +#endif } else { is_out = usb_endpoint_dir_out(&ep->desc); } diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c index 62368c4ed37afb..c5b441d2d56258 100644 --- a/drivers/usb/core/usb.c +++ b/drivers/usb/core/usb.c @@ -970,9 +970,19 @@ EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor); void *usb_alloc_coherent(struct usb_device *dev, size_t size, gfp_t mem_flags, dma_addr_t *dma) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + void *ret; +#endif if (!dev || !dev->bus) return NULL; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + ret = hcd_buffer_alloc(dev->bus, size, mem_flags, dma); + if(ret) + cdns_flush_dcache(*dma, size); + return ret; +#else return hcd_buffer_alloc(dev->bus, size, mem_flags, dma); +#endif } EXPORT_SYMBOL_GPL(usb_alloc_coherent); diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index 386abf26641d28..5a840db12aa89a 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c @@ -16,6 +16,9 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci, struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); int state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif return xhci_slot_state_string(state); } diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c index ccb0156fcebebf..68375819086935 100644 --- a/drivers/usb/host/xhci-dbgcap.c +++ b/drivers/usb/host/xhci-dbgcap.c @@ -105,6 +105,9 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length) info->product = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 2); info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3); info->length = cpu_to_le32(string_length); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma, string_length); +#endif /* Populate bulk out endpoint context: */ ep_ctx = dbc_bulkout_ctx(dbc); @@ -113,6 +116,9 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length) ep_ctx->ep_info = 0; ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst); ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif /* Populate bulk in endpoint context: */ ep_ctx = dbc_bulkin_ctx(dbc); @@ -120,6 +126,9 @@ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length) ep_ctx->ep_info = 0; ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst); ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif /* Set DbC context and info registers: */ lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp); @@ -279,6 +288,11 @@ static int 
xhci_dbc_queue_bulk_tx(struct dbc_ep *dep, * Add a barrier between writes of trb fields and flipping * the cycle bit: */ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(req->dma, req->length); + cdns_flush_dcache(req->trb_dma, + sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif wmb(); if (cycle) @@ -286,6 +300,10 @@ static int xhci_dbc_queue_bulk_tx(struct dbc_ep *dep, else trb->generic.field[3] &= cpu_to_le32(~TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(req->trb_dma, + sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif writel(DBC_DOOR_BELL_TARGET(dep->direction), &dbc->regs->doorbell); return 0; @@ -501,12 +519,19 @@ static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags) if (!dbc->string) goto string_fail; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dbc->string_dma, dbc->string_size); +#endif + /* Setup ERST register: */ writel(dbc->erst.erst_size, &dbc->regs->ersts); lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba); deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg, dbc->ring_evt->dequeue); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif lo_hi_writeq(deq, &dbc->regs->erdp); /* Setup strings and contexts: */ @@ -877,6 +902,9 @@ static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc) if (update_erdp) { deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg, dbc->ring_evt->dequeue); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb)); +#endif lo_hi_writeq(deq, &dbc->regs->erdp); } diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index 2c0fda57869e4c..0a6a351c1b322a 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -207,6 +207,9 @@ static void xhci_ring_dump_segment(struct seq_file *s, le32_to_cpu(trb->generic.field[1]), le32_to_cpu(trb->generic.field[2]), le32_to_cpu(trb->generic.field[3]))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma,sizeof(*trb)); +#endif } } @@ -268,6 +271,9 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused) le32_to_cpu(slot_ctx->dev_info2), le32_to_cpu(slot_ctx->tt_info), le32_to_cpu(slot_ctx->dev_state))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif return 0; } @@ -291,6 +297,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused) le32_to_cpu(ep_ctx->ep_info2), le64_to_cpu(ep_ctx->deq), le32_to_cpu(ep_ctx->tx_info))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } return 0; @@ -551,6 +560,9 @@ static int xhci_stream_context_array_show(struct seq_file *s, void *unused) else seq_printf(s, "%pad stream context entry not used deq %016llx\n", &dma, le64_to_cpu(stream_ctx->stream_ring)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma,16); +#endif } return 0; diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e9b18fc1761723..dc9ddbaf5eefd5 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -499,8 +499,15 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i); /* Check ep is running, required by AMD SNPS 3.1 xHC */ - if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING) + if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, 
sizeof(*ep_ctx)); +#endif continue; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif command = xhci_alloc_command(xhci, false, GFP_NOWAIT); if (!command) { diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index f66815fe84822c..82e06d75b88b46 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -44,6 +44,9 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, kfree(seg); return NULL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma, sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif if (max_packet) { seg->bounce_buf = kzalloc_node(max_packet, flags, @@ -56,8 +59,13 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, } /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */ if (cycle_state == 0) { - for (i = 0; i < TRBS_PER_SEGMENT; i++) + for (i = 0; i < TRBS_PER_SEGMENT; i++) { seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&seg->trbs[i], + sizeof(union xhci_trb)); +#endif + } } seg->dma = dma; seg->next = NULL; @@ -68,6 +76,9 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) { if (seg->trbs) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(seg->trbs, sizeof(union xhci_trb)); +#endif dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); seg->trbs = NULL; } @@ -111,11 +122,19 @@ static void xhci_link_segments(struct xhci_segment *prev, /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ val = le32_to_cpu(prev->trbs[TRBS_PER_SEGMENT-1].link.control); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&prev->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif val &= ~TRB_TYPE_BITMASK; val |= TRB_TYPE(TRB_LINK); if (chain_links) val |= TRB_CHAIN; prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&prev->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif } } @@ -149,7 +168,15 @@ static void xhci_link_rings(struct xhci_hcd *xhci, struct xhci_ring *ring, &= ~cpu_to_le32(LINK_TOGGLE); last->trbs[TRBS_PER_SEGMENT-1].link.control |= cpu_to_le32(LINK_TOGGLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&ring->last_seg->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif ring->last_seg = last; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&last->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif } } @@ -265,6 +292,10 @@ static void xhci_remove_stream_mapping(struct xhci_ring *ring) seg = ring->first_seg; do { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(seg->dma, + sizeof(union xhci_trb) * TRBS_PER_SEGMENT); +#endif xhci_remove_segment_mapping(ring->trb_address_map, seg); seg = seg->next; } while (seg != ring->first_seg); @@ -398,6 +429,10 @@ struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, /* See section 4.9.2.1 and 6.4.4.1 */ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |= cpu_to_le32(LINK_TOGGLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&ring->last_seg->trbs[TRBS_PER_SEGMENT - 1], + sizeof(union xhci_trb)); +#endif } xhci_initialize_ring_info(ring, cycle_state); trace_xhci_ring_alloc(ring); @@ -489,6 +524,9 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, 
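/*
 * Sketch, not part of the patch: a minimal, self-contained model of the
 * segment linking done in the xhci_link_segments()/xhci_link_rings()
 * hunks above.  The last TRB of each segment becomes a Link TRB whose
 * pointer field holds the bus address of the next segment, and only the
 * link back to the first segment carries the toggle-cycle bit.  The
 * MODEL_* names and field layout are simplified stand-ins for the
 * kernel's TRB_TYPE/TRB_LINK/TRB_CHAIN/LINK_TOGGLE definitions.
 */
#include <stdint.h>

#define MODEL_TRBS_PER_SEG	16
#define MODEL_TRB_TYPE_LINK	(6u << 10)	/* Link TRB type per the xHCI spec */
#define MODEL_TRB_CHAIN		(1u << 4)
#define MODEL_LINK_TOGGLE	(1u << 1)

struct model_trb {
	uint64_t ptr;		/* for a Link TRB: bus address of the next segment */
	uint32_t status;
	uint32_t control;
};

struct model_seg {
	struct model_trb trbs[MODEL_TRBS_PER_SEG];
	uint64_t dma;		/* bus address of trbs[] */
	struct model_seg *next;
};

/* Make the last TRB of @prev a Link TRB that points at @next. */
static void model_link_segments(struct model_seg *prev, struct model_seg *next,
				int chain_links, int last_segment)
{
	struct model_trb *link = &prev->trbs[MODEL_TRBS_PER_SEG - 1];

	prev->next = next;
	link->ptr = next->dma;
	link->control = MODEL_TRB_TYPE_LINK;
	if (chain_links)
		link->control |= MODEL_TRB_CHAIN;
	/* only the wrap-around link flips the producer/consumer cycle state */
	if (last_segment)
		link->control |= MODEL_LINK_TOGGLE;
}

int main(void)
{
	static struct model_seg a = { .dma = 0x1000 }, b = { .dma = 0x2000 };

	model_link_segments(&a, &b, 1, 0);	/* a -> b, chained */
	model_link_segments(&b, &a, 1, 1);	/* b -> a, wraps: toggle cycle */
	return a.trbs[MODEL_TRBS_PER_SEG - 1].ptr == 0x2000 ? 0 : 1;
}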
kfree(ctx); return NULL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(ctx->dma, ctx->size); +#endif return ctx; } @@ -645,6 +683,10 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, goto cleanup_ctx; memset(stream_info->stream_ctx_array, 0, sizeof(struct xhci_stream_ctx)*num_stream_ctxs); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(stream_info->ctx_array_dma, + sizeof(struct xhci_stream_ctx) * num_stream_ctxs); +#endif /* Allocate everything needed to free the stream rings later */ stream_info->free_streams_command = @@ -674,6 +716,10 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci, cur_ring->cycle_state; stream_info->stream_ctx_array[cur_stream].stream_ring = cpu_to_le64(addr); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&stream_info->stream_ctx_array[cur_stream], + sizeof(struct xhci_stream_ctx)); +#endif xhci_dbg(xhci, "Setting stream %d ring ptr to 0x%08llx\n", cur_stream, (unsigned long long) addr); @@ -731,6 +777,9 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci, ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams) | EP_HAS_LSA); ep_ctx->deq = cpu_to_le64(stream_info->ctx_array_dma); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } /* @@ -745,6 +794,9 @@ void xhci_setup_no_streams_ep_input_ctx(struct xhci_ep_ctx *ep_ctx, ep_ctx->ep_info &= cpu_to_le32(~(EP_MAXPSTREAMS_MASK | EP_HAS_LSA)); addr = xhci_trb_virt_to_dma(ep->ring->deq_seg, ep->ring->dequeue); ep_ctx->deq = cpu_to_le64(addr | ep->ring->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx,sizeof(*ep_ctx)); +#endif } /* Frees all stream contexts associated with the endpoint, @@ -1011,12 +1063,19 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, dev->udev = udev; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dev->out_ctx->dma, dev->out_ctx->size); +#endif /* Point to output device context in dcbaa. 
*/ xhci->dcbaa->dev_context_ptrs[slot_id] = cpu_to_le64(dev->out_ctx->dma); xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", slot_id, &xhci->dcbaa->dev_context_ptrs[slot_id], le64_to_cpu(xhci->dcbaa->dev_context_ptrs[slot_id])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->dcbaa->dma, + sizeof(struct xhci_device_context_array)); +#endif trace_xhci_alloc_virt_device(dev); @@ -1054,6 +1113,9 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci, ep0_ctx->deq = cpu_to_le64(xhci_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue) | ep_ring->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep0_ctx, sizeof(*ep0_ctx)); +#endif } /* @@ -1106,6 +1168,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud /* 3) Only the control endpoint is valid - one endpoint context */ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx,sizeof(*slot_ctx)); +#endif switch (udev->speed) { case USB_SPEED_SUPER_PLUS: slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP); @@ -1136,10 +1201,16 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud return -EINVAL; } /* Find the root hub port this device is under */ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif port_num = xhci_find_real_port_number(xhci, udev); if (!port_num) return -EINVAL; slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif /* Set the port number in the virtual_device to the faked port number */ for (top_dev = udev; top_dev->parent && top_dev->parent->parent; top_dev = top_dev->parent) @@ -1185,6 +1256,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud (udev->ttport << 8)); if (udev->tt->multi) slot_ctx->dev_info |= cpu_to_le32(DEV_MTT); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif } xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); @@ -1199,6 +1273,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud ep0_ctx->deq = cpu_to_le64(dev->eps[0].ring->first_seg->dma | dev->eps[0].ring->cycle_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep0_ctx, sizeof(*ep0_ctx)); +#endif trace_xhci_setup_addressable_virt_device(dev); @@ -1508,6 +1585,9 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) | EP_AVG_TRB_LENGTH(avg_trb_len)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif return 0; } @@ -1529,6 +1609,9 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci, /* Don't free the endpoint ring until the set interface or configuration * request succeeds. */ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info) @@ -1560,14 +1643,29 @@ void xhci_update_bw_info(struct xhci_hcd *xhci, * set in the first place. 
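/*
 * Sketch, not part of the patch: what the xhci_alloc_virt_device() hunk
 * above keeps coherent is the Device Context Base Address Array, a DMA'd
 * table of 64-bit pointers, one per slot, each pointing at that slot's
 * output device context.  Minimal model; the little-endian conversion
 * and the scratchpad pointer that lives in entry 0 are ignored here.
 */
#include <stdint.h>

#define MODEL_MAX_SLOTS 256

struct model_dcbaa {
	uint64_t dev_context_ptrs[MODEL_MAX_SLOTS];
	uint64_t dma;			/* bus address of this array */
};

/* Publish a slot's output device context to the controller. */
static void model_set_slot_context(struct model_dcbaa *dcbaa,
				   unsigned int slot_id, uint64_t out_ctx_dma)
{
	dcbaa->dev_context_ptrs[slot_id] = out_ctx_dma;
	/*
	 * On a non-coherent platform this is the point where the patch
	 * writes the array back (cdns_flush_dcache on dcbaa->dma) before
	 * the controller dereferences the new entry.
	 */
}

int main(void)
{
	static struct model_dcbaa dcbaa = { .dma = 0x3000 };

	model_set_slot_context(&dcbaa, 1, 0x4000);
	return dcbaa.dev_context_ptrs[1] == 0x4000 ? 0 : 1;
}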
*/ if (!EP_IS_ADDED(ctrl_ctx, i) && EP_IS_DROPPED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif /* Dropped endpoint */ xhci_clear_endpoint_bw_info(bw_info); continue; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif if (EP_IS_ADDED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, i); ep_type = CTX_TO_EP_TYPE(le32_to_cpu(ep_ctx->ep_info2)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* Ignore non-periodic endpoints */ if (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP && @@ -1591,7 +1689,14 @@ void xhci_update_bw_info(struct xhci_hcd *xhci, bw_info->type = ep_type; bw_info->max_esit_payload = CTX_TO_MAX_ESIT_PAYLOAD( le32_to_cpu(ep_ctx->tx_info)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, sizeof(struct xhci_input_control_ctx)); +#endif } } @@ -1618,6 +1723,10 @@ void xhci_endpoint_copy(struct xhci_hcd *xhci, in_ep_ctx->reserved[0] = out_ep_ctx->reserved[0]; in_ep_ctx->reserved[1] = out_ep_ctx->reserved[1]; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(in_ep_ctx, sizeof(*in_ep_ctx)); + cdns_virt_flush_dcache(out_ep_ctx, sizeof(*out_ep_ctx)); +#endif } /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. @@ -1639,6 +1748,10 @@ void xhci_slot_copy(struct xhci_hcd *xhci, in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; in_slot_ctx->tt_info = out_slot_ctx->tt_info; in_slot_ctx->dev_state = out_slot_ctx->dev_state; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(in_slot_ctx, sizeof(*in_slot_ctx)); + cdns_virt_flush_dcache(out_slot_ctx, sizeof(*out_slot_ctx)); +#endif } /* Set up the scratchpad buffer array and scratchpad buffers, if needed. 
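/*
 * Sketch, not part of the patch: the EP_IS_ADDED()/EP_IS_DROPPED() tests
 * that the xhci_update_bw_info() hunks above wrap in flushes are plain
 * bitmap lookups on the input control context.  Flag bit 0 covers the
 * slot context, so endpoint index i maps to bit i + 1.  That bit layout
 * is assumed here; the kernel's macros in xhci.h are authoritative.
 */
#include <stdint.h>

struct model_input_control_ctx {
	uint32_t drop_flags;
	uint32_t add_flags;
};

static int model_ep_is_added(const struct model_input_control_ctx *ctrl,
			     unsigned int ep_index)
{
	return !!(ctrl->add_flags & (1u << (ep_index + 1)));
}

static int model_ep_is_dropped(const struct model_input_control_ctx *ctrl,
			       unsigned int ep_index)
{
	return !!(ctrl->drop_flags & (1u << (ep_index + 1)));
}

int main(void)
{
	struct model_input_control_ctx ctrl = {
		.add_flags = 1u << 3,	/* endpoint index 2 added */
	};

	return model_ep_is_added(&ctrl, 2) && !model_ep_is_dropped(&ctrl, 2)
		? 0 : 1;
}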
*/ @@ -1664,6 +1777,9 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) &xhci->scratchpad->sp_dma, flags); if (!xhci->scratchpad->sp_array) goto fail_sp2; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->scratchpad->sp_dma, num_sp * sizeof(u64)); +#endif xhci->scratchpad->sp_buffers = kcalloc_node(num_sp, sizeof(void *), flags, dev_to_node(dev)); @@ -1680,7 +1796,13 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) xhci->scratchpad->sp_array[i] = dma; xhci->scratchpad->sp_buffers[i] = buf; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(dma, xhci->page_size); +#endif } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->scratchpad->sp_dma, num_sp * sizeof(u64)); +#endif return 0; @@ -1804,6 +1926,9 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, size, &erst->erst_dma_addr, flags); if (!erst->entries) return -ENOMEM; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(erst->erst_dma_addr, size); +#endif erst->num_entries = evt_ring->num_segs; @@ -1815,6 +1940,9 @@ int xhci_alloc_erst(struct xhci_hcd *xhci, entry->rsvd = 0; seg = seg->next; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(erst->erst_dma_addr, size); +#endif return 0; } @@ -2109,6 +2237,9 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci) xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Write event ring dequeue pointer, " "preserving EHB bit"); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb)); +#endif xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp, &xhci->ir_set->erst_dequeue); } @@ -2432,6 +2563,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) if (!xhci->dcbaa) goto fail; xhci->dcbaa->dma = dma; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->dcbaa->dma, sizeof(*xhci->dcbaa)); +#endif xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Device context base array address = 0x%llx (DMA), %p (virt)", (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); @@ -2540,6 +2674,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); val_64 &= ERST_PTR_MASK; val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->erst.erst_dma_addr, + xhci->event_ring->num_segs * + sizeof(struct xhci_erst_entry)); +#endif xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); /* Set the event ring dequeue address */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 6acd2329e08d49..709c14ce479e56 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -82,12 +82,26 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, static bool trb_is_noop(union xhci_trb *trb) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + bool ret; + ret = TRB_TYPE_NOOP_LE32(trb->generic.field[3]); + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); + return ret; +#else return TRB_TYPE_NOOP_LE32(trb->generic.field[3]); +#endif } static bool trb_is_link(union xhci_trb *trb) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + bool ret; + ret = TRB_TYPE_LINK_LE32(trb->link.control); + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); + return ret; +#else return TRB_TYPE_LINK_LE32(trb->link.control); +#endif } static bool last_trb_on_seg(struct xhci_segment *seg, union xhci_trb *trb) @@ -103,7 +117,14 @@ static bool last_trb_on_ring(struct xhci_ring *ring, static bool link_trb_toggles_cycle(union xhci_trb *trb) { +#ifdef 
CONFIG_USB_CDNS3_HOST_FLUSH_DMA + bool ret; + ret = le32_to_cpu(trb->link.control) & LINK_TOGGLE; + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); + return ret; +#else return le32_to_cpu(trb->link.control) & LINK_TOGGLE; +#endif } static bool last_td_in_urb(struct xhci_td *td) @@ -133,6 +154,9 @@ static void trb_to_noop(union xhci_trb *trb, u32 noop_type) trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE); trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type)); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif } /* Updates trb to point to the next TRB in the ring, and updates seg if the next @@ -224,6 +248,9 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, /* If this is not event ring, there is one less usable TRB */ if (!trb_is_link(ring->enqueue)) ring->num_trbs_free--; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ring->enqueue, sizeof(union xhci_trb)); +#endif if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) { xhci_err(xhci, "Tried to move enqueue past ring segment\n"); @@ -255,6 +282,9 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, next->link.control &= cpu_to_le32(~TRB_CHAIN); next->link.control |= cpu_to_le32(chain); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(next,sizeof(union xhci_trb)); +#endif /* Give this link TRB to the hardware */ wmb(); next->link.control ^= cpu_to_le32(TRB_CYCLE); @@ -262,6 +292,9 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, /* Toggle the cycle bit after the last ring segment. */ if (link_trb_toggles_cycle(next)) ring->cycle_state ^= 1; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(next,sizeof(union xhci_trb)); +#endif ring->enq_seg = ring->enq_seg->next; ring->enqueue = ring->enq_seg->trbs; @@ -539,15 +572,30 @@ static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev, struct xhci_ep_ctx *ep_ctx; struct xhci_stream_ctx *st_ctx; struct xhci_virt_ep *ep; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u64 ret; +#endif ep = &vdev->eps[ep_index]; if (ep->ep_state & EP_HAS_STREAMS) { st_ctx = &ep->stream_info->stream_ctx_array[stream_id]; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + ret = le64_to_cpu(st_ctx->stream_ring); + cdns_virt_flush_dcache(st_ctx, sizeof(*st_ctx)); + return ret; +#else return le64_to_cpu(st_ctx->stream_ring); +#endif } ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + ret = le64_to_cpu(ep_ctx->deq); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); + return ret; +#else return le64_to_cpu(ep_ctx->deq); +#endif } static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, @@ -694,8 +742,12 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, trb_to_noop(trb, TRB_TR_NOOP); /* flip cycle if asked to */ - if (flip_cycle && trb != td->first_trb && trb != td->last_trb) + if (flip_cycle && trb != td->first_trb && trb != td->last_trb) { trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif + } if (trb == td->last_trb) break; @@ -748,17 +800,26 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, return; if (usb_urb_dir_out(urb)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(seg->bounce_dma, ring->bounce_buf_len); +#endif dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_TO_DEVICE); return; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + 
cdns_flush_dcache(seg->bounce_dma, ring->bounce_buf_len); +#endif dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_FROM_DEVICE); /* for in tranfers we need to copy the data from bounce to sg */ if (urb->num_sgs) { len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, seg->bounce_len, seg->bounce_offs); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(seg->bounce_dma, ring->bounce_buf_len); +#endif if (len != seg->bounce_len) xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", len, seg->bounce_len); @@ -1019,6 +1080,9 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, int err; if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif if (!xhci->devs[slot_id]) xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n", slot_id); @@ -1026,6 +1090,9 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, } ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) return; @@ -1033,6 +1100,9 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id, ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); trace_xhci_handle_cmd_stop_ep(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif if (comp_code == COMP_CONTEXT_STATE_ERROR) { /* @@ -1309,6 +1379,9 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) return; @@ -1325,6 +1398,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); trace_xhci_handle_cmd_set_deq(slot_ctx); trace_xhci_handle_cmd_set_deq_ep(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif if (cmd_comp_code != COMP_SUCCESS) { unsigned int ep_state; @@ -1339,6 +1416,10 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, ep_state = GET_EP_CTX_STATE(ep_ctx); slot_state = le32_to_cpu(slot_ctx->dev_state); slot_state = GET_SLOT_STATE(slot_state); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Slot state = %u, EP state = %u", slot_state, ep_state); @@ -1365,8 +1446,14 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id, struct xhci_stream_ctx *ctx = &ep->stream_info->stream_ctx_array[stream_id]; deq = le64_to_cpu(ctx->stream_ring) & SCTX_DEQ_MASK; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctx, sizeof(*ctx)); +#endif } else { deq = le64_to_cpu(ep_ctx->deq) & ~EP_CTX_CYCLE_MASK; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Successful Set TR Deq Ptr cmd, deq = @%08llx", deq); @@ -1408,12 +1495,18 @@ static 
void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, unsigned int ep_index; ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) return; ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index); trace_xhci_handle_cmd_reset_ep(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* This command will only fail if the endpoint wasn't halted, * but we don't care. @@ -1432,8 +1525,16 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id, xhci_giveback_invalidated_tds(ep); /* if this was a soft reset, then restart */ - if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) + if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif ring_doorbell_for_active_rings(xhci, slot_id, ep_index); + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif } static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id, @@ -1456,6 +1557,9 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id) slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); trace_xhci_handle_cmd_disable_slot(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif if (xhci->quirks & XHCI_EP_LIMIT_QUIRK) /* Delete default control endpoint resources */ @@ -1492,11 +1596,17 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id, add_flags = le32_to_cpu(ctrl_ctx->add_flags); drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Input ctx add_flags are the endpoint index plus one */ ep_index = xhci_last_valid_endpoint(add_flags) - 1; ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, ep_index); trace_xhci_handle_cmd_config_ep(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* A usb_set_interface() call directly after clearing a halted * condition may race on this quirky hardware. 
Not worth @@ -1532,6 +1642,9 @@ static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id) return; slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); trace_xhci_handle_cmd_addr_dev(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif } static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id) @@ -1547,6 +1660,9 @@ static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id) } slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); trace_xhci_handle_cmd_reset_dev(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg(xhci, "Completed reset device command.\n"); } @@ -1562,6 +1678,9 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci, "NEC firmware version %2x.%02x", NEC_FW_MAJOR(le32_to_cpu(event->status)), NEC_FW_MINOR(le32_to_cpu(event->status))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(struct xhci_event_cmd)); +#endif } static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status) @@ -1649,12 +1768,19 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_command *cmd; u32 cmd_type; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(struct xhci_event_cmd)); +#endif + if (slot_id >= MAX_HC_SLOTS) { xhci_warn(xhci, "Invalid slot_id %u\n", slot_id); return; } cmd_dma = le64_to_cpu(event->cmd_trb); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(struct xhci_event_cmd)); +#endif cmd_trb = xhci->cmd_ring->dequeue; trace_xhci_handle_command(xhci->cmd_ring, &cmd_trb->generic); @@ -1676,6 +1802,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, cancel_delayed_work(&xhci->cmd_timer); cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(struct xhci_event_cmd)); +#endif /* If CMD ring stopped we own the trbs between enqueue and dequeue */ if (cmd_comp_code == COMP_COMMAND_RING_STOPPED) { @@ -1705,6 +1834,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, } cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif switch (cmd_type) { case TRB_ENABLE_SLOT: xhci_handle_cmd_enable_slot(xhci, slot_id, cmd, cmd_comp_code); @@ -1724,6 +1856,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_STOP_RING: WARN_ON(slot_id != TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3]))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif if (!cmd->completion) xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, cmd_comp_code); @@ -1731,6 +1866,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_SET_DEQ: WARN_ON(slot_id != TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3]))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code); break; case TRB_CMD_NOOP: @@ -1741,6 +1879,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, case TRB_RESET_EP: WARN_ON(slot_id != TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3]))); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, 
cmd_comp_code); break; case TRB_RESET_DEV: @@ -1749,6 +1890,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, */ slot_id = TRB_TO_SLOT_ID( le32_to_cpu(cmd_trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(cmd_trb, sizeof(union xhci_trb)); +#endif xhci_handle_cmd_reset_dev(xhci, slot_id); break; case TRB_NEC_GET_FW: @@ -1790,6 +1934,9 @@ static void handle_device_notification(struct xhci_hcd *xhci, struct usb_device *udev; slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif if (!xhci->devs[slot_id]) { xhci_warn(xhci, "Device Notification event for " "unused slot %u\n", slot_id); @@ -1846,12 +1993,19 @@ static void handle_port_status(struct xhci_hcd *xhci, struct xhci_port *port; /* Port status change events always have a successful completion code */ - if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) + if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif xhci_warn(xhci, "WARN: xHC returned failed port status event\n"); + } port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0])); max_ports = HCS_MAX_PORTS(xhci->hcs_params1); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif if ((port_id <= 0) || (port_id > max_ports)) { xhci_warn(xhci, "Port change event with invalid port ID %d\n", @@ -2107,15 +2261,24 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci, /* TRB completion codes that may require a manual halt cleanup */ if (trb_comp_code == COMP_USB_TRANSACTION_ERROR || trb_comp_code == COMP_BABBLE_DETECTED_ERROR || - trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) + trb_comp_code == COMP_SPLIT_TRANSACTION_ERROR) { /* The 0.95 spec says a babbling control endpoint * is not halted. The 0.96 spec says it is. Some HW * claims to be 0.95 compliant, but it halts the control * endpoint anyway. Check if a babble halted the * endpoint. 
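/*
 * Sketch, not part of the patch: the handle_cmd_completion() hunks above
 * compare the bus address reported in the command completion event with
 * the driver's own dequeue pointer.  Translating a TRB's virtual address
 * within a segment into its bus address is simple pointer arithmetic,
 * modelled here after the kernel's xhci_trb_virt_to_dma(); the MODEL_*
 * types are illustrative only.
 */
#include <stdint.h>
#include <stddef.h>

#define MODEL_TRBS_PER_SEG 16

struct model_trb { uint32_t field[4]; };

struct model_seg {
	struct model_trb trbs[MODEL_TRBS_PER_SEG];
	uint64_t dma;			/* bus address of trbs[0] */
};

static uint64_t model_trb_virt_to_dma(const struct model_seg *seg,
				      const struct model_trb *trb)
{
	ptrdiff_t idx = trb - seg->trbs;

	if (idx < 0 || idx >= MODEL_TRBS_PER_SEG)
		return 0;		/* not in this segment */
	return seg->dma + (uint64_t)idx * sizeof(struct model_trb);
}

int main(void)
{
	static struct model_seg seg = { .dma = 0x8000 };

	/* the third TRB sits 2 * 16 bytes past the segment base */
	return model_trb_virt_to_dma(&seg, &seg.trbs[2]) == 0x8020 ? 0 : 1;
}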
*/ - if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) + if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_HALTED) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif return 1; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif + } return 0; } @@ -2229,6 +2392,9 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring, for (sum = 0; trb != stop_trb; next_trb(xhci, ring, &seg, &trb)) { if (!trb_is_noop(trb) && !trb_is_link(trb)) sum += TRB_LEN(le32_to_cpu(trb->generic.field[2])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif } return sum; } @@ -2246,10 +2412,16 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, u32 trb_type; trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_trb, sizeof(union xhci_trb)); +#endif ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index); trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); requested = td->urb->transfer_buffer_length; remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif switch (trb_comp_code) { case COMP_SUCCESS: @@ -2351,6 +2523,10 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, requested = frame->length; remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); + cdns_virt_flush_dcache(ep_trb, sizeof(union xhci_trb)); +#endif short_framestatus = td->urb->transfer_flags & URB_SHORT_NOT_OK ? 
-EREMOTEIO : 0; @@ -2452,9 +2628,16 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, u32 remaining, requested, ep_trb_len; slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)); ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); + cdns_virt_flush_dcache(ep_trb, sizeof(union xhci_trb)); +#endif requested = td->urb->transfer_buffer_length; switch (trb_comp_code) { @@ -2486,8 +2669,15 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_virt_ep *ep, case COMP_USB_TRANSACTION_ERROR: if (xhci->quirks & XHCI_NO_SOFT_RETRY || (ep_ring->err_count++ > MAX_SOFT_RETRY) || - le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) + le32_to_cpu(slot_ctx->tt_info) & TT_SLOT) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif break; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif td->status = 0; @@ -2542,6 +2732,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1; trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len)); ep_trb_dma = le64_to_cpu(event->buffer); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif ep = xhci_get_virt_ep(xhci, slot_id, ep_index); if (!ep) { @@ -2556,8 +2749,14 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_err(xhci, "ERROR Transfer event for disabled endpoint slot %u ep %u\n", slot_id, ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif goto err_out; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* Some transfer events don't always point to a trb, see xhci 4.17.4 */ if (!ep_ring) { @@ -2592,8 +2791,16 @@ static int handle_tx_event(struct xhci_hcd *xhci, * transfer type */ case COMP_SUCCESS: - if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) + if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) == 0) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif break; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif if (xhci->quirks & XHCI_TRUST_TX_LENGTH || ep_ring->last_td_was_short) trb_comp_code = COMP_SHORT_PACKET; @@ -2671,19 +2878,27 @@ static int handle_tx_event(struct xhci_hcd *xhci, * Underrun Event for OUT Isoch endpoint. 
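/*
 * Sketch, not part of the patch: the handle_tx_event() hunks above pull
 * everything they need out of one 16-byte transfer event TRB.  The bit
 * layout assumed here follows the xHCI spec (the kernel's GET_COMP_CODE,
 * EVENT_TRB_LEN, TRB_TO_SLOT_ID and TRB_TO_EP_ID macros are
 * authoritative): dword 2 carries the residual length in bits 23:0 and
 * the completion code in bits 31:24, dword 3 carries the endpoint ID in
 * bits 20:16 and the slot ID in bits 31:24.
 */
#include <stdint.h>

struct model_transfer_event {
	uint64_t buffer;		/* bus address of the TRB that completed */
	uint32_t transfer_len;
	uint32_t flags;
};

struct model_decoded_event {
	unsigned int slot_id;
	unsigned int ep_index;		/* endpoint ID minus one, as the driver uses it */
	unsigned int comp_code;
	unsigned int remaining;
};

static void model_decode_event(const struct model_transfer_event *ev,
			       struct model_decoded_event *out)
{
	out->slot_id = ev->flags >> 24;
	out->ep_index = ((ev->flags >> 16) & 0x1f) - 1;
	out->comp_code = ev->transfer_len >> 24;
	out->remaining = ev->transfer_len & 0xffffff;
}

int main(void)
{
	struct model_transfer_event ev = {
		.transfer_len = (1u << 24) | 512,	/* SUCCESS, 512 bytes left */
		.flags = (3u << 24) | (2u << 16),	/* slot 3, endpoint ID 2 */
	};
	struct model_decoded_event d;

	model_decode_event(&ev, &d);
	return (d.slot_id == 3 && d.ep_index == 1 &&
		d.comp_code == 1 && d.remaining == 512) ? 0 : 1;
}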
*/ xhci_dbg(xhci, "underrun event on endpoint\n"); - if (!list_empty(&ep_ring->td_list)) + if (!list_empty(&ep_ring->td_list)) { xhci_dbg(xhci, "Underrun Event for slot %d ep %d " "still with TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif + } goto cleanup; case COMP_RING_OVERRUN: xhci_dbg(xhci, "overrun event on endpoint\n"); - if (!list_empty(&ep_ring->td_list)) + if (!list_empty(&ep_ring->td_list)) { xhci_dbg(xhci, "Overrun Event for slot %d ep %d " "still with TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif + } goto cleanup; case COMP_MISSED_SERVICE_ERROR: /* @@ -2741,6 +2956,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n", TRB_TO_SLOT_ID(le32_to_cpu(event->flags)), ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif } if (ep->skip) { ep->skip = false; @@ -2832,6 +3050,9 @@ static int handle_tx_event(struct xhci_hcd *xhci, trace_xhci_handle_transfer(ep_ring, (struct xhci_generic_trb *) ep_trb); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_trb, sizeof(union xhci_trb)); +#endif /* * No-op TRB could trigger interrupts in a case where @@ -2916,9 +3137,16 @@ static int xhci_handle_event(struct xhci_hcd *xhci) event = xhci->event_ring->dequeue; /* Does the HC or OS own the TRB? */ if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) != - xhci->event_ring->cycle_state) + xhci->event_ring->cycle_state) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif return 0; - + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif trace_xhci_handle_event(xhci->event_ring, &event->generic); /* @@ -2927,6 +3155,9 @@ static int xhci_handle_event(struct xhci_hcd *xhci) */ rmb(); trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(event, sizeof(union xhci_trb)); +#endif /* FIXME: Handle more event types. 
*/ switch (trb_type) { @@ -2999,6 +3230,9 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci, /* Update HC event ring dequeue pointer */ temp_64 &= ERST_PTR_MASK; temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(deq, sizeof(union xhci_trb)); +#endif } /* Clear the event handler busy flag (RW1C) */ @@ -3115,8 +3349,14 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, /* make sure TRB is fully written before giving it to the controller */ wmb(); trb->field[3] = cpu_to_le32(field4); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif trace_xhci_queue_trb(ring, trb); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(trb, sizeof(union xhci_trb)); +#endif inc_enq(xhci, ring, more_trbs_coming); } @@ -3191,10 +3431,16 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring, else ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&ep_ring->enqueue->link, sizeof(union xhci_trb)); +#endif wmb(); ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(&ep_ring->enqueue->link, sizeof(union xhci_trb)); +#endif /* Toggle the cycle bit after the last ring segment. */ if (link_trb_toggles_cycle(ep_ring->enqueue)) ep_ring->cycle_state ^= 1; @@ -3231,6 +3477,9 @@ static int prepare_transfer(struct xhci_hcd *xhci, struct xhci_td *td; struct xhci_ring *ep_ring; struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u32 ep_state; +#endif ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index, stream_id); @@ -3240,7 +3489,13 @@ static int prepare_transfer(struct xhci_hcd *xhci, return -EINVAL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + ep_state = GET_EP_CTX_STATE(ep_ctx); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); + ret = prepare_ring(xhci, ep_ring, ep_state, +#else ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), +#endif num_trbs, mem_flags); if (ret) return ret; @@ -3337,6 +3592,9 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id, start_trb->field[3] |= cpu_to_le32(start_cycle); else start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(start_trb, sizeof(union xhci_trb)); +#endif xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id); } @@ -3347,6 +3605,9 @@ static void check_interval(struct xhci_hcd *xhci, struct urb *urb, int ep_interval; xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif ep_interval = urb->interval; /* Convert to microframes */ @@ -3484,6 +3745,9 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(seg->bounce_buf, new_buff_len); +#endif } else { seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_FROM_DEVICE); @@ -3494,6 +3758,9 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, xhci_warn(xhci, "Failed mapping bounce buffer, not aligning\n"); return 0; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(seg->bounce_dma, max_pkt); +#endif *trb_buff_len = 
new_buff_len; seg->bounce_len = new_buff_len; seg->bounce_offs = enqd_len; @@ -3539,6 +3806,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, addr = (u64) urb->transfer_dma; block_len = full_len; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(addr, block_len); +#endif ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, urb->stream_id, num_trbs, urb, 0, mem_flags); @@ -3608,6 +3878,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, trb_buff_len); le64_to_cpus(&send_addr); field |= TRB_IDT; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(urb->transfer_buffer, trb_buff_len); + cdns_flush_dcache(send_addr, trb_buff_len); +#endif } } @@ -3622,6 +3896,9 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(send_addr, trb_buff_len); +#endif queue_trb(xhci, ring, more_trbs_coming | need_zero_pkt, lower_32_bits(send_addr), @@ -3731,6 +4008,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field |= TRB_TX_TYPE(TRB_DATA_IN); else field |= TRB_TX_TYPE(TRB_DATA_OUT); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, sizeof(struct usb_ctrlrequest)); +#endif } } @@ -3740,6 +4020,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, TRB_LEN(8) | TRB_INTR_TARGET(0), /* Immediate data in pointer */ field); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, sizeof(struct usb_ctrlrequest)); +#endif /* If there's data, queue data TRBs */ /* Only set interrupt on short packet for IN endpoints */ @@ -3757,6 +4040,12 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, urb->transfer_buffer_length); le64_to_cpus(&addr); field |= TRB_IDT; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(urb->transfer_buffer, + urb->transfer_buffer_length); + cdns_flush_dcache(addr, + urb->transfer_buffer_length); +#endif } else { addr = (u64) urb->transfer_dma; } @@ -3770,6 +4059,10 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, TRB_INTR_TARGET(0); if (setup->bRequestType & USB_DIR_IN) field |= TRB_DIR_IN; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, sizeof(struct usb_ctrlrequest)); + cdns_flush_dcache(addr, urb->transfer_buffer_length); +#endif queue_trb(xhci, ep_ring, true, lower_32_bits(addr), upper_32_bits(addr), @@ -3787,6 +4080,9 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, field = 0; else field = TRB_DIR_IN; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(setup, sizeof(struct usb_ctrlrequest)); +#endif queue_trb(xhci, ep_ring, false, 0, 0, @@ -4091,7 +4387,9 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, else length_field |= TRB_TD_SIZE(remainder); first_trb = false; - +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(addr, trb_buff_len); +#endif queue_trb(xhci, ep_ring, more_trbs_coming, lower_32_bits(addr), upper_32_bits(addr), @@ -4166,6 +4464,9 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, int ret; struct xhci_virt_ep *xep; int ist; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + u32 ep_state; +#endif xdev = xhci->devs[slot_id]; xep = &xhci->devs[slot_id]->eps[ep_index]; @@ -4180,8 +4481,14 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, /* Check the ring to guarantee there is enough room for the whole urb. 
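/*
 * Sketch, not part of the patch: the xhci_queue_ctrl_tx() hunks above
 * build a control transfer out of at most three TRBs - setup, data,
 * status.  A minimal model of the sequencing rules: the setup packet
 * rides inside the setup TRB as 8 bytes of immediate data (IDT), the
 * data stage follows bRequestType's direction, and the status stage runs
 * in the opposite direction (IN when there is no data stage).  TRB field
 * encodings and endianness handling are deliberately left out.
 */
#include <stdint.h>
#include <string.h>

struct model_setup_packet {		/* mirrors struct usb_ctrlrequest */
	uint8_t bRequestType;
	uint8_t bRequest;
	uint16_t wValue;
	uint16_t wIndex;
	uint16_t wLength;
};

enum model_dir { MODEL_DIR_OUT, MODEL_DIR_IN };

struct model_ctrl_td {
	uint8_t setup_immediate[8];	/* setup stage: immediate data */
	int has_data_stage;
	enum model_dir data_dir;
	enum model_dir status_dir;
};

static void model_build_ctrl_td(const struct model_setup_packet *setup,
				struct model_ctrl_td *td)
{
	enum model_dir dir = (setup->bRequestType & 0x80) ?
			     MODEL_DIR_IN : MODEL_DIR_OUT;

	memcpy(td->setup_immediate, setup, sizeof(td->setup_immediate));
	td->has_data_stage = setup->wLength != 0;
	td->data_dir = dir;
	/* status stage always runs opposite to the data stage */
	td->status_dir = (td->has_data_stage && dir == MODEL_DIR_IN) ?
			 MODEL_DIR_OUT : MODEL_DIR_IN;
}

int main(void)
{
	struct model_setup_packet get_descriptor = {
		.bRequestType = 0x80, .bRequest = 6, .wValue = 0x0100,
		.wLength = 18,
	};
	struct model_ctrl_td td;

	model_build_ctrl_td(&get_descriptor, &td);
	return (td.has_data_stage && td.data_dir == MODEL_DIR_IN &&
		td.status_dir == MODEL_DIR_OUT) ? 0 : 1;
}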
* Do not insert any td of the urb to the ring if the check failed. */ +#if defined(CONFIG_USB_CDNS3_HOST_FLUSH_DMA) + ep_state = GET_EP_CTX_STATE(ep_ctx); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); + ret = prepare_ring(xhci, ep_ring, ep_state, num_trbs, mem_flags); +#else ret = prepare_ring(xhci, ep_ring, GET_EP_CTX_STATE(ep_ctx), num_trbs, mem_flags); +#endif if (ret) return ret; @@ -4194,9 +4501,15 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags, /* Calculate the start frame and put it in urb->start_frame. */ if (HCC_CFC(xhci->hcc_params) && !list_empty(&ep_ring->td_list)) { if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_RUNNING) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif urb->start_frame = xep->next_frame_id; goto skip_start_over; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } start_frame = readl(&xhci->run_regs->microframe_index); @@ -4293,6 +4606,9 @@ int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd, int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(in_ctx_ptr, sizeof(struct xhci_ep_ctx)); +#endif return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id) @@ -4319,6 +4635,9 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(in_ctx_ptr, sizeof(struct xhci_ep_ctx)); +#endif return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id), @@ -4329,6 +4648,9 @@ int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd, dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(in_ctx_ptr, sizeof(struct xhci_ep_ctx)); +#endif return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr), upper_32_bits(in_ctx_ptr), 0, TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id), diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 27283654ca0804..b1c0f0c21e93a1 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -860,6 +860,11 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci) sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1)); seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(seg->trbs, + sizeof(union xhci_trb) * + TRBS_PER_SEGMENT); +#endif seg = seg->next; } while (seg != ring->deq_seg); @@ -1527,6 +1532,9 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, out_ctx = xhci->devs[slot_id]->out_ctx; ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc); if (hw_max_packet_size != max_packet_size) { xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, @@ -1566,8 +1574,14 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, 
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK); ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif ctrl_ctx->add_flags = cpu_to_le32(EP0_FLAG); ctrl_ctx->drop_flags = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif ret = xhci_configure_endpoint(xhci, urb->dev, command, true, false); @@ -1576,6 +1590,9 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id, * functions. */ ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif command_cleanup: kfree(command->completion); kfree(command); @@ -1908,18 +1925,29 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, if ((GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) || le32_to_cpu(ctrl_ctx->drop_flags) & xhci_get_endpoint_flag(&ep->desc)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif /* Do not warn when called after a usb_device_reset */ if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", __func__, ep); return 0; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif ctrl_ctx->drop_flags |= cpu_to_le32(drop_flag); new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); ctrl_ctx->add_flags &= cpu_to_le32(~drop_flag); new_add_flags = le32_to_cpu(ctrl_ctx->add_flags); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif xhci_debugfs_remove_endpoint(xhci, xhci->devs[udev->slot_id], ep_index); @@ -1996,20 +2024,32 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, */ if (virt_dev->eps[ep_index].ring && !(le32_to_cpu(ctrl_ctx->drop_flags) & added_ctxs)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif xhci_warn(xhci, "Trying to add endpoint 0x%x " "without dropping it.\n", (unsigned int) ep->desc.bEndpointAddress); return -EINVAL; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* If the HCD has already noted the endpoint is enabled, * ignore this request. */ if (le32_to_cpu(ctrl_ctx->add_flags) & added_ctxs) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif xhci_warn(xhci, "xHCI %s called with enabled ep %p\n", __func__, ep); return 0; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* * Configuration and alternate setting changes must be done in @@ -2032,12 +2072,18 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, * drop flags alone. 
*/ new_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Store the usb_device pointer for later use */ ep->hcpriv = udev; ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); trace_xhci_add_endpoint(ep_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", (unsigned int) ep->desc.bEndpointAddress, @@ -2069,16 +2115,25 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir */ ctrl_ctx->drop_flags = 0; ctrl_ctx->add_flags = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); /* Endpoint 0 is always valid */ slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif for (i = 1; i < 31; i++) { ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i); ep_ctx->ep_info = 0; ep_ctx->ep_info2 = 0; ep_ctx->deq = 0; ep_ctx->tx_info = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ep_ctx, sizeof(*ep_ctx)); +#endif } } @@ -2194,6 +2249,9 @@ static u32 xhci_count_num_new_endpoints(struct xhci_hcd *xhci, */ valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Use hweight32 to count the number of ones in the add flags, or * number of endpoints added. Don't count endpoints that are changed @@ -2211,6 +2269,9 @@ static unsigned int xhci_count_num_dropped_endpoints(struct xhci_hcd *xhci, valid_add_flags = le32_to_cpu(ctrl_ctx->add_flags) >> 2; valid_drop_flags = le32_to_cpu(ctrl_ctx->drop_flags) >> 2; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif return hweight32(valid_drop_flags) - hweight32(valid_add_flags & valid_drop_flags); @@ -2790,8 +2851,17 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, } for (i = 0; i < 31; i++) { - if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) + if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif continue; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif /* Make a copy of the BW info in case we need to revert this */ memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info, @@ -2799,25 +2869,45 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, /* Drop the endpoint from the interval table if the endpoint is * being dropped or changed. 
*/ - if (EP_IS_DROPPED(ctrl_ctx, i)) + if (EP_IS_DROPPED(ctrl_ctx, i)){ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif xhci_drop_ep_from_interval_table(xhci, &virt_dev->eps[i].bw_info, virt_dev->bw_table, virt_dev->udev, &virt_dev->eps[i], virt_dev->tt_info); + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif } /* Overwrite the information stored in the endpoints' bw_info */ xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev); for (i = 0; i < 31; i++) { /* Add any changed or added endpoints to the interval table */ - if (EP_IS_ADDED(ctrl_ctx, i)) + if (EP_IS_ADDED(ctrl_ctx, i)){ +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif xhci_add_ep_to_interval_table(xhci, &virt_dev->eps[i].bw_info, virt_dev->bw_table, virt_dev->udev, &virt_dev->eps[i], virt_dev->tt_info); + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif } if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) { @@ -2830,13 +2920,26 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, /* We don't have enough bandwidth for this, revert the stored info. */ for (i = 0; i < 31; i++) { - if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) + if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif continue; + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif /* Drop the new copies of any added or changed endpoints from * the interval table. 
*/ if (EP_IS_ADDED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif xhci_drop_ep_from_interval_table(xhci, &virt_dev->eps[i].bw_info, virt_dev->bw_table, @@ -2844,18 +2947,36 @@ static int xhci_reserve_bandwidth(struct xhci_hcd *xhci, &virt_dev->eps[i], virt_dev->tt_info); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif /* Revert the endpoint back to its old information */ memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i], sizeof(ep_bw_info[i])); /* Add any changed or dropped endpoints back into the table */ - if (EP_IS_DROPPED(ctrl_ctx, i)) + if (EP_IS_DROPPED(ctrl_ctx, i)) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif xhci_add_ep_to_interval_table(xhci, &virt_dev->eps[i].bw_info, virt_dev->bw_table, virt_dev->udev, &virt_dev->eps[i], virt_dev->tt_info); + } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, + sizeof(struct xhci_input_control_ctx)); +#endif } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif return -ENOMEM; } @@ -2915,6 +3036,10 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, trace_xhci_configure_endpoint_ctrl_ctx(ctrl_ctx); trace_xhci_configure_endpoint(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif if (!ctx_change) ret = xhci_queue_configure_endpoint(xhci, command, @@ -3021,13 +3146,22 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG); ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Don't issue the command if there's no endpoints to update. */ if (ctrl_ctx->add_flags == cpu_to_le32(SLOT_FLAG) && ctrl_ctx->drop_flags == 0) { ret = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif goto command_cleanup; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). 
*/ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); for (i = 31; i >= 1; i--) { @@ -3035,10 +3169,19 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) if ((virt_dev->eps[i-1].ring && !(ctrl_ctx->drop_flags & le32)) || (ctrl_ctx->add_flags & le32) || i == 1) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK); slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i)); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif break; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif } ret = xhci_configure_endpoint(xhci, udev, command, @@ -3051,9 +3194,16 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) for (i = 1; i < 31; i++) { if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) && !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif xhci_free_endpoint_ring(xhci, virt_dev, i); xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif } xhci_zero_in_ctx(xhci, virt_dev); /* @@ -3117,6 +3267,9 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, ctrl_ctx->drop_flags = cpu_to_le32(drop_flags); xhci_slot_copy(xhci, in_ctx, out_ctx); ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif } static void xhci_endpoint_disable(struct usb_hcd *hcd, @@ -3802,10 +3955,17 @@ static int xhci_discover_or_reset_device(struct usb_hcd *hcd, /* If device is not setup, there is no point in resetting it */ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == - SLOT_STATE_DISABLED) + SLOT_STATE_DISABLED) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif return 0; + } trace_xhci_discover_or_reset_device(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); /* Allocate the command structure that holds the struct completion. 
@@ -3942,6 +4102,9 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) virt_dev = xhci->devs[udev->slot_id]; slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); trace_xhci_free_dev(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif /* Stop any wayward timer functions (which may grab the lock) */ for (i = 0; i < 31; i++) { @@ -4077,6 +4240,9 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) vdev = xhci->devs[slot_id]; slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx); trace_xhci_alloc_dev(slot_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif udev->slot_id = slot_id; @@ -4153,9 +4319,16 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, if (setup == SETUP_CONTEXT_ONLY) { if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == SLOT_STATE_DEFAULT) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg(xhci, "Slot already in default state\n"); goto out; } +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + else + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif } command = xhci_alloc_command(xhci, true, GFP_KERNEL); @@ -4179,18 +4352,35 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, * virt_device realloaction after a resume with an xHCI power loss, * then set up the slot context. */ - if (!slot_ctx->dev_info) + if (!slot_ctx->dev_info) { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_setup_addressable_virt_dev(xhci, udev); /* Otherwise, update the control endpoint ring enqueue pointer. */ - else + } else { +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev); + } ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG); ctrl_ctx->drop_flags = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif trace_xhci_address_ctx(xhci, virt_dev->in_ctx, le32_to_cpu(slot_ctx->dev_info) >> 27); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif trace_xhci_address_ctrl_ctx(ctrl_ctx); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif + spin_lock_irqsave(&xhci->lock, flags); trace_xhci_setup_device(virt_dev); ret = xhci_queue_address_device(xhci, command, virt_dev->in_ctx->dma, @@ -4261,6 +4451,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, &xhci->dcbaa->dev_context_ptrs[udev->slot_id], (unsigned long long) le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id])); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_flush_dcache(xhci->dcbaa->dma, sizeof(*xhci->dcbaa)); +#endif xhci_dbg_trace(xhci, trace_xhci_dbg_address, "Output Context DMA address = %#08llx", (unsigned long long)virt_dev->out_ctx->dma); @@ -4272,9 +4465,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, */ trace_xhci_address_ctx(xhci, virt_dev->out_ctx, le32_to_cpu(slot_ctx->dev_info) >> 27); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif /* Zero the input context control for later use */ ctrl_ctx->add_flags = 0; ctrl_ctx->drop_flags = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif 
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); udev->devaddr = (u8)(le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); @@ -4282,6 +4481,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, "Internal device address = %d", le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK); out: +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif mutex_unlock(&xhci->mutex); if (command) { kfree(command->completion); @@ -4357,10 +4559,16 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, spin_unlock_irqrestore(&xhci->lock, flags); ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif slot_ctx = xhci_get_slot_ctx(xhci, command->in_ctx); slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT)); slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency); slot_ctx->dev_state = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif xhci_dbg_trace(xhci, trace_xhci_dbg_context_change, "Set up evaluate context for LPM MEL change."); @@ -5118,6 +5326,9 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx); ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG); +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(ctrl_ctx, sizeof(*ctrl_ctx)); +#endif slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx); slot_ctx->dev_info |= cpu_to_le32(DEV_HUB); /* @@ -5154,6 +5365,9 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev, (unsigned int) xhci->hci_version); } slot_ctx->dev_state = 0; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(slot_ctx, sizeof(*slot_ctx)); +#endif spin_unlock_irqrestore(&xhci->lock, flags); xhci_dbg(xhci, "Set up %s for hub device.\n", diff --git a/include/linux/usb.h b/include/linux/usb.h index eaae24217e8a2e..6cbb23aff7adb2 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1608,6 +1608,19 @@ struct urb { /* (in) ISO ONLY */ }; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA +#include +static inline void cdns_flush_dcache(unsigned long start, unsigned long len) +{ + starfive_flush_dcache(_ALIGN_DOWN(start, 64), len + (start & 63)); +} + +static inline void cdns_virt_flush_dcache(void *virt_start, unsigned long len) +{ + if (virt_start) + cdns_flush_dcache(dw_virt_to_phys(virt_start), len); +} +#endif /* ----------------------------------------------------------------------- */ /** @@ -1640,6 +1653,9 @@ static inline void usb_fill_control_urb(struct urb *urb, urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(transfer_buffer, buffer_length); +#endif } /** @@ -1669,6 +1685,9 @@ static inline void usb_fill_bulk_urb(struct urb *urb, urb->transfer_buffer_length = buffer_length; urb->complete = complete_fn; urb->context = context; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(transfer_buffer, buffer_length); +#endif } /** @@ -1712,6 +1731,9 @@ static inline void usb_fill_int_urb(struct urb *urb, urb->complete = complete_fn; urb->context = context; +#ifdef CONFIG_USB_CDNS3_HOST_FLUSH_DMA + cdns_virt_flush_dcache(transfer_buffer, buffer_length); +#endif if (dev->speed == USB_SPEED_HIGH || dev->speed >= USB_SPEED_SUPER) { /* make sure interval is within allowed range */ interval = 
clamp(interval, 1, 16); From 84ee4f56b3d5e7180ca6c66f7d76593b0c38753d Mon Sep 17 00:00:00 2001 From: Tom Date: Fri, 8 Jan 2021 02:59:40 +0800 Subject: [PATCH 45/62] riscv/starfive: Add VIC7100 support --- arch/riscv/Kconfig.socs | 50 ++++++++++++++++++++++++++++++++++ include/soc/starfive/vic7100.h | 36 ++++++++++++++++++++++++ 2 files changed, 86 insertions(+) create mode 100644 include/soc/starfive/vic7100.h diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs index ed963761fbd2f3..81cd35b02d555f 100644 --- a/arch/riscv/Kconfig.socs +++ b/arch/riscv/Kconfig.socs @@ -18,6 +18,56 @@ config SOC_SIFIVE help This enables support for SiFive SoC platform hardware. +config SOC_STARFIVE_VIC7100 + bool "StarFive VIC7100 SoC" + select SOC_SIFIVE + select OF_RESERVED_MEM + select SIFIVE_L2 + select SIFIVE_L2_FLUSH + select DW_AXI_DMAC_STARFIVE + select GPIO_STARFIVE_VIC + select HW_RANDOM_STARFIVE_VIC + help + This enables support for StarFive VIC7100 SoC platform hardware. + +menu "StarFive VIC7100 SoC Debug Options" + depends on SOC_STARFIVE_VIC7100 + +config FPGA_GMAC_FLUSH_DDR + bool "VIC7100 SoC GMAC descriptor and packet buffer flush" + depends on SOC_STARFIVE_VIC7100 + depends on STMMAC_ETH + default y if SOC_STARFIVE_VIC7100 + help + Enable VIC7100 GMAC descriptor and packet buffer flush. + +config MMC_DW_FLUSH_DDR + bool "VIC7100 SoC DW MMC buffer flush" + depends on SOC_STARFIVE_VIC7100 + depends on MMC_DW + default y if SOC_STARFIVE_VIC7100 + help + Enable VIC7100 DW MMC descriptor and data buffer flush. + +config USB_CDNS3_HOST_FLUSH_DMA + bool "Cadence USB3 host controller DMA memory flush" + depends on USB + depends on USB_CDNS3 + depends on SOC_STARFIVE_VIC7100 + default y if SOC_STARFIVE_VIC7100 + help + Enable VIC7100 Cadence USB3 (CDNS3) host driver data buffer flush. + +config SOC_STARFIVE_VIC7100_I2C_GPIO + bool "VIC7100 SoC I2C GPIO init" + depends on I2C_DESIGNWARE_CORE + depends on SOC_STARFIVE_VIC7100 + default y if SOC_STARFIVE_VIC7100 + help + Enable VIC7100 I2C GPIO init in the Synopsys DesignWare I2C master driver. + +endmenu + config SOC_VIRT bool "QEMU Virt Machine" select CLINT_TIMER if RISCV_M_MODE diff --git a/include/soc/starfive/vic7100.h b/include/soc/starfive/vic7100.h new file mode 100644 index 00000000000000..a850f4cd11bf75 --- /dev/null +++ b/include/soc/starfive/vic7100.h @@ -0,0 +1,36 @@ +#ifndef STARFIVE_VIC7100_H +#define STARFIVE_VIC7100_H +#include +#include + +/*cache.c*/ +#define starfive_flush_dcache(start, len) \ + sifive_l2_flush64_range(start, len) + +/*dma*/ +#define CONFIG_DW_DEBUG + +#define DMA_PRINTK(fmt,...) \ + printk("[DW_DMA] %s():%d \n" fmt, __func__, __LINE__, ##__VA_ARGS__) + +#ifdef CONFIG_DW_DEBUG +#define DMA_DEBUG(fmt,...) \ + printk("[DW_DMA_DEBUG] %s():%d \n" fmt, __func__, __LINE__, ##__VA_ARGS__) +#else +#define DMA_DEBUG(fmt,...) 
+#endif + +#define _dw_virt_to_phys(vaddr) (pfn_to_phys(virt_to_pfn(vaddr))) +#define _dw_phys_to_virt(paddr) (page_to_virt(phys_to_page(paddr))) + +void *dw_phys_to_virt(u64 phys); +u64 dw_virt_to_phys(void *vaddr); + +int dw_dma_async_do_memcpy(void *src, void *dst, size_t size); +int dw_dma_memcpy_raw(dma_addr_t src_dma, dma_addr_t dst_dma, size_t size); +int dw_dma_memcpy(void *src, void *dst, size_t size); + +int dw_dma_mem2mem_arry(void); +int dw_dma_mem2mem_test(void); + +#endif /*STARFIVE_VIC7100_H*/ \ No newline at end of file From b4e9da54fde740bddcaef0cf54c131ecb1448463 Mon Sep 17 00:00:00 2001 From: "jack.zhu" Date: Mon, 11 Jan 2021 04:06:22 +0800 Subject: [PATCH 46/62] drivers/video/fbdev and drivers/media/platform: starfive drivers added 1, add ov5640&sc2235 drivers, update stf_isp 2, add MIPI/CSI/DSI drivers for VIC7100 --- drivers/media/platform/Kconfig | 1 + drivers/media/platform/Makefile | 2 + drivers/media/platform/starfive/Kconfig | 32 + drivers/media/platform/starfive/Makefile | 10 + drivers/media/platform/starfive/imx219_mipi.c | 425 +++ drivers/media/platform/starfive/ov5640_dvp.c | 456 ++++ drivers/media/platform/starfive/sc2235.c | 424 +++ drivers/media/platform/starfive/stf_csi.c | 210 ++ drivers/media/platform/starfive/stf_csi.h | 135 + drivers/media/platform/starfive/stf_event.c | 39 + drivers/media/platform/starfive/stf_isp.c | 441 ++++ drivers/media/platform/starfive/stf_isp.h | 16 + drivers/media/platform/starfive/stf_vin.c | 935 +++++++ drivers/video/fbdev/Kconfig | 10 + drivers/video/fbdev/Makefile | 1 + drivers/video/fbdev/starfive/Kconfig | 35 + drivers/video/fbdev/starfive/Makefile | 11 + drivers/video/fbdev/starfive/adv7513.c | 268 ++ drivers/video/fbdev/starfive/adv7513.h | 22 + drivers/video/fbdev/starfive/seeed5inch.c | 242 ++ .../video/fbdev/starfive/starfive_comm_regs.h | 95 + .../fbdev/starfive/starfive_display_dev.c | 135 + .../fbdev/starfive/starfive_display_dev.h | 273 ++ .../video/fbdev/starfive/starfive_displayer.c | 912 +++++++ drivers/video/fbdev/starfive/starfive_fb.c | 1245 +++++++++ drivers/video/fbdev/starfive/starfive_fb.h | 138 + drivers/video/fbdev/starfive/starfive_lcdc.c | 364 +++ drivers/video/fbdev/starfive/starfive_lcdc.h | 152 ++ .../video/fbdev/starfive/starfive_mipi_tx.c | 665 +++++ .../video/fbdev/starfive/starfive_mipi_tx.h | 203 ++ drivers/video/fbdev/starfive/starfive_vpp.c | 588 +++++ drivers/video/fbdev/starfive/starfive_vpp.h | 194 ++ drivers/video/fbdev/starfive/tda998x.c | 2279 +++++++++++++++++ include/dt-bindings/starfive_fb.h | 47 + include/video/stf-vin.h | 307 +++ 35 files changed, 11312 insertions(+) mode change 100644 => 100755 drivers/media/platform/Kconfig create mode 100644 drivers/media/platform/starfive/Kconfig create mode 100644 drivers/media/platform/starfive/Makefile create mode 100644 drivers/media/platform/starfive/imx219_mipi.c create mode 100755 drivers/media/platform/starfive/ov5640_dvp.c create mode 100755 drivers/media/platform/starfive/sc2235.c create mode 100644 drivers/media/platform/starfive/stf_csi.c create mode 100644 drivers/media/platform/starfive/stf_csi.h create mode 100644 drivers/media/platform/starfive/stf_event.c create mode 100644 drivers/media/platform/starfive/stf_isp.c create mode 100644 drivers/media/platform/starfive/stf_isp.h create mode 100644 drivers/media/platform/starfive/stf_vin.c mode change 100644 => 100755 drivers/video/fbdev/Kconfig mode change 100644 => 100755 drivers/video/fbdev/Makefile create mode 100644 drivers/video/fbdev/starfive/Kconfig create mode 100755 
drivers/video/fbdev/starfive/Makefile create mode 100644 drivers/video/fbdev/starfive/adv7513.c create mode 100644 drivers/video/fbdev/starfive/adv7513.h create mode 100644 drivers/video/fbdev/starfive/seeed5inch.c create mode 100644 drivers/video/fbdev/starfive/starfive_comm_regs.h create mode 100644 drivers/video/fbdev/starfive/starfive_display_dev.c create mode 100644 drivers/video/fbdev/starfive/starfive_display_dev.h create mode 100644 drivers/video/fbdev/starfive/starfive_displayer.c create mode 100644 drivers/video/fbdev/starfive/starfive_fb.c create mode 100644 drivers/video/fbdev/starfive/starfive_fb.h create mode 100644 drivers/video/fbdev/starfive/starfive_lcdc.c create mode 100644 drivers/video/fbdev/starfive/starfive_lcdc.h create mode 100644 drivers/video/fbdev/starfive/starfive_mipi_tx.c create mode 100644 drivers/video/fbdev/starfive/starfive_mipi_tx.h create mode 100644 drivers/video/fbdev/starfive/starfive_vpp.c create mode 100644 drivers/video/fbdev/starfive/starfive_vpp.h create mode 100755 drivers/video/fbdev/starfive/tda998x.c create mode 100755 include/dt-bindings/starfive_fb.h create mode 100755 include/video/stf-vin.h diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig old mode 100644 new mode 100755 index 157c924686e4b6..3cd87484d8abd1 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig @@ -171,6 +171,7 @@ source "drivers/media/platform/xilinx/Kconfig" source "drivers/media/platform/rcar-vin/Kconfig" source "drivers/media/platform/atmel/Kconfig" source "drivers/media/platform/sunxi/Kconfig" +source "drivers/media/platform/starfive/Kconfig" config VIDEO_TI_CAL tristate "TI CAL (Camera Adaptation Layer) driver" diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile index eedc14aafb32c2..23141e53e53ef8 100644 --- a/drivers/media/platform/Makefile +++ b/drivers/media/platform/Makefile @@ -43,6 +43,8 @@ obj-$(CONFIG_VIDEO_STI_DELTA) += sti/delta/ obj-y += stm32/ +obj-y += starfive/ + obj-y += davinci/ obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o diff --git a/drivers/media/platform/starfive/Kconfig b/drivers/media/platform/starfive/Kconfig new file mode 100644 index 00000000000000..a79d7e1802d85d --- /dev/null +++ b/drivers/media/platform/starfive/Kconfig @@ -0,0 +1,32 @@ +# +# VIN sensor driver configuration +# +config VIDEO_STARFIVE_VIN + bool "StarFive VIC video input support" + depends on OF + help + Say Y here to enable support for StarFive VIC video input. + The driver is called stf-vin. + +choice + prompt "Image Sensor for VIC board" + default VIDEO_STARFIVE_VIN_SENSOR_IMX219 + depends on VIDEO_STARFIVE_VIN + optional + +config VIDEO_STARFIVE_VIN_SENSOR_OV5640 + bool "OmniVision OV5640 5MP MIPI Camera Module" + help + Say Y here if you want to have support for VIN sensor OV5640 + +config VIDEO_STARFIVE_VIN_SENSOR_IMX219 + bool "Sony IMX219 CMOS Image Sensor" + help + Say Y here if you want to have support for VIN sensor IMX219 + +config VIDEO_STARFIVE_VIN_SENSOR_SC2235 + bool "SmartSens Technology SC2235 CMOS Image Sensor" + help + Say Y here if you want to have support for VIN sensor SC2235 + +endchoice diff --git a/drivers/media/platform/starfive/Makefile b/drivers/media/platform/starfive/Makefile new file mode 100644 index 00000000000000..4585213935e653 --- /dev/null +++ b/drivers/media/platform/starfive/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the StarFive video input (VIN) drivers. 
+# + +obj-$(CONFIG_VIDEO_STARFIVE_VIN_SENSOR_OV5640) += ov5640_dvp.o +obj-$(CONFIG_VIDEO_STARFIVE_VIN_SENSOR_SC2235) += sc2235.o +obj-$(CONFIG_VIDEO_STARFIVE_VIN_SENSOR_IMX219) += imx219_mipi.o +obj-$(CONFIG_VIDEO_STARFIVE_VIN) += video_stf_vin.o +video_stf_vin-objs += stf_vin.o stf_event.o stf_isp.o stf_csi.o diff --git a/drivers/media/platform/starfive/imx219_mipi.c b/drivers/media/platform/starfive/imx219_mipi.c new file mode 100644 index 00000000000000..2bbc2abefbd40d --- /dev/null +++ b/drivers/media/platform/starfive/imx219_mipi.c @@ -0,0 +1,425 @@ +/* + * Copyright (C) 2011-2013 StarFive Technology Co., Ltd. All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include