blob: 3429b483d9f85cd2495e01c8c0a11d05bc22e16c [file] [log] [blame]
Johannes Weinere5083a62009-03-04 16:21:31 +01001/*
2 * xtensa mmu stuff
3 *
4 * Extracted from init.c
5 */
Max Filippov65559102014-02-04 02:17:09 +04006#include <linux/bootmem.h>
Johannes Weinere5083a62009-03-04 16:21:31 +01007#include <linux/percpu.h>
8#include <linux/init.h>
9#include <linux/string.h>
10#include <linux/slab.h>
11#include <linux/cache.h>
12
13#include <asm/tlb.h>
14#include <asm/tlbflush.h>
15#include <asm/mmu_context.h>
16#include <asm/page.h>
Baruch Siach6cb97112013-12-29 11:03:30 +020017#include <asm/initialize_mmu.h>
18#include <asm/io.h>
Johannes Weinere5083a62009-03-04 16:21:31 +010019
Max Filippov65559102014-02-04 02:17:09 +040020#if defined(CONFIG_HIGHMEM)
21static void * __init init_pmd(unsigned long vaddr)
22{
23 pgd_t *pgd = pgd_offset_k(vaddr);
24 pmd_t *pmd = pmd_offset(pgd, vaddr);
25
26 if (pmd_none(*pmd)) {
27 unsigned i;
28 pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
29
30 for (i = 0; i < 1024; i++)
31 pte_clear(NULL, 0, pte + i);
32
33 set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
34 BUG_ON(pte != pte_offset_kernel(pmd, 0));
35 pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
36 __func__, vaddr, pmd, pte);
37 return pte;
38 } else {
39 return pte_offset_kernel(pmd, 0);
40 }
41}
42
43static void __init fixedrange_init(void)
44{
45 BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
46 init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
47}
48#endif
49
/*
 * Initialize kernel paging state at boot.
 *
 * Zeroes the kernel page directory, then (with CONFIG_HIGHMEM) sets up
 * the fixmap pte page, the persistent-kmap page table and the kmap
 * bookkeeping.  The order matters: fixedrange_init() and init_pmd()
 * must run before kmap_init() uses the tables they install.
 */
void __init paging_init(void)
{
	/* swapper_pg_dir contents are undefined at entry; start empty. */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	/* Page table backing the persistent kmap (PKMAP) window. */
	pkmap_page_table = init_pmd(PKMAP_BASE);
	kmap_init();
#endif
}
59
60/*
61 * Flush the mmu and reset associated register to default values.
62 */
/*
 * Flush the mmu and reset associated register to default values.
 *
 * Runs on every CPU (not __init) during bringup.  Sequence: program
 * page-size config (if the core lacks a spanning-way PTP MMU), remap
 * the KIO area (DT-configured kernels), flush all TLBs, then set the
 * ASID and page-table-base registers to known values.
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensure that valid values exist in the register.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array. For nonexistent PGSZID<w> fields,
	 * zero is the best value to write. Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
	/*
	 * Update the IO area mapping in case xtensa_kio_paddr has changed
	 */
	/* The "+ 6" selects TLB way 6 for these fixed entries; both the
	 * cached (writeback) and bypass KIO windows get I- and D-TLB
	 * entries pointing at the (possibly DT-overridden) KIO base. */
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			XCHAL_KIO_CACHED_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			XCHAL_KIO_CACHED_VADDR + 6);
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			XCHAL_KIO_BYPASS_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			XCHAL_KIO_BYPASS_VADDR + 6);
#endif

	/* Drop any stale translations left over from reset/firmware. */
	local_flush_tlb_all();

	/* Set rasid register to a known value. */

	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped). This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(PGTABLE_START);
}