#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H

/*
 * include/asm-sh/pgtable_64.h
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Error outputs.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte_low);
	unsigned long long *xp = (unsigned long long *) pteptr;
	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
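
/*
 * Worked example of the sign extension above (a sketch only, assuming
 * NPHYS == 32, so that NPHYS_SIGN selects bit 31 and NPHYS_MASK fills
 * bits [63:32]):
 *
 *	pte_low 0x7ffff000 -> stored as 0x000000007ffff000 (sign bit clear)
 *	pte_low 0x80001000 -> stored as 0xffffffff80001000 (sign bit set)
 */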

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address)	pgd_index(address)
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY		0x0
/* Either the PMD is empty or present, it's not paged out */
#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry)	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_vaddr(pmd_entry) \
	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
	(virt_to_page(pmd_val(pmd)))

/* PMD to PTE dereferencing */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define __pte_offset(address)	pte_index(address)

#define pte_offset_kernel(dir, addr) \
	((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir, addr)	pte_offset_kernel(dir, addr)
#define pte_unmap(pte)			do { } while (0)
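
/*
 * A minimal sketch of how the macros above compose into a software
 * table walk for a kernel virtual address (illustrative only; it
 * assumes the pud/pmd levels are folded so that a PGD entry may be
 * treated as a PMD entry):
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *	pmd_t *pmd = (pmd_t *)pgd;
 *
 *	if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *		pte_t *pte = pte_offset_kernel(pmd, address);
 *		...
 *	}
 */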

#ifndef __ASSEMBLY__
/*
 * PTEL coherent flags.
 * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill. The remaining
   bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page
   is swapped out. Only the _PAGE_PRESENT flag is significant when the page
   is swapped out, and it must be placed so that it doesn't overlap either
   the type or offset fields of swp_entry_t. On x86, offset is at [31:8] and
   type at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.
   This scheme doesn't map to SH-5 because bit [0] controls cacheability.
   So bit [2] is used for _PAGE_PRESENT, and the type field of swp_entry_t
   is split into 2 pieces. That is handled by __swp_entry() and __swp_type()
   below. */
#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE	0x002  /* CB1: 0->uncachable, 1->cachable */
#define _PAGE_PRESENT	0x004  /* software: page is present */
#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY	0x400  /* software: page accessed in write */
#define _PAGE_ACCESSED	0x800  /* software: page referenced */

/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x)	((unsigned long long)(x) << 32)

/*
 * We can use the sign-extended bits in the PTEL to get 32 bits of
 * software flags. This works for now because no implementation uses
 * anything above the PPN field.
 */
#define _PAGE_WIRED	_PAGE_EXT(0x001)	/* software: wire the tlb entry */
#define _PAGE_SPECIAL	_PAGE_EXT(0x002)
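
/*
 * For example, _PAGE_WIRED expands to 1ULL << 32: the first bit above
 * the 32-bit hardware PTEL image, and therefore visible to software
 * only.
 */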

#define _PAGE_CLEAR_FLAGS	(_PAGE_PRESENT | _PAGE_SHARED | \
				 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	(NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))

/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	(_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
#endif

/*
 * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
 * to make pte_mkhuge() happy.
 */
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE	(0)
#endif

/*
 * Default flags for a kernel page. This is fundamentally also SHARED
 * because the main use of this define (other than for PGD/PMD entries)
 * is for the VMALLOC pool, which is contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SHARED)

/* Default flags for a user page */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SPECIAL)

/*
 * Flags common to every user page; the Read/Write/Execute/Shared
 * permission bits are OR'd in by the protection modes below.
 */
#define _PAGE_COMMON	(_PAGE_PRESENT | _PAGE_USER | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED)

#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_SHARED)
#define PAGE_EXECREAD	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)

/*
 * We need to include _PAGE_EXECUTE in PAGE_COPY because it is the
 * default protection mode for the stack.
 */
#define PAGE_COPY	PAGE_EXECREAD

#define PAGE_READONLY	__pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX	__pgprot(_PAGE_COMMON | _PAGE_READ | \
				 _PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)

#define PAGE_KERNEL_NOCACHE \
	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
		 _PAGE_EXECUTE | _PAGE_ACCESSED | \
		 _PAGE_DIRTY | _PAGE_SHARED)

/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map). */
#define pgprot_noncached(x)	__pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot)	__pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
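
/*
 * Typical use from a (hypothetical) driver mmap() handler, where the
 * mapping must bypass the cache:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 */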

/*
 * PTE level access routines.
 *
 * Note 1:
 * This is the leaf of the tree walk: the PTE holds the physical address
 * to be stored.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:

   We must choose a bit pattern that cannot be valid, whether or not the page
   is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
   out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7]
   is left for us to select. If we force bit[7]==0 when swapped out, we could
   use the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively,
   if we force bit[7]==1 when swapped out, we can use all zeroes to indicate
   empty. This is convenient, because the page tables get cleared to zero
   when they are allocated.

 */
#define _PTE_EMPTY		0x0
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)		(pte_val(x) == _PTE_EMPTY)

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame referenced by the PTE: take the absolute physical
 * address, make it relative to __MEMORY_START, and translate it to an
 * index.
 */
#define pte_pagenr(x)		(((unsigned long) (pte_val(x)) - \
				 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)		(mem_map + pte_pagenr(x))
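
/*
 * For instance (a sketch, assuming PAGE_SHIFT == 12): a PTE holding
 * physical address __MEMORY_START + 0x5000 gives pte_pagenr() == 5,
 * so pte_page() returns &mem_map[5].
 */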

/*
 * Return number of (down rounded) MB corresponding to x pages.
 */
#define pages_to_mb(x)		((x) >> (20-PAGE_SHIFT))
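
/*
 * E.g. with PAGE_SHIFT == 12 (4 KiB pages), pages_to_mb(8192) ==
 * 8192 >> 8 == 32, i.e. 8192 pages span 32 MB.
 */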

/*
 * The following have defined behavior only if pte_present() is true.
 */
static inline int pte_dirty(pte_t pte)	 { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	 { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)	 { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }

static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page, pgprot)						\
({									\
	pte_t __pte;							\
									\
	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) |	\
		__MEMORY_START | pgprot_val((pgprot))));		\
	__pte;								\
})

/*
 * This takes an (absolute) physical page address, as used by the
 * remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
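
/*
 * E.g. pte_modify(pte, PAGE_READONLY) replaces the protection bits
 * while _PAGE_CHG_MASK preserves the page frame number and the
 * accessed/dirty/special software state.
 */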

/* Encode and decode a swap entry */
#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { (((offset) << 8) + (((type) & 0x3c) << 1) + ((type) & 3)) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
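
/*
 * Worked example of the encoding above: type 0x2f, offset 0x1234 packs
 * as
 *
 *	val = (0x1234 << 8) | ((0x2f & 0x3c) << 1) | (0x2f & 3)
 *	    = 0x123400 | 0x58 | 0x3
 *	    = 0x12345b
 *
 * Bit 2 (_PAGE_PRESENT) is skipped and stays clear, so a swap entry can
 * never be mistaken for a present PTE; __swp_type() and __swp_offset()
 * invert the transform.
 */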

#endif /* !__ASSEMBLY__ */

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#endif /* __ASM_SH_PGTABLE_64_H */