Paul Mundt | ac44e66 | 2009-10-28 17:57:54 +0900 | [diff] [blame] | 1 | /* |
| 2 | * Performance events support for SH-4A performance counters |
| 3 | * |
| 4 | * Copyright (C) 2009 Paul Mundt |
| 5 | * |
| 6 | * This file is subject to the terms and conditions of the GNU General Public |
| 7 | * License. See the file "COPYING" in the main directory of this archive |
| 8 | * for more details. |
| 9 | */ |
| 10 | #include <linux/kernel.h> |
| 11 | #include <linux/init.h> |
| 12 | #include <linux/io.h> |
| 13 | #include <linux/irq.h> |
| 14 | #include <linux/perf_event.h> |
| 15 | #include <asm/processor.h> |
| 16 | |
/*
 * Per-channel counter registers: CCBR (control) and PMCTR (counter
 * value), laid out contiguously at sizeof(u32) strides.
 *
 * The macro argument is parenthesized so expressions such as
 * PPC_CCBR(i + 1) expand correctly.
 */
#define PPC_CCBR(idx)	(0xff200800 + (sizeof(u32) * (idx)))
#define PPC_PMCTR(idx)	(0xfc100000 + (sizeof(u32) * (idx)))

#define CCBR_CIT_MASK	(0x7ff << 6)	/* event selection field */
#define CCBR_DUC	(1 << 3)	/* count enable; toggled by enable/disable_all */
#define CCBR_CMDS	(1 << 1)
#define CCBR_PPCE	(1 << 0)

/* Shared counter status/control register. */
#define PPC_PMCAT	0xfc100080

#define PMCAT_OVF3	(1 << 27)
#define PMCAT_CNN3	(1 << 26)
#define PMCAT_CLR3	(1 << 25)
#define PMCAT_OVF2	(1 << 19)
#define PMCAT_CLR2	(1 << 17)
#define PMCAT_OVF1	(1 << 11)
#define PMCAT_CNN1	(1 << 10)
#define PMCAT_CLR1	(1 << 9)
#define PMCAT_OVF0	(1 << 3)
#define PMCAT_CLR0	(1 << 1)
| 37 | |
| 38 | static struct sh_pmu sh4a_pmu; |
| 39 | |
| 40 | /* |
Paul Mundt | 0fe69d7 | 2009-11-09 14:11:07 +0900 | [diff] [blame] | 41 | * Supported raw event codes: |
| 42 | * |
| 43 | * Event Code Description |
| 44 | * ---------- ----------- |
| 45 | * |
| 46 | * 0x0000 number of elapsed cycles |
| 47 | * 0x0200 number of elapsed cycles in privileged mode |
| 48 | * 0x0280 number of elapsed cycles while SR.BL is asserted |
| 49 | * 0x0202 instruction execution |
| 50 | * 0x0203 instruction execution in parallel |
| 51 | * 0x0204 number of unconditional branches |
| 52 | * 0x0208 number of exceptions |
| 53 | * 0x0209 number of interrupts |
| 54 | * 0x0220 UTLB miss caused by instruction fetch |
| 55 | * 0x0222 UTLB miss caused by operand access |
| 56 | * 0x02a0 number of ITLB misses |
| 57 | * 0x0028 number of accesses to instruction memories |
| 58 | * 0x0029 number of accesses to instruction cache |
| 59 | * 0x002a instruction cache miss |
 *	0x022e	number of accesses to instruction X/Y memory
| 61 | * 0x0030 number of reads to operand memories |
| 62 | * 0x0038 number of writes to operand memories |
| 63 | * 0x0031 number of operand cache read accesses |
| 64 | * 0x0039 number of operand cache write accesses |
| 65 | * 0x0032 operand cache read miss |
| 66 | * 0x003a operand cache write miss |
| 67 | * 0x0236 number of reads to operand X/Y memory |
| 68 | * 0x023e number of writes to operand X/Y memory |
| 69 | * 0x0237 number of reads to operand U memory |
| 70 | * 0x023f number of writes to operand U memory |
| 71 | * 0x0337 number of U memory read buffer misses |
| 72 | * 0x02b4 number of wait cycles due to operand read access |
| 73 | * 0x02bc number of wait cycles due to operand write access |
| 74 | * 0x0033 number of wait cycles due to operand cache read miss |
| 75 | * 0x003b number of wait cycles due to operand cache write miss |
| 76 | */ |
| 77 | |
/*
 * Special reserved bits used by hardware emulators, read values will
 * vary, but writes must always be 0.  This mask is cleared from the
 * PMCAT read-back in sh4a_pmu_enable() before writing it back.
 */
#define PMCAT_EMU_CLR_MASK ((1 << 24) | (1 << 16) | (1 << 8) | (1 << 0))
| 83 | |
/*
 * Generic perf hardware event ids -> SH-4A raw event codes (see the
 * table above).  -1 marks events with no SH-4A hardware counterpart.
 */
static const int sh4a_general_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = 0x0000,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x0202,
	[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0029,	/* I-cache */
	[PERF_COUNT_HW_CACHE_MISSES] = 0x002a,		/* I-cache */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0204,	/* unconditional branches only */
	[PERF_COUNT_HW_BRANCH_MISSES] = -1,
	[PERF_COUNT_HW_BUS_CYCLES] = -1,
};
| 93 | |
/* Shorthand to keep the table below readable. */
#define C(x) PERF_COUNT_HW_CACHE_##x

/*
 * Generic cache event -> SH-4A raw event code table.
 *
 * -1 marks (cache, op, result) combinations the hardware cannot count.
 * 0 appears to mean "no event assigned" — NOTE(review): confirm how
 * the common SH perf code distinguishes 0 from a real event here,
 * since 0x0000 is also the elapsed-cycles raw code.
 */
static const int sh4a_cache_events
			[PERF_COUNT_HW_CACHE_MAX]
			[PERF_COUNT_HW_CACHE_OP_MAX]
			[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
	[ C(L1D) ] = {	/* operand cache accesses/misses */
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0031,
			[ C(RESULT_MISS)   ] = 0x0032,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0039,
			[ C(RESULT_MISS)   ] = 0x003a,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(L1I) ] = {	/* instruction cache; writes are meaningless */
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0029,
			[ C(RESULT_MISS)   ] = 0x002a,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(LL) ] = {	/* operand memory reads/writes; no miss events */
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0030,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0x0038,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(DTLB) ] = {	/* UTLB operand access / operand-fetch misses */
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0x0222,
			[ C(RESULT_MISS)   ] = 0x0220,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0,
		},
	},

	[ C(ITLB) ] = {	/* only a miss counter exists (0x02a0) */
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS)   ] = 0x02a0,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},

	[ C(BPU) ] = {	/* no branch-prediction counters on SH-4A */
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS)   ] = -1,
		},
	},
};
| 191 | |
| 192 | static int sh4a_event_map(int event) |
| 193 | { |
| 194 | return sh4a_general_events[event]; |
| 195 | } |
| 196 | |
/*
 * Read the current value of counter @idx from its PMCTR register.
 * Returns u64 to satisfy the sh_pmu ->read() signature; the MMIO
 * read itself is 32 bits wide.
 */
static u64 sh4a_pmu_read(int idx)
{
	return __raw_readl(PPC_PMCTR(idx));
}
| 201 | |
| 202 | static void sh4a_pmu_disable(struct hw_perf_event *hwc, int idx) |
| 203 | { |
| 204 | unsigned int tmp; |
| 205 | |
| 206 | tmp = __raw_readl(PPC_CCBR(idx)); |
| 207 | tmp &= ~(CCBR_CIT_MASK | CCBR_DUC); |
| 208 | __raw_writel(tmp, PPC_CCBR(idx)); |
| 209 | } |
| 210 | |
/*
 * Configure and start counter @idx for the event in hwc->config.
 * The register write ordering below is deliberate; do not reorder.
 */
static void sh4a_pmu_enable(struct hw_perf_event *hwc, int idx)
{
	unsigned int tmp;

	/*
	 * Step 1: clear the counter's accumulated value via the PMCAT
	 * CLR0/CLR1 bit for this channel.  The emulator-reserved bits
	 * must always be written as 0 regardless of what they read
	 * back as, hence PMCAT_EMU_CLR_MASK.
	 */
	tmp = __raw_readl(PPC_PMCAT);
	tmp &= ~PMCAT_EMU_CLR_MASK;
	tmp |= idx ? PMCAT_CLR1 : PMCAT_CLR0;
	__raw_writel(tmp, PPC_PMCAT);

	/*
	 * Step 2: program the event code into the CCBR event selection
	 * field (bits 6+, cf. CCBR_CIT_MASK) and enable the channel.
	 * This ORs into the existing CCBR contents; the paired
	 * sh4a_pmu_disable() clears CCBR_CIT_MASK, which is what keeps
	 * stale event bits from surviving here.
	 */
	tmp = __raw_readl(PPC_CCBR(idx));
	tmp |= (hwc->config << 6) | CCBR_CMDS | CCBR_PPCE;
	__raw_writel(tmp, PPC_CCBR(idx));

	/*
	 * Step 3: set CCBR_DUC with a separate write to start counting,
	 * matching how enable_all/disable_all toggle only this bit.
	 */
	__raw_writel(__raw_readl(PPC_CCBR(idx)) | CCBR_DUC, PPC_CCBR(idx));
}
| 226 | |
| 227 | static void sh4a_pmu_disable_all(void) |
| 228 | { |
| 229 | int i; |
| 230 | |
| 231 | for (i = 0; i < sh4a_pmu.num_events; i++) |
| 232 | __raw_writel(__raw_readl(PPC_CCBR(i)) & ~CCBR_DUC, PPC_CCBR(i)); |
| 233 | } |
| 234 | |
| 235 | static void sh4a_pmu_enable_all(void) |
| 236 | { |
| 237 | int i; |
| 238 | |
| 239 | for (i = 0; i < sh4a_pmu.num_events; i++) |
| 240 | __raw_writel(__raw_readl(PPC_CCBR(i)) | CCBR_DUC, PPC_CCBR(i)); |
| 241 | } |
| 242 | |
/*
 * SH-4A PMU description handed to the common SH perf layer via
 * register_sh_pmu() in sh4a_pmu_init() below.
 */
static struct sh_pmu sh4a_pmu = {
	.name = "SH-4A",
	.num_events = 2,	/* two counter channels (CCBR0/CCBR1) */
	.event_map = sh4a_event_map,
	.max_events = ARRAY_SIZE(sh4a_general_events),
	.raw_event_mask = 0x3ff,	/* all documented raw codes fit in 10 bits;
					 * NOTE(review): CCBR_CIT_MASK is 11 bits
					 * wide — confirm 0x3ff is intended */
	.cache_events = &sh4a_cache_events,
	.read = sh4a_pmu_read,
	.disable = sh4a_pmu_disable,
	.enable = sh4a_pmu_enable,
	.disable_all = sh4a_pmu_disable_all,
	.enable_all = sh4a_pmu_enable_all,
};
| 256 | |
| 257 | static int __init sh4a_pmu_init(void) |
| 258 | { |
| 259 | /* |
| 260 | * Make sure this CPU actually has perf counters. |
| 261 | */ |
| 262 | if (!(boot_cpu_data.flags & CPU_HAS_PERF_COUNTER)) { |
| 263 | pr_notice("HW perf events unsupported, software events only.\n"); |
| 264 | return -ENODEV; |
| 265 | } |
| 266 | |
| 267 | return register_sh_pmu(&sh4a_pmu); |
| 268 | } |
| 269 | arch_initcall(sh4a_pmu_init); |