/*
 * Per core/cpu state
 *
 * Used to coordinate shared registers between HT threads or
 * among events on a single PMU.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/watchdog.h>

#include <asm/cpufeature.h>
#include <asm/hardirq.h>
#include <asm/apic.h>

#include "perf_event.h"

/*
 * Intel PerfMon, used on Core and later.
 */
static u64 intel_perfmon_event_map[PERF_COUNT_HW_MAX] __read_mostly =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
	[PERF_COUNT_HW_REF_CPU_CYCLES]		= 0x0300, /* pseudo-encoding */
};
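
/*
 * In the encoding above, the low byte is the event select and the second
 * byte is the unit mask; e.g. 0x412e is event 0x2e with umask 0x41, the
 * architectural LONGEST_LAT_CACHE.MISS event described in the Intel SDM.
 */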

static struct event_constraint intel_core_event_constraints[] __read_mostly =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};
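
/*
 * The second argument of INTEL_EVENT_CONSTRAINT() is a bitmask of the
 * generic counters the event may run on (0x1 = counter 0 only, 0x2 =
 * counter 1 only, 0x3 = counters 0-1, 0xf = counters 0-3), while
 * FIXED_EVENT_CONSTRAINT() takes the index of the fixed counter that
 * backs the event.
 */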

static struct event_constraint intel_core2_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_westmere_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xb3, 0x1), /* SNOOPQ_REQUEST_OUTSTANDING */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_snb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_DISPATCH */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */

	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0279, 0xf), /* IDQ.EMPTY */
	INTEL_UEVENT_CONSTRAINT(0x019c, 0xf), /* IDQ_UOPS_NOT_DELIVERED.CORE */
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x05a3, 0xf), /* CYCLE_ACTIVITY.STALLS_L2_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x06a3, 0xf), /* CYCLE_ACTIVITY.STALLS_LDM_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */

	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
	EVENT_EXTRA_END
};

static struct event_constraint intel_v1_event_constraints[] __read_mostly =
{
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_slm_event_constraints[] __read_mostly =
{
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	EVENT_CONSTRAINT_END
};

static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
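
/*
 * The extra_reg tables above describe the OFFCORE_RSP MSRs that back the
 * offcore-response events.  On HT parts these MSRs are shared by sibling
 * threads, which is why access to them is coordinated through the
 * per-core state this file maintains (see the comment at the top).
 */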
195
Ingo Molnar7f2ee912013-09-12 19:17:00 +0200196EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
197EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
198EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
Stephane Eranianf20093e2013-01-24 16:10:32 +0100199
200struct attribute *nhm_events_attrs[] = {
201 EVENT_PTR(mem_ld_nhm),
202 NULL,
203};
204
205struct attribute *snb_events_attrs[] = {
206 EVENT_PTR(mem_ld_snb),
Stephane Eranian9ad64c02013-01-24 16:10:34 +0100207 EVENT_PTR(mem_st_snb),
Stephane Eranianf20093e2013-01-24 16:10:32 +0100208 NULL,
209};
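
/*
 * These attributes are exported through sysfs as named events.  As a
 * usage sketch (exact syntax depends on the perf tool version), e.g.
 * "perf record -e cpu/mem-loads/" resolves to event=0xcd,umask=0x1,
 * ldlat=3 on SNB.
 */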

static struct event_constraint intel_hsw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),

	INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
	INTEL_EXCLEVT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */

	EVENT_CONSTRAINT_END
};

struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
	INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

#define SNB_DMND_DATA_RD	(1ULL << 0)
#define SNB_DMND_RFO		(1ULL << 1)
#define SNB_DMND_IFETCH		(1ULL << 2)
#define SNB_DMND_WB		(1ULL << 3)
#define SNB_PF_DATA_RD		(1ULL << 4)
#define SNB_PF_RFO		(1ULL << 5)
#define SNB_PF_IFETCH		(1ULL << 6)
#define SNB_LLC_DATA_RD		(1ULL << 7)
#define SNB_LLC_RFO		(1ULL << 8)
#define SNB_LLC_IFETCH		(1ULL << 9)
#define SNB_BUS_LOCKS		(1ULL << 10)
#define SNB_STRM_ST		(1ULL << 11)
#define SNB_OTHER		(1ULL << 15)
#define SNB_RESP_ANY		(1ULL << 16)
#define SNB_NO_SUPP		(1ULL << 17)
#define SNB_LLC_HITM		(1ULL << 18)
#define SNB_LLC_HITE		(1ULL << 19)
#define SNB_LLC_HITS		(1ULL << 20)
#define SNB_LLC_HITF		(1ULL << 21)
#define SNB_LOCAL		(1ULL << 22)
#define SNB_REMOTE		(0xffULL << 23)
#define SNB_SNP_NONE		(1ULL << 31)
#define SNB_SNP_NOT_NEEDED	(1ULL << 32)
#define SNB_SNP_MISS		(1ULL << 33)
#define SNB_NO_FWD		(1ULL << 34)
#define SNB_SNP_FWD		(1ULL << 35)
#define SNB_HITM		(1ULL << 36)
#define SNB_NON_DRAM		(1ULL << 37)

#define SNB_DMND_READ		(SNB_DMND_DATA_RD|SNB_LLC_DATA_RD)
#define SNB_DMND_WRITE		(SNB_DMND_RFO|SNB_LLC_RFO)
#define SNB_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SNB_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_NOT_NEEDED| \
				 SNB_SNP_MISS|SNB_NO_FWD|SNB_SNP_FWD| \
				 SNB_HITM)

#define SNB_DRAM_ANY		(SNB_LOCAL|SNB_REMOTE|SNB_SNP_ANY)
#define SNB_DRAM_REMOTE		(SNB_REMOTE|SNB_SNP_ANY)

#define SNB_L3_ACCESS		SNB_RESP_ANY
#define SNB_L3_MISS		(SNB_DRAM_ANY|SNB_NON_DRAM)
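
/*
 * The SNB_* bits above form the values programmed into
 * MSR_OFFCORE_RSP_[01] via the extra_reg machinery; e.g. an LL read
 * access is encoded as SNB_DMND_READ|SNB_L3_ACCESS in the table below.
 */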

static __initconst const u64 snb_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_L3_ACCESS,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_READ|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_READ|SNB_DRAM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_WRITE|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_WRITE|SNB_DRAM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SNB_DMND_PREFETCH|SNB_DRAM_ANY,
		[ C(RESULT_MISS)   ] = SNB_DMND_PREFETCH|SNB_DRAM_REMOTE,
	},
 },
};

static __initconst const u64 snb_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xf1d0, /* MEM_UOP_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0xf2d0, /* MEM_UOP_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0851, /* L1D.ALL_M_REPLACEMENT */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x024e, /* HW_PRE_REQ.DL1_MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOP_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOP_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1085, /* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },

};

/*
 * Notes on the events:
 * - data reads do not include code reads (comparable to earlier tables)
 * - data counts include speculative execution (except L1 write, dtlb, bpu)
 * - remote node access includes remote memory, remote cache, remote mmio.
 * - prefetches are not included in the counts because they are not
 *   reliably counted.
 */

#define HSW_DEMAND_DATA_RD		BIT_ULL(0)
#define HSW_DEMAND_RFO			BIT_ULL(1)
#define HSW_ANY_RESPONSE		BIT_ULL(16)
#define HSW_SUPPLIER_NONE		BIT_ULL(17)
#define HSW_L3_MISS_LOCAL_DRAM		BIT_ULL(22)
#define HSW_L3_MISS_REMOTE_HOP0		BIT_ULL(27)
#define HSW_L3_MISS_REMOTE_HOP1		BIT_ULL(28)
#define HSW_L3_MISS_REMOTE_HOP2P	BIT_ULL(29)
#define HSW_L3_MISS			(HSW_L3_MISS_LOCAL_DRAM| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_SNOOP_NONE			BIT_ULL(31)
#define HSW_SNOOP_NOT_NEEDED		BIT_ULL(32)
#define HSW_SNOOP_MISS			BIT_ULL(33)
#define HSW_SNOOP_HIT_NO_FWD		BIT_ULL(34)
#define HSW_SNOOP_HIT_WITH_FWD		BIT_ULL(35)
#define HSW_SNOOP_HITM			BIT_ULL(36)
#define HSW_SNOOP_NON_DRAM		BIT_ULL(37)
#define HSW_ANY_SNOOP			(HSW_SNOOP_NONE| \
					 HSW_SNOOP_NOT_NEEDED|HSW_SNOOP_MISS| \
					 HSW_SNOOP_HIT_NO_FWD|HSW_SNOOP_HIT_WITH_FWD| \
					 HSW_SNOOP_HITM|HSW_SNOOP_NON_DRAM)
#define HSW_SNOOP_DRAM			(HSW_ANY_SNOOP & ~HSW_SNOOP_NON_DRAM)
#define HSW_DEMAND_READ			HSW_DEMAND_DATA_RD
#define HSW_DEMAND_WRITE		HSW_DEMAND_RFO
#define HSW_L3_MISS_REMOTE		(HSW_L3_MISS_REMOTE_HOP0|\
					 HSW_L3_MISS_REMOTE_HOP1|HSW_L3_MISS_REMOTE_HOP2P)
#define HSW_LLC_ACCESS			HSW_ANY_RESPONSE

#define BDW_L3_MISS_LOCAL		BIT(26)
#define BDW_L3_MISS			(BDW_L3_MISS_LOCAL| \
					 HSW_L3_MISS_REMOTE_HOP0|HSW_L3_MISS_REMOTE_HOP1| \
					 HSW_L3_MISS_REMOTE_HOP2P)


static __initconst const u64 hsw_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x151,	/* L1D.REPLACEMENT */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x280,	/* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x81d0,	/* MEM_UOPS_RETIRED.ALL_LOADS */
		[ C(RESULT_MISS)   ] = 0x108,	/* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x82d0,	/* MEM_UOPS_RETIRED.ALL_STORES */
		[ C(RESULT_MISS)   ] = 0x149,	/* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x6085,	/* ITLB_MISSES.STLB_HIT */
		[ C(RESULT_MISS)   ] = 0x185,	/* ITLB_MISSES.MISS_CAUSES_A_WALK */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0xc4,	/* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0xc5,	/* BR_MISP_RETIRED.ALL_BRANCHES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x1b7,	/* OFFCORE_RESPONSE */
		[ C(RESULT_MISS)   ] = 0x1b7,	/* OFFCORE_RESPONSE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 hsw_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS|HSW_ANY_SNOOP,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_READ|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_LOCAL_DRAM|
				       HSW_SNOOP_DRAM,
		[ C(RESULT_MISS)   ] = HSW_DEMAND_WRITE|
				       HSW_L3_MISS_REMOTE|
				       HSW_SNOOP_DRAM,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
};

static __initconst const u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

/*
 * Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
 * See IA32 SDM Vol 3B 30.6.1.3
 */

#define NHM_DMND_DATA_RD	(1 << 0)
#define NHM_DMND_RFO		(1 << 1)
#define NHM_DMND_IFETCH		(1 << 2)
#define NHM_DMND_WB		(1 << 3)
#define NHM_PF_DATA_RD		(1 << 4)
#define NHM_PF_DATA_RFO		(1 << 5)
#define NHM_PF_IFETCH		(1 << 6)
#define NHM_OFFCORE_OTHER	(1 << 7)
#define NHM_UNCORE_HIT		(1 << 8)
#define NHM_OTHER_CORE_HIT_SNP	(1 << 9)
#define NHM_OTHER_CORE_HITM	(1 << 10)
				/* reserved */
#define NHM_REMOTE_CACHE_FWD	(1 << 12)
#define NHM_REMOTE_DRAM		(1 << 13)
#define NHM_LOCAL_DRAM		(1 << 14)
#define NHM_NON_DRAM		(1 << 15)

#define NHM_LOCAL		(NHM_LOCAL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_REMOTE		(NHM_REMOTE_DRAM)

#define NHM_DMND_READ		(NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE		(NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH	(NHM_PF_DATA_RD|NHM_PF_DATA_RFO)

#define NHM_L3_HIT	(NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS	(NHM_NON_DRAM|NHM_LOCAL_DRAM|NHM_REMOTE_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS	(NHM_L3_HIT|NHM_L3_MISS)

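/*
 * As with the SNB_* bits above, these form the value written to
 * MSR_OFFCORE_RSP_x; e.g. an LL read access is encoded as
 * NHM_DMND_READ|NHM_L3_ACCESS in the table below.
 */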
static __initconst const u64 nehalem_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_L3_MISS,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_L3_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_READ|NHM_REMOTE,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_WRITE|NHM_REMOTE,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_LOCAL|NHM_REMOTE,
		[ C(RESULT_MISS)   ] = NHM_DMND_PREFETCH|NHM_REMOTE,
	},
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	/*
	 * Use RFO, not WRITEBACK, because a write miss would typically occur
	 * on RFO.
	 */
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(NODE) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
};

static __initconst const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static __initconst const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
		[ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

static struct extra_reg intel_slm_extra_regs[] __read_mostly =
{
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */
	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffffull, RSP_0),
	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffffull, RSP_1),
	EVENT_EXTRA_END
};

#define SLM_DMND_READ		SNB_DMND_DATA_RD
#define SLM_DMND_WRITE		SNB_DMND_RFO
#define SLM_DMND_PREFETCH	(SNB_PF_DATA_RD|SNB_PF_RFO)

#define SLM_SNP_ANY		(SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
#define SLM_LLC_ACCESS		SNB_RESP_ANY
#define SLM_LLC_MISS		(SLM_SNP_ANY|SNB_NON_DRAM)

static __initconst const u64 slm_hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_WRITE|SLM_LLC_MISS,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
		[ C(RESULT_MISS)   ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
	},
 },
};

static __initconst const u64 slm_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0104, /* LD_DCU_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
		[ C(RESULT_MISS)   ] = 0x0280, /* ICACHE.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(LL  ) ] = {
	[ C(OP_READ) ] = {
		/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_WRITE) ] = {
		/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
	[ C(OP_PREFETCH) ] = {
		/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
		[ C(RESULT_ACCESS) ] = 0x01b7,
		/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
		[ C(RESULT_MISS)   ] = 0x01b7,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0x0804, /* LD_DTLB_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS)   ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS)   ] = -1,
	},
 },
};

/*
 * Use from PMIs where the LBRs are already disabled.
 */
static void __intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
	else
		intel_bts_disable_local();

	intel_pmu_pebs_disable_all();
}

static void intel_pmu_disable_all(void)
{
	__intel_pmu_disable_all();
	intel_pmu_lbr_disable_all();
}

static void __intel_pmu_enable_all(int added, bool pmi)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	intel_pmu_pebs_enable_all();
	intel_pmu_lbr_enable_all(pmi);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
			x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[INTEL_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	} else
		intel_bts_enable_local();
}

static void intel_pmu_enable_all(int added)
{
	__intel_pmu_enable_all(added, false);
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * The official story:
 *   These chips need to be 'reset' when adding counters by programming the
 *   magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either
 *   in sequence on the same PMC or on different PMCs.
 *
 * In practice it appears some of these events do in fact count, and
 * we need to program all 4 events.
 */
static void intel_pmu_nhm_workaround(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	static const unsigned long nhm_magic[4] = {
		0x4300B5,
		0x4300D2,
		0x4300B1,
		0x4300B1
	};
	struct perf_event *event;
	int i;

	/*
	 * The errata requires the following steps:
	 * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL;
	 * 2) Configure 4 PERFEVTSELx with the magic events and clear
	 *    the corresponding PMCx;
	 * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL;
	 * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL;
	 * 5) Clear 4 pairs of PERFEVTSELx and PMCx;
	 */

	/*
	 * The real steps we choose are a little different from above.
	 * A) To reduce MSR operations, we don't run step 1) as they
	 *    are already cleared before this function is called;
	 * B) Call x86_perf_event_update to save PMCx before configuring
	 *    PERFEVTSELx with magic number;
	 * C) With step 5), we do clear only when the PERFEVTSELx is
	 *    not used currently.
	 * D) Call x86_perf_event_set_period to restore PMCx;
	 */

	/* We always operate 4 pairs of PERF Counters */
	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];
		if (event)
			x86_perf_event_update(event);
	}

	for (i = 0; i < 4; i++) {
		wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]);
		wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0);
	}

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

	for (i = 0; i < 4; i++) {
		event = cpuc->events[i];

		if (event) {
			x86_perf_event_set_period(event);
			__x86_pmu_enable_event(&event->hw,
					ARCH_PERFMON_EVENTSEL_ENABLE);
		} else
			wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0);
	}
}

static void intel_pmu_nhm_enable_all(int added)
{
	if (added)
		intel_pmu_nhm_workaround();
	intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}
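
/*
 * intel_pmu_get_status()/intel_pmu_ack_status() implement the usual
 * overflow protocol: the PMI handler reads GLOBAL_STATUS to find the
 * counters that overflowed, then writes those same bits back to
 * GLOBAL_OVF_CTRL to clear them before re-enabling the PMU.
 */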

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
	int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	wrmsrl(hwc->config_base, ctrl_val);
}
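
/*
 * Each fixed counter owns a 4-bit control field in
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (hwc->config_base here); clearing that
 * field stops the counter.  intel_pmu_enable_fixed() below documents the
 * individual bits.
 */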

static inline bool event_is_checkpointed(struct perf_event *event)
{
	return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}

static void intel_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
		intel_pmu_disable_bts();
		intel_pmu_drain_bts_buffer();
		return;
	}

	cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
	cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
	cpuc->intel_cp_status &= ~(1ull << hwc->idx);

	/*
	 * LBR must be disabled before any actual event,
	 * because any event may be combined with LBR.
	 */
	if (needs_branch_stack(event))
		intel_pmu_lbr_disable(event);

	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc);
		return;
	}

	x86_pmu_disable_event(event);

	if (unlikely(event->attr.precise_ip))
		intel_pmu_pebs_disable(event);
}
1439
Peter Zijlstraca037702010-03-02 19:52:12 +01001440static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001441{
Robert Richter15c7ad52012-06-20 20:46:33 +02001442 int idx = hwc->idx - INTEL_PMC_IDX_FIXED;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001443 u64 ctrl_val, bits, mask;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001444
1445 /*
1446 * Enable IRQ generation (0x8),
1447 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1448 * if requested:
1449 */
1450 bits = 0x8ULL;
1451 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1452 bits |= 0x2;
1453 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1454 bits |= 0x1;
1455
1456 /*
1457 * ANY bit is supported in v3 and up
1458 */
1459 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1460 bits |= 0x4;
1461
1462 bits <<= (idx * 4);
1463 mask = 0xfULL << (idx * 4);
1464
1465 rdmsrl(hwc->config_base, ctrl_val);
1466 ctrl_val &= ~mask;
1467 ctrl_val |= bits;
Peter Zijlstra7645a242010-03-08 13:51:31 +01001468 wrmsrl(hwc->config_base, ctrl_val);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001469}
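/*
 * A worked example of the fixed-counter field layout above (an
 * illustrative sketch, nothing normative). Each fixed counter owns a
 * 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL, so for fixed
 * counter 1 counting ring-3 only with PMI enabled:
 *
 *	bits  = 0x8 | 0x2;		/* PMI + ring-3    -> 0xa  */
 *	bits <<= (1 * 4);		/* counter 1 field -> 0xa0 */
 *	mask  = 0xfULL << (1 * 4);	/* clears stale 0xf0 bits  */
 *
 * The read-modify-write of ctrl_val therefore leaves the other fixed
 * counters' fields untouched.
 */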
1470
Peter Zijlstraaff3d912010-03-02 20:32:08 +01001471static void intel_pmu_enable_event(struct perf_event *event)
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001472{
Peter Zijlstraaff3d912010-03-02 20:32:08 +01001473 struct hw_perf_event *hwc = &event->hw;
Christoph Lameter89cbc762014-08-17 12:30:40 -05001474 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstraaff3d912010-03-02 20:32:08 +01001475
Robert Richter15c7ad52012-06-20 20:46:33 +02001476 if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
Tejun Heo0a3aee02010-12-18 16:28:55 +01001477 if (!__this_cpu_read(cpu_hw_events.enabled))
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001478 return;
1479
1480 intel_pmu_enable_bts(hwc->config);
1481 return;
1482 }
Stephane Eranian60ce0fb2012-02-09 23:20:57 +01001483 /*
1484	 * LBR must be enabled before the actual event is enabled,
1485	 * because any event may be combined with LBR
1486 */
Yan, Zhenga46a2302014-11-04 21:56:06 -05001487 if (needs_branch_stack(event))
Stephane Eranian60ce0fb2012-02-09 23:20:57 +01001488 intel_pmu_lbr_enable(event);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001489
Gleb Natapov144d31e2011-10-05 14:01:21 +02001490 if (event->attr.exclude_host)
1491 cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
1492 if (event->attr.exclude_guest)
1493 cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
1494
Peter Zijlstra2b9e3442013-09-12 12:53:44 +02001495 if (unlikely(event_is_checkpointed(event)))
1496 cpuc->intel_cp_status |= (1ull << hwc->idx);
1497
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001498 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
Peter Zijlstraaff3d912010-03-02 20:32:08 +01001499 intel_pmu_enable_fixed(hwc);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001500 return;
1501 }
1502
Peter Zijlstraab608342010-04-08 23:03:20 +02001503 if (unlikely(event->attr.precise_ip))
Peter Zijlstraef21f682010-03-03 13:12:23 +01001504 intel_pmu_pebs_enable(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001505
Robert Richter31fa58a2010-04-13 22:23:14 +02001506 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001507}
1508
1509/*
1510 * Save and restart an expired event. Called by NMI contexts,
1511 * so it has to be careful about preempting normal event ops:
1512 */
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001513int intel_pmu_save_and_restart(struct perf_event *event)
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001514{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001515 x86_perf_event_update(event);
Andi Kleen2dbf0112013-09-05 20:37:38 -07001516 /*
1517 * For a checkpointed counter always reset back to 0. This
1518 * avoids a situation where the counter overflows, aborts the
1519	 * transaction and is then set back to a value shortly before the
1520 * overflow, and overflows and aborts again.
1521 */
1522 if (unlikely(event_is_checkpointed(event))) {
1523 /* No race with NMIs because the counter should not be armed */
1524 wrmsrl(event->hw.event_base, 0);
1525 local64_set(&event->hw.prev_count, 0);
1526 }
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001527 return x86_perf_event_set_period(event);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001528}
1529
1530static void intel_pmu_reset(void)
1531{
Tejun Heo0a3aee02010-12-18 16:28:55 +01001532 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001533 unsigned long flags;
1534 int idx;
1535
Robert Richter948b1bb2010-03-29 18:36:50 +02001536 if (!x86_pmu.num_counters)
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001537 return;
1538
1539 local_irq_save(flags);
1540
Joe Perchesc767a542012-05-21 19:50:07 -07001541 pr_info("clearing PMU state on CPU#%d\n", smp_processor_id());
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001542
Robert Richter948b1bb2010-03-29 18:36:50 +02001543 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
H. Peter Anvin715c85b2012-06-07 13:32:04 -07001544 wrmsrl_safe(x86_pmu_config_addr(idx), 0ull);
1545 wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001546 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001547 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
H. Peter Anvin715c85b2012-06-07 13:32:04 -07001548 wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
Robert Richter948b1bb2010-03-29 18:36:50 +02001549
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001550 if (ds)
1551 ds->bts_index = ds->bts_buffer_base;
1552
Andi Kleen8882edf2015-02-27 09:48:30 -08001553 /* Ack all overflows and disable fixed counters */
1554 if (x86_pmu.version >= 2) {
1555 intel_pmu_ack_status(intel_pmu_get_status());
1556 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1557 }
1558
1559 /* Reset LBRs and LBR freezing */
1560 if (x86_pmu.lbr_nr) {
1561 update_debugctlmsr(get_debugctlmsr() &
1562 ~(DEBUGCTLMSR_FREEZE_LBRS_ON_PMI|DEBUGCTLMSR_LBR));
1563 }
1564
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001565 local_irq_restore(flags);
1566}
1567
1568/*
1569 * This handler is triggered by the local APIC, so the APIC IRQ handling
1570 * rules apply:
1571 */
1572static int intel_pmu_handle_irq(struct pt_regs *regs)
1573{
1574 struct perf_sample_data data;
1575 struct cpu_hw_events *cpuc;
1576 int bit, loops;
Don Zickus2e556b52010-09-02 15:07:47 -04001577 u64 status;
Stephane Eranianb0b20722010-09-10 13:28:01 +02001578 int handled;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001579
Christoph Lameter89cbc762014-08-17 12:30:40 -05001580 cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001581
Don Zickus2bce5da2011-04-27 06:32:33 -04001582 /*
Andi Kleen72db5592013-06-17 17:36:50 -07001583	 * There is no known reason not to always do the late ACK,
1584	 * but keep it opt-in just in case.
Don Zickus2bce5da2011-04-27 06:32:33 -04001585 */
Andi Kleen72db5592013-06-17 17:36:50 -07001586 if (!x86_pmu.late_ack)
1587 apic_write(APIC_LVTPC, APIC_DM_NMI);
Andi Kleen1a78d932015-03-20 10:11:23 -07001588 __intel_pmu_disable_all();
Stephane Eranianb0b20722010-09-10 13:28:01 +02001589 handled = intel_pmu_drain_bts_buffer();
Alexander Shishkin80623822015-01-30 12:40:35 +02001590 handled += intel_bts_interrupt();
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001591 status = intel_pmu_get_status();
Markus Metzgera3ef2222014-02-14 16:44:08 -08001592 if (!status)
1593 goto done;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001594
1595 loops = 0;
1596again:
Don Zickus2e556b52010-09-02 15:07:47 -04001597 intel_pmu_ack_status(status);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001598 if (++loops > 100) {
Dave Hansenae0def02013-05-30 10:45:59 -07001599 static bool warned = false;
1600 if (!warned) {
1601 WARN(1, "perfevents: irq loop stuck!\n");
1602 perf_event_print_debug();
1603 warned = true;
1604 }
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001605 intel_pmu_reset();
Peter Zijlstra3fb2b8d2010-03-08 13:51:01 +01001606 goto done;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001607 }
1608
1609 inc_irq_stat(apic_perf_irqs);
Peter Zijlstraca037702010-03-02 19:52:12 +01001610
Peter Zijlstracaff2be2010-03-03 12:02:30 +01001611 intel_pmu_lbr_read();
1612
Peter Zijlstraca037702010-03-02 19:52:12 +01001613 /*
HATAYAMA Daisukeb292d7a2014-06-25 10:09:07 +09001614	 * The CondChgd bit (63) does not indicate any overflow status. Ignore
1615	 * and clear the bit.
1616 */
1617 if (__test_and_clear_bit(63, (unsigned long *)&status)) {
1618 if (!status)
1619 goto done;
1620 }
1621
1622 /*
Peter Zijlstraca037702010-03-02 19:52:12 +01001623 * PEBS overflow sets bit 62 in the global status register
1624 */
Peter Zijlstrade725de2010-09-02 15:07:49 -04001625 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
1626 handled++;
Peter Zijlstraca037702010-03-02 19:52:12 +01001627 x86_pmu.drain_pebs(regs);
Peter Zijlstrade725de2010-09-02 15:07:49 -04001628 }
Peter Zijlstraca037702010-03-02 19:52:12 +01001629
Andi Kleen2dbf0112013-09-05 20:37:38 -07001630 /*
Alexander Shishkin52ca9ce2015-01-30 12:39:52 +02001631 * Intel PT
1632 */
1633 if (__test_and_clear_bit(55, (unsigned long *)&status)) {
1634 handled++;
1635 intel_pt_interrupt();
1636 }
1637
1638 /*
Peter Zijlstra2b9e3442013-09-12 12:53:44 +02001639 * Checkpointed counters can lead to 'spurious' PMIs because the
1640 * rollback caused by the PMI will have cleared the overflow status
1641 * bit. Therefore always force probe these counters.
Andi Kleen2dbf0112013-09-05 20:37:38 -07001642 */
Peter Zijlstra2b9e3442013-09-12 12:53:44 +02001643 status |= cpuc->intel_cp_status;
Andi Kleen2dbf0112013-09-05 20:37:38 -07001644
Akinobu Mita984b3f52010-03-05 13:41:37 -08001645 for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001646 struct perf_event *event = cpuc->events[bit];
1647
Peter Zijlstrade725de2010-09-02 15:07:49 -04001648 handled++;
1649
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001650 if (!test_bit(bit, cpuc->active_mask))
1651 continue;
1652
1653 if (!intel_pmu_save_and_restart(event))
1654 continue;
1655
Robert Richterfd0d0002012-04-02 20:19:08 +02001656 perf_sample_data_init(&data, 0, event->hw.last_period);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001657
Stephane Eranian60ce0fb2012-02-09 23:20:57 +01001658 if (has_branch_stack(event))
1659 data.br_stack = &cpuc->lbr_stack;
1660
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02001661 if (perf_event_overflow(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001662 x86_pmu_stop(event, 0);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001663 }
1664
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001665 /*
1666 * Repeat if there is more work to be done:
1667 */
1668 status = intel_pmu_get_status();
1669 if (status)
1670 goto again;
1671
Peter Zijlstra3fb2b8d2010-03-08 13:51:01 +01001672done:
Andi Kleen1a78d932015-03-20 10:11:23 -07001673 __intel_pmu_enable_all(0, true);
Andi Kleen72db5592013-06-17 17:36:50 -07001674 /*
1675 * Only unmask the NMI after the overflow counters
1676 * have been reset. This avoids spurious NMIs on
1677 * Haswell CPUs.
1678 */
1679 if (x86_pmu.late_ack)
1680 apic_write(APIC_LVTPC, APIC_DM_NMI);
Peter Zijlstrade725de2010-09-02 15:07:49 -04001681 return handled;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001682}
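/*
 * For reference, the special MSR_CORE_PERF_GLOBAL_STATUS bits consumed
 * by the handler above (a summary of the code, no new behaviour):
 *
 *	bit 63	CondChgd   - carries no overflow meaning; cleared, ignored
 *	bit 62	OvfBuffer  - PEBS buffer overflow -> x86_pmu.drain_pebs()
 *	bit 55	Trace_ToPA - Intel PT PMI         -> intel_pt_interrupt()
 *	bits 0..N	   - per-counter overflow bits, OR-ed with
 *			     cpuc->intel_cp_status for checkpointed events
 */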
1683
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001684static struct event_constraint *
Peter Zijlstraca037702010-03-02 19:52:12 +01001685intel_bts_constraints(struct perf_event *event)
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001686{
Peter Zijlstraca037702010-03-02 19:52:12 +01001687 struct hw_perf_event *hwc = &event->hw;
1688 unsigned int hw_event, bts_event;
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001689
Peter Zijlstra18a073a2011-04-26 13:24:33 +02001690 if (event->attr.freq)
1691 return NULL;
1692
Peter Zijlstraca037702010-03-02 19:52:12 +01001693 hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
1694 bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001695
Peter Zijlstraca037702010-03-02 19:52:12 +01001696 if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001697 return &bts_constraint;
Peter Zijlstraca037702010-03-02 19:52:12 +01001698
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001699 return NULL;
1700}
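/*
 * An illustration of when the BTS constraint above fires (assumed attr
 * values, shown only as a sketch). Only the branch-instructions event,
 * sampled at every single branch with a fixed period, is steered to BTS:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
 *		.sample_period	= 1,	/* fixed period, attr.freq == 0 */
 *	};
 *
 * Any frequency-based event returns NULL before the encoding compare.
 */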
1701
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001702static int intel_alt_er(int idx)
Peter Zijlstrab79e8942011-05-23 11:08:15 +02001703{
Stephane Eranian9a5e3fb2014-11-17 20:06:53 +01001704 if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001705 return idx;
Peter Zijlstrab79e8942011-05-23 11:08:15 +02001706
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001707 if (idx == EXTRA_REG_RSP_0)
1708 return EXTRA_REG_RSP_1;
1709
1710 if (idx == EXTRA_REG_RSP_1)
1711 return EXTRA_REG_RSP_0;
1712
1713 return idx;
1714}
1715
1716static void intel_fixup_er(struct perf_event *event, int idx)
1717{
1718 event->hw.extra_reg.idx = idx;
1719
1720 if (idx == EXTRA_REG_RSP_0) {
Peter Zijlstrab79e8942011-05-23 11:08:15 +02001721 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
Yan, Zheng53ad0442013-07-18 17:02:23 +08001722 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
Peter Zijlstrab79e8942011-05-23 11:08:15 +02001723 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001724 } else if (idx == EXTRA_REG_RSP_1) {
1725 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
Yan, Zheng53ad0442013-07-18 17:02:23 +08001726 event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001727 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
Peter Zijlstrab79e8942011-05-23 11:08:15 +02001728 }
Peter Zijlstrab79e8942011-05-23 11:08:15 +02001729}
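/*
 * A sketch of the alternate extra-register swap (illustrative, under
 * the assumption that PMU_FL_HAS_RSP_1 is set). Two conflicting
 * offcore-response events can both be scheduled because the two
 * encodings are interchangeable:
 *
 *	idx = intel_alt_er(EXTRA_REG_RSP_0);	/* -> EXTRA_REG_RSP_1 */
 *	intel_fixup_er(event, idx);
 *
 * where the fixup rewrites the event select from the
 * x86_pmu.extra_regs[] table and repoints extra_reg.reg at the
 * sibling MSR_OFFCORE_RSP_1.
 */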
1730
Stephane Eranianefc9f052011-06-06 16:57:03 +02001731/*
1732 * manage allocation of shared extra msr for certain events
1733 *
1734 * sharing can be:
1735 * per-cpu: to be shared between the various events on a single PMU
1736 * per-core: per-cpu + shared by HT threads
1737 */
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001738static struct event_constraint *
Stephane Eranianefc9f052011-06-06 16:57:03 +02001739__intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
Stephane Eranianb36817e2012-02-09 23:20:53 +01001740 struct perf_event *event,
1741 struct hw_perf_event_extra *reg)
Andi Kleena7e3ed12011-03-03 10:34:47 +08001742{
Stephane Eranianefc9f052011-06-06 16:57:03 +02001743 struct event_constraint *c = &emptyconstraint;
Andi Kleena7e3ed12011-03-03 10:34:47 +08001744 struct er_account *era;
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001745 unsigned long flags;
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001746 int idx = reg->idx;
Andi Kleena7e3ed12011-03-03 10:34:47 +08001747
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001748 /*
1749 * reg->alloc can be set due to existing state, so for fake cpuc we
1750 * need to ignore this, otherwise we might fail to allocate proper fake
1751 * state for this extra reg constraint. Also see the comment below.
1752 */
1753 if (reg->alloc && !cpuc->is_fake)
Stephane Eranianb36817e2012-02-09 23:20:53 +01001754 return NULL; /* call x86_get_event_constraint() */
Andi Kleena7e3ed12011-03-03 10:34:47 +08001755
Peter Zijlstrab79e8942011-05-23 11:08:15 +02001756again:
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001757 era = &cpuc->shared_regs->regs[idx];
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001758 /*
1759 * we use spin_lock_irqsave() to avoid lockdep issues when
1760 * passing a fake cpuc
1761 */
1762 raw_spin_lock_irqsave(&era->lock, flags);
Stephane Eranianefc9f052011-06-06 16:57:03 +02001763
1764 if (!atomic_read(&era->ref) || era->config == reg->config) {
1765
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001766 /*
1767 * If its a fake cpuc -- as per validate_{group,event}() we
1768 * shouldn't touch event state and we can avoid doing so
1769 * since both will only call get_event_constraints() once
1770 * on each event, this avoids the need for reg->alloc.
1771 *
1772 * Not doing the ER fixup will only result in era->reg being
1773 * wrong, but since we won't actually try and program hardware
1774 * this isn't a problem either.
1775 */
1776 if (!cpuc->is_fake) {
1777 if (idx != reg->idx)
1778 intel_fixup_er(event, idx);
1779
1780 /*
1781 * x86_schedule_events() can call get_event_constraints()
1782 * multiple times on events in the case of incremental
1783	 * scheduling. reg->alloc ensures we only do the ER
1784 * allocation once.
1785 */
1786 reg->alloc = 1;
1787 }
1788
Stephane Eranianefc9f052011-06-06 16:57:03 +02001789 /* lock in msr value */
1790 era->config = reg->config;
1791 era->reg = reg->reg;
1792
1793 /* one more user */
1794 atomic_inc(&era->ref);
1795
Andi Kleena7e3ed12011-03-03 10:34:47 +08001796 /*
Stephane Eranianb36817e2012-02-09 23:20:53 +01001797 * need to call x86_get_event_constraint()
1798	 * to check if the associated event has constraints
Andi Kleena7e3ed12011-03-03 10:34:47 +08001799 */
Stephane Eranianb36817e2012-02-09 23:20:53 +01001800 c = NULL;
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001801 } else {
1802 idx = intel_alt_er(idx);
1803 if (idx != reg->idx) {
1804 raw_spin_unlock_irqrestore(&era->lock, flags);
1805 goto again;
1806 }
Andi Kleena7e3ed12011-03-03 10:34:47 +08001807 }
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001808 raw_spin_unlock_irqrestore(&era->lock, flags);
Andi Kleena7e3ed12011-03-03 10:34:47 +08001809
Stephane Eranianefc9f052011-06-06 16:57:03 +02001810 return c;
1811}
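/*
 * A concrete sharing scenario for the routine above (illustrative
 * only). Two HT siblings programming offcore-response events:
 *
 *	CPU0, config A: era->ref == 0	 -> locks in A, ref becomes 1
 *	CPU1, config A: era->config == A -> shares the MSR, ref becomes 2
 *	CPU1, config B: mismatch	 -> intel_alt_er() retries RSP_1;
 *			if that slot also conflicts, emptyconstraint is
 *			returned and the event cannot be scheduled now.
 */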
1812
1813static void
1814__intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1815 struct hw_perf_event_extra *reg)
1816{
1817 struct er_account *era;
1818
1819 /*
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001820	 * Only put the constraint if the extra reg was actually allocated. Also
1821	 * takes care of events which do not use an extra shared reg.
1822 *
1823 * Also, if this is a fake cpuc we shouldn't touch any event state
1824 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1825 * either since it'll be thrown out.
Stephane Eranianefc9f052011-06-06 16:57:03 +02001826 */
Peter Zijlstra5a4252942012-06-05 15:30:31 +02001827 if (!reg->alloc || cpuc->is_fake)
Stephane Eranianefc9f052011-06-06 16:57:03 +02001828 return;
1829
1830 era = &cpuc->shared_regs->regs[reg->idx];
1831
1832 /* one fewer user */
1833 atomic_dec(&era->ref);
1834
1835 /* allocate again next time */
1836 reg->alloc = 0;
1837}
1838
1839static struct event_constraint *
1840intel_shared_regs_constraints(struct cpu_hw_events *cpuc,
1841 struct perf_event *event)
1842{
Stephane Eranianb36817e2012-02-09 23:20:53 +01001843 struct event_constraint *c = NULL, *d;
1844 struct hw_perf_event_extra *xreg, *breg;
Stephane Eranianefc9f052011-06-06 16:57:03 +02001845
Stephane Eranianb36817e2012-02-09 23:20:53 +01001846 xreg = &event->hw.extra_reg;
1847 if (xreg->idx != EXTRA_REG_NONE) {
1848 c = __intel_shared_reg_get_constraints(cpuc, event, xreg);
1849 if (c == &emptyconstraint)
1850 return c;
1851 }
1852 breg = &event->hw.branch_reg;
1853 if (breg->idx != EXTRA_REG_NONE) {
1854 d = __intel_shared_reg_get_constraints(cpuc, event, breg);
1855 if (d == &emptyconstraint) {
1856 __intel_shared_reg_put_constraints(cpuc, xreg);
1857 c = d;
1858 }
1859 }
Stephane Eranianefc9f052011-06-06 16:57:03 +02001860 return c;
Andi Kleena7e3ed12011-03-03 10:34:47 +08001861}
1862
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001863struct event_constraint *
Stephane Eranian79cba822014-11-17 20:06:56 +01001864x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
1865 struct perf_event *event)
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001866{
1867 struct event_constraint *c;
1868
1869 if (x86_pmu.event_constraints) {
1870 for_each_event_constraint(c, x86_pmu.event_constraints) {
Stephane Eranian9fac2cf2013-01-24 16:10:27 +01001871 if ((event->hw.config & c->cmask) == c->code) {
Stephane Eranian9fac2cf2013-01-24 16:10:27 +01001872 event->hw.flags |= c->flags;
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001873 return c;
Stephane Eranian9fac2cf2013-01-24 16:10:27 +01001874 }
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001875 }
1876 }
1877
1878 return &unconstrained;
1879}
1880
Andi Kleena7e3ed12011-03-03 10:34:47 +08001881static struct event_constraint *
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001882__intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
Stephane Eranian79cba822014-11-17 20:06:56 +01001883 struct perf_event *event)
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001884{
1885 struct event_constraint *c;
1886
Peter Zijlstraca037702010-03-02 19:52:12 +01001887 c = intel_bts_constraints(event);
1888 if (c)
1889 return c;
1890
Kan Liang687805e2015-03-27 10:38:25 -04001891 c = intel_shared_regs_constraints(cpuc, event);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001892 if (c)
1893 return c;
1894
Kan Liang687805e2015-03-27 10:38:25 -04001895 c = intel_pebs_constraints(event);
Andi Kleena7e3ed12011-03-03 10:34:47 +08001896 if (c)
1897 return c;
1898
Stephane Eranian79cba822014-11-17 20:06:56 +01001899 return x86_get_event_constraints(cpuc, idx, event);
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001900}
1901
Stephane Eranianefc9f052011-06-06 16:57:03 +02001902static void
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001903intel_start_scheduling(struct cpu_hw_events *cpuc)
1904{
1905 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
Peter Zijlstra1c565832015-05-21 10:57:21 +02001906 struct intel_excl_states *xl;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001907 int tid = cpuc->excl_thread_id;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001908
1909 /*
1910 * nothing needed if in group validation mode
1911 */
Stephane Eranianb37609c2014-11-17 20:07:04 +01001912 if (cpuc->is_fake || !is_ht_workaround_enabled())
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001913 return;
Stephane Eranianb37609c2014-11-17 20:07:04 +01001914
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001915 /*
1916 * no exclusion needed
1917 */
Peter Zijlstra17186cc2015-05-21 10:57:28 +02001918 if (WARN_ON_ONCE(!excl_cntrs))
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001919 return;
1920
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001921 xl = &excl_cntrs->states[tid];
1922
1923 xl->sched_started = true;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001924 /*
1925 * lock shared state until we are done scheduling
1926 * in stop_event_scheduling()
1927 * makes scheduling appear as a transaction
1928 */
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001929 raw_spin_lock(&excl_cntrs->lock);
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001930}
1931
Peter Zijlstra0c41e752015-05-21 10:57:32 +02001932static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
1933{
1934 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
1935 struct event_constraint *c = cpuc->event_constraint[idx];
1936 struct intel_excl_states *xl;
1937 int tid = cpuc->excl_thread_id;
1938
1939 if (cpuc->is_fake || !is_ht_workaround_enabled())
1940 return;
1941
1942 if (WARN_ON_ONCE(!excl_cntrs))
1943 return;
1944
1945 if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
1946 return;
1947
1948 xl = &excl_cntrs->states[tid];
1949
1950 lockdep_assert_held(&excl_cntrs->lock);
1951
Peter Zijlstra1fe684e2015-05-21 10:57:36 +02001952 if (c->flags & PERF_X86_EVENT_EXCL)
Peter Zijlstra43ef2052015-05-21 10:57:39 +02001953 xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
Peter Zijlstra1fe684e2015-05-21 10:57:36 +02001954 else
Peter Zijlstra43ef2052015-05-21 10:57:39 +02001955 xl->state[cntr] = INTEL_EXCL_SHARED;
Peter Zijlstra0c41e752015-05-21 10:57:32 +02001956}
1957
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001958static void
1959intel_stop_scheduling(struct cpu_hw_events *cpuc)
1960{
1961 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
Peter Zijlstra1c565832015-05-21 10:57:21 +02001962 struct intel_excl_states *xl;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001963 int tid = cpuc->excl_thread_id;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001964
1965 /*
1966 * nothing needed if in group validation mode
1967 */
Stephane Eranianb37609c2014-11-17 20:07:04 +01001968 if (cpuc->is_fake || !is_ht_workaround_enabled())
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001969 return;
1970 /*
1971 * no exclusion needed
1972 */
Peter Zijlstra17186cc2015-05-21 10:57:28 +02001973 if (WARN_ON_ONCE(!excl_cntrs))
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001974 return;
1975
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001976 xl = &excl_cntrs->states[tid];
1977
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001978 xl->sched_started = false;
1979 /*
1980 * release shared state lock (acquired in intel_start_scheduling())
1981 */
1982 raw_spin_unlock(&excl_cntrs->lock);
1983}
1984
1985static struct event_constraint *
1986intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
1987 int idx, struct event_constraint *c)
1988{
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001989 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
Peter Zijlstra1c565832015-05-21 10:57:21 +02001990 struct intel_excl_states *xlo;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001991 int tid = cpuc->excl_thread_id;
Peter Zijlstra1c565832015-05-21 10:57:21 +02001992 int is_excl, i;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01001993
1994 /*
1995 * validating a group does not require
1996 * enforcing cross-thread exclusion
1997 */
Stephane Eranianb37609c2014-11-17 20:07:04 +01001998 if (cpuc->is_fake || !is_ht_workaround_enabled())
1999 return c;
2000
2001 /*
2002 * no exclusion needed
2003 */
Peter Zijlstra17186cc2015-05-21 10:57:28 +02002004 if (WARN_ON_ONCE(!excl_cntrs))
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002005 return c;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002006
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002007 /*
2008 * because we modify the constraint, we need
2009 * to make a copy. Static constraints come
2010 * from static const tables.
2011 *
2012 * only needed when constraint has not yet
2013 * been cloned (marked dynamic)
2014 */
2015 if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
Peter Zijlstraaaf932e2015-05-21 10:57:24 +02002016 struct event_constraint *cx;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002017
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002018 /*
2019 * grab pre-allocated constraint entry
2020 */
2021 cx = &cpuc->constraint_list[idx];
2022
2023 /*
2024 * initialize dynamic constraint
2025 * with static constraint
2026 */
Peter Zijlstraaaf932e2015-05-21 10:57:24 +02002027 *cx = *c;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002028
2029 /*
2030 * mark constraint as dynamic, so we
2031 * can free it later on
2032 */
2033 cx->flags |= PERF_X86_EVENT_DYNAMIC;
Peter Zijlstraaaf932e2015-05-21 10:57:24 +02002034 c = cx;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002035 }
2036
2037 /*
2038 * From here on, the constraint is dynamic.
2039 * Either it was just allocated above, or it
2040	 * was allocated during an earlier invocation
2041 * of this function
2042 */
2043
2044 /*
Peter Zijlstra1c565832015-05-21 10:57:21 +02002045 * state of sibling HT
2046 */
2047 xlo = &excl_cntrs->states[tid ^ 1];
2048
2049 /*
2050 * event requires exclusive counter access
2051 * across HT threads
2052 */
2053 is_excl = c->flags & PERF_X86_EVENT_EXCL;
2054 if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
2055 event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
2056 if (!cpuc->n_excl++)
2057 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
2058 }
2059
2060 /*
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002061 * Modify static constraint with current dynamic
2062 * state of thread
2063 *
2064 * EXCLUSIVE: sibling counter measuring exclusive event
2065 * SHARED : sibling counter measuring non-exclusive event
2066 * UNUSED : sibling counter unused
2067 */
Peter Zijlstraaaf932e2015-05-21 10:57:24 +02002068 for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002069 /*
2070 * exclusive event in sibling counter
2071 * our corresponding counter cannot be used
2072 * regardless of our event
2073 */
Peter Zijlstra1c565832015-05-21 10:57:21 +02002074 if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
Peter Zijlstraaaf932e2015-05-21 10:57:24 +02002075 __clear_bit(i, c->idxmsk);
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002076 /*
2077 * if measuring an exclusive event, sibling
2078 * measuring non-exclusive, then counter cannot
2079 * be used
2080 */
Peter Zijlstra1c565832015-05-21 10:57:21 +02002081 if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
Peter Zijlstraaaf932e2015-05-21 10:57:24 +02002082 __clear_bit(i, c->idxmsk);
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002083 }
2084
2085 /*
2086 * recompute actual bit weight for scheduling algorithm
2087 */
Peter Zijlstraaaf932e2015-05-21 10:57:24 +02002088 c->weight = hweight64(c->idxmsk64);
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002089
2090 /*
2091 * if we return an empty mask, then switch
2092 * back to static empty constraint to avoid
2093 * the cost of freeing later on
2094 */
Peter Zijlstraaaf932e2015-05-21 10:57:24 +02002095 if (c->weight == 0)
2096 c = &emptyconstraint;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002097
Peter Zijlstraaaf932e2015-05-21 10:57:24 +02002098 return c;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002099}
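/*
 * How the cross-thread trimming above plays out, on a made-up sibling
 * state (for illustration). Assume xlo->state = { EXCLUSIVE, SHARED,
 * UNUSED, ... } and this event has PERF_X86_EVENT_EXCL set:
 *
 *	counter 0: sibling EXCLUSIVE -> cleared from c->idxmsk
 *	counter 1: sibling SHARED    -> cleared (we need exclusivity)
 *	counter 2: sibling UNUSED    -> remains usable
 *
 * c->weight is then recomputed so the scheduler sees the real choices;
 * a weight of 0 degrades to emptyconstraint.
 */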
2100
2101static struct event_constraint *
2102intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2103 struct perf_event *event)
2104{
Peter Zijlstrab371b592015-05-21 10:57:13 +02002105 struct event_constraint *c1 = cpuc->event_constraint[idx];
Stephane Eraniana90738c2014-11-17 20:07:01 +01002106 struct event_constraint *c2;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002107
2108 /*
2109 * first time only
2110 * - static constraint: no change across incremental scheduling calls
2111 * - dynamic constraint: handled by intel_get_excl_constraints()
2112 */
Stephane Eraniana90738c2014-11-17 20:07:01 +01002113 c2 = __intel_get_event_constraints(cpuc, idx, event);
2114 if (c1 && (c1->flags & PERF_X86_EVENT_DYNAMIC)) {
2115 bitmap_copy(c1->idxmsk, c2->idxmsk, X86_PMC_IDX_MAX);
2116 c1->weight = c2->weight;
2117 c2 = c1;
2118 }
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002119
2120 if (cpuc->excl_cntrs)
Stephane Eraniana90738c2014-11-17 20:07:01 +01002121 return intel_get_excl_constraints(cpuc, event, idx, c2);
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002122
Stephane Eraniana90738c2014-11-17 20:07:01 +01002123 return c2;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002124}
2125
2126static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
2127 struct perf_event *event)
2128{
2129 struct hw_perf_event *hwc = &event->hw;
2130 struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002131 int tid = cpuc->excl_thread_id;
Peter Zijlstra1c565832015-05-21 10:57:21 +02002132 struct intel_excl_states *xl;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002133
2134 /*
2135 * nothing needed if in group validation mode
2136 */
2137 if (cpuc->is_fake)
2138 return;
2139
Peter Zijlstra17186cc2015-05-21 10:57:28 +02002140 if (WARN_ON_ONCE(!excl_cntrs))
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002141 return;
2142
Peter Zijlstracc1790c2015-05-21 10:57:17 +02002143 if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
2144 hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
2145 if (!--cpuc->n_excl)
2146 WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
2147 }
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002148
2149 /*
Peter Zijlstraba040652015-05-22 11:36:13 +02002150	 * If the event was actually assigned, then mark the counter state as
2151 * unused now.
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002152 */
Peter Zijlstraba040652015-05-22 11:36:13 +02002153 if (hwc->idx >= 0) {
2154 xl = &excl_cntrs->states[tid];
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002155
Peter Zijlstraba040652015-05-22 11:36:13 +02002156 /*
2157 * put_constraint may be called from x86_schedule_events()
2158	 * which already has the lock held, so make the locking
2159	 * conditional here.
2160 */
2161 if (!xl->sched_started)
2162 raw_spin_lock(&excl_cntrs->lock);
2163
Peter Zijlstra1c565832015-05-21 10:57:21 +02002164 xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002165
Peter Zijlstraba040652015-05-22 11:36:13 +02002166 if (!xl->sched_started)
2167 raw_spin_unlock(&excl_cntrs->lock);
2168 }
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002169}
2170
2171static void
Stephane Eranianefc9f052011-06-06 16:57:03 +02002172intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
2173 struct perf_event *event)
2174{
2175 struct hw_perf_event_extra *reg;
2176
2177 reg = &event->hw.extra_reg;
2178 if (reg->idx != EXTRA_REG_NONE)
2179 __intel_shared_reg_put_constraints(cpuc, reg);
Stephane Eranianb36817e2012-02-09 23:20:53 +01002180
2181 reg = &event->hw.branch_reg;
2182 if (reg->idx != EXTRA_REG_NONE)
2183 __intel_shared_reg_put_constraints(cpuc, reg);
Stephane Eranianefc9f052011-06-06 16:57:03 +02002184}
2185
Andi Kleena7e3ed12011-03-03 10:34:47 +08002186static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
2187 struct perf_event *event)
2188{
Stephane Eranianefc9f052011-06-06 16:57:03 +02002189 intel_put_shared_regs_event_constraints(cpuc, event);
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002190
2191 /*
2192	 * if the PMU has exclusive counter restrictions, then
2193	 * all events are subject to them and must call the
2194	 * put_excl_constraints() routine
2195 */
Peter Zijlstrab371b592015-05-21 10:57:13 +02002196 if (cpuc->excl_cntrs)
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002197 intel_put_excl_constraints(cpuc, event);
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002198}
2199
Peter Zijlstra0780c922012-06-05 10:26:43 +02002200static void intel_pebs_aliases_core2(struct perf_event *event)
Peter Zijlstrab4cdc5c2010-03-30 17:00:06 +02002201{
Peter Zijlstra0780c922012-06-05 10:26:43 +02002202 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
Peter Zijlstra7639dae2010-12-14 21:26:40 +01002203 /*
2204 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
2205 * (0x003c) so that we can use it with PEBS.
2206 *
2207 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
2208 * PEBS capable. However we can use INST_RETIRED.ANY_P
2209 * (0x00c0), which is a PEBS capable event, to get the same
2210 * count.
2211 *
2212 * INST_RETIRED.ANY_P counts the number of cycles that retires
2213 * CNTMASK instructions. By setting CNTMASK to a value (16)
2214 * larger than the maximum number of instructions that can be
2215 * retired per cycle (4) and then inverting the condition, we
2216 * count all cycles that retire 16 or less instructions, which
2217 * is every cycle.
2218 *
2219 * Thereby we gain a PEBS capable cycle counter.
2220 */
Peter Zijlstraf9b4eeb2012-03-12 12:44:35 +01002221 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
2222
Peter Zijlstra0780c922012-06-05 10:26:43 +02002223 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
2224 event->hw.config = alt_config;
2225 }
2226}
2227
2228static void intel_pebs_aliases_snb(struct perf_event *event)
2229{
2230 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
2231 /*
2232 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
2233 * (0x003c) so that we can use it with PEBS.
2234 *
2235 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
2236 * PEBS capable. However we can use UOPS_RETIRED.ALL
2237 * (0x01c2), which is a PEBS capable event, to get the same
2238 * count.
2239 *
2240 * UOPS_RETIRED.ALL counts the number of cycles that retires
2241 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
2242 * larger than the maximum number of micro-ops that can be
2243 * retired per cycle (4) and then inverting the condition, we
2244 * count all cycles that retire 16 or less micro-ops, which
2245 * is every cycle.
2246 *
2247 * Thereby we gain a PEBS capable cycle counter.
2248 */
2249 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
Peter Zijlstra7639dae2010-12-14 21:26:40 +01002250
2251 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
2252 event->hw.config = alt_config;
2253 }
Peter Zijlstra0780c922012-06-05 10:26:43 +02002254}
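/*
 * The raw encodings behind the two PEBS aliases above, worked out as
 * an example (a sketch; event select sits in bits 0-7, umask in 8-15,
 * INV at bit 23 and CMASK in bits 24-31):
 *
 *	X86_CONFIG(.event=0xc0, .inv=1, .cmask=16)		 -> 0x108000c0
 *	X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16) -> 0x108001c2
 *
 * so a raw event such as cpu/event=0xc0,inv,cmask=16/p hand-builds the
 * same PEBS-capable cycle counter.
 */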
2255
2256static int intel_pmu_hw_config(struct perf_event *event)
2257{
2258 int ret = x86_pmu_hw_config(event);
2259
2260 if (ret)
2261 return ret;
2262
Yan, Zheng851559e2015-05-06 15:33:47 -04002263 if (event->attr.precise_ip) {
Yan, Zheng3569c0d2015-05-06 15:33:50 -04002264 if (!event->attr.freq) {
Yan, Zheng851559e2015-05-06 15:33:47 -04002265 event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
Yan, Zheng3569c0d2015-05-06 15:33:50 -04002266 if (!(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS))
2267 event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
2268 }
Yan, Zheng851559e2015-05-06 15:33:47 -04002269 if (x86_pmu.pebs_aliases)
2270 x86_pmu.pebs_aliases(event);
2271 }
Peter Zijlstra7639dae2010-12-14 21:26:40 +01002272
Yan, Zhenga46a2302014-11-04 21:56:06 -05002273 if (needs_branch_stack(event)) {
Stephane Eranian60ce0fb2012-02-09 23:20:57 +01002274 ret = intel_pmu_setup_lbr_filter(event);
2275 if (ret)
2276 return ret;
Alexander Shishkin48070342015-01-14 14:18:20 +02002277
2278 /*
2279 * BTS is set up earlier in this path, so don't account twice
2280 */
2281 if (!intel_pmu_has_bts(event)) {
2282 /* disallow lbr if conflicting events are present */
2283 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
2284 return -EBUSY;
2285
2286 event->destroy = hw_perf_lbr_event_destroy;
2287 }
Stephane Eranian60ce0fb2012-02-09 23:20:57 +01002288 }
2289
Peter Zijlstrab4cdc5c2010-03-30 17:00:06 +02002290 if (event->attr.type != PERF_TYPE_RAW)
2291 return 0;
2292
2293 if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
2294 return 0;
2295
2296 if (x86_pmu.version < 3)
2297 return -EINVAL;
2298
2299 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2300 return -EACCES;
2301
2302 event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;
2303
2304 return 0;
2305}
2306
Gleb Natapov144d31e2011-10-05 14:01:21 +02002307struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
2308{
2309 if (x86_pmu.guest_get_msrs)
2310 return x86_pmu.guest_get_msrs(nr);
2311 *nr = 0;
2312 return NULL;
2313}
2314EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
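/*
 * A minimal sketch of the consumer side (an assumed caller, loosely
 * modelled on a KVM-style guest entry path; hv_swap_msr() is a
 * hypothetical stand-in for whatever MSR-swap primitive the hypervisor
 * provides). Each returned entry carries the host/guest values to swap
 * at VM entry and exit:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		if (msrs[i].guest != msrs[i].host)
 *			hv_swap_msr(msrs[i].msr, msrs[i].guest,
 *				    msrs[i].host);
 */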
2315
2316static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
2317{
Christoph Lameter89cbc762014-08-17 12:30:40 -05002318 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Gleb Natapov144d31e2011-10-05 14:01:21 +02002319 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
2320
2321 arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
2322 arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
2323 arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
Gleb Natapov26a4f3c2012-08-09 11:52:34 +03002324 /*
2325	 * If a PMU counter has PEBS enabled, it is not enough to disable the
2326	 * counter on guest entry, since a PEBS memory write can overshoot the
2327	 * guest entry and corrupt guest memory. Disabling PEBS solves the problem.
2328 */
2329 arr[1].msr = MSR_IA32_PEBS_ENABLE;
2330 arr[1].host = cpuc->pebs_enabled;
2331 arr[1].guest = 0;
Gleb Natapov144d31e2011-10-05 14:01:21 +02002332
Gleb Natapov26a4f3c2012-08-09 11:52:34 +03002333 *nr = 2;
Gleb Natapov144d31e2011-10-05 14:01:21 +02002334 return arr;
2335}
2336
2337static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
2338{
Christoph Lameter89cbc762014-08-17 12:30:40 -05002339 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Gleb Natapov144d31e2011-10-05 14:01:21 +02002340 struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
2341 int idx;
2342
2343 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2344 struct perf_event *event = cpuc->events[idx];
2345
2346 arr[idx].msr = x86_pmu_config_addr(idx);
2347 arr[idx].host = arr[idx].guest = 0;
2348
2349 if (!test_bit(idx, cpuc->active_mask))
2350 continue;
2351
2352 arr[idx].host = arr[idx].guest =
2353 event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE;
2354
2355 if (event->attr.exclude_host)
2356 arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
2357 else if (event->attr.exclude_guest)
2358 arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
2359 }
2360
2361 *nr = x86_pmu.num_counters;
2362 return arr;
2363}
2364
2365static void core_pmu_enable_event(struct perf_event *event)
2366{
2367 if (!event->attr.exclude_host)
2368 x86_pmu_enable_event(event);
2369}
2370
2371static void core_pmu_enable_all(int added)
2372{
Christoph Lameter89cbc762014-08-17 12:30:40 -05002373 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Gleb Natapov144d31e2011-10-05 14:01:21 +02002374 int idx;
2375
2376 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
2377 struct hw_perf_event *hwc = &cpuc->events[idx]->hw;
2378
2379 if (!test_bit(idx, cpuc->active_mask) ||
2380 cpuc->events[idx]->attr.exclude_host)
2381 continue;
2382
2383 __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
2384 }
2385}
2386
Andi Kleen3a632cb2013-06-17 17:36:48 -07002387static int hsw_hw_config(struct perf_event *event)
2388{
2389 int ret = intel_pmu_hw_config(event);
2390
2391 if (ret)
2392 return ret;
2393 if (!boot_cpu_has(X86_FEATURE_RTM) && !boot_cpu_has(X86_FEATURE_HLE))
2394 return 0;
2395 event->hw.config |= event->attr.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED);
2396
2397 /*
2398 * IN_TX/IN_TX-CP filters are not supported by the Haswell PMU with
2399	 * PEBS or in ANY thread mode. Since the results are nonsensical, forbid
2400 * this combination.
2401 */
2402 if ((event->hw.config & (HSW_IN_TX|HSW_IN_TX_CHECKPOINTED)) &&
2403 ((event->hw.config & ARCH_PERFMON_EVENTSEL_ANY) ||
2404 event->attr.precise_ip > 0))
2405 return -EOPNOTSUPP;
2406
Andi Kleen2dbf0112013-09-05 20:37:38 -07002407 if (event_is_checkpointed(event)) {
2408 /*
2409 * Sampling of checkpointed events can cause situations where
2410	 * the CPU constantly aborts because of an overflow, which is
2411 * then checkpointed back and ignored. Forbid checkpointing
2412 * for sampling.
2413 *
2414 * But still allow a long sampling period, so that perf stat
2415 * from KVM works.
2416 */
2417 if (event->attr.sample_period > 0 &&
2418 event->attr.sample_period < 0x7fffffff)
2419 return -EOPNOTSUPP;
2420 }
Andi Kleen3a632cb2013-06-17 17:36:48 -07002421 return 0;
2422}
2423
2424static struct event_constraint counter2_constraint =
2425 EVENT_CONSTRAINT(0, 0x4, 0);
2426
2427static struct event_constraint *
Stephane Eranian79cba822014-11-17 20:06:56 +01002428hsw_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
2429 struct perf_event *event)
Andi Kleen3a632cb2013-06-17 17:36:48 -07002430{
Stephane Eranian79cba822014-11-17 20:06:56 +01002431 struct event_constraint *c;
2432
2433 c = intel_get_event_constraints(cpuc, idx, event);
Andi Kleen3a632cb2013-06-17 17:36:48 -07002434
2435 /* Handle special quirk on in_tx_checkpointed only in counter 2 */
2436 if (event->hw.config & HSW_IN_TX_CHECKPOINTED) {
2437 if (c->idxmsk64 & (1U << 2))
2438 return &counter2_constraint;
2439 return &emptyconstraint;
2440 }
2441
2442 return c;
2443}
2444
Andi Kleen294fe0f2015-02-17 18:18:06 -08002445/*
2446 * Broadwell:
2447 *
2448 * The INST_RETIRED.ALL period always needs to have lowest 6 bits cleared
2449 * (BDM55) and it must not use a period smaller than 100 (BDM11). We combine
2450 * the two to enforce a minimum period of 128 (the smallest value that has bits
2451 * 0-5 cleared and >= 100).
2452 *
2453 * Because of how the code in x86_perf_event_set_period() works, the truncation
2454 * of the lower 6 bits is 'harmless' as we'll occasionally add a longer period
2455 * to make up for the 'lost' events due to carrying the 'error' in period_left.
2456 *
2457 * Therefore the effective (average) period matches the requested period,
2458 * despite coarser hardware granularity.
2459 */
2460static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
2461{
2462 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
2463 X86_CONFIG(.event=0xc0, .umask=0x01)) {
2464 if (left < 128)
2465 left = 128;
2466 left &= ~0x3fu;
2467 }
2468 return left;
2469}
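/*
 * Worked values for the clamp above (illustrative). For
 * INST_RETIRED.ALL on Broadwell:
 *
 *	left = 100  -> raised to 128, 128 & ~0x3f = 128
 *	left = 1000 -> 1000 & ~0x3f  = 960
 *
 * Every programmed period thus satisfies both BDM11 (>= 100) and BDM55
 * (bits 0-5 clear), while x86_perf_event_set_period() carries the
 * truncated remainder forward in period_left.
 */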
2470
Jiri Olsa641cc932012-03-15 20:09:14 +01002471PMU_FORMAT_ATTR(event, "config:0-7" );
2472PMU_FORMAT_ATTR(umask, "config:8-15" );
2473PMU_FORMAT_ATTR(edge, "config:18" );
2474PMU_FORMAT_ATTR(pc, "config:19" );
2475PMU_FORMAT_ATTR(any, "config:21" ); /* v3 + */
2476PMU_FORMAT_ATTR(inv, "config:23" );
2477PMU_FORMAT_ATTR(cmask, "config:24-31" );
Andi Kleen3a632cb2013-06-17 17:36:48 -07002478PMU_FORMAT_ATTR(in_tx, "config:32");
2479PMU_FORMAT_ATTR(in_tx_cp, "config:33");
Jiri Olsa641cc932012-03-15 20:09:14 +01002480
2481static struct attribute *intel_arch_formats_attr[] = {
2482 &format_attr_event.attr,
2483 &format_attr_umask.attr,
2484 &format_attr_edge.attr,
2485 &format_attr_pc.attr,
2486 &format_attr_inv.attr,
2487 &format_attr_cmask.attr,
2488 NULL,
2489};
2490
Jiri Olsa0bf79d42012-10-10 14:53:14 +02002491ssize_t intel_event_sysfs_show(char *page, u64 config)
2492{
2493 u64 event = (config & ARCH_PERFMON_EVENTSEL_EVENT);
2494
2495 return x86_event_sysfs_show(page, config, event);
2496}
2497
Kevin Winchesterde0428a2011-08-30 20:41:05 -03002498struct intel_shared_regs *allocate_shared_regs(int cpu)
Stephane Eranianefc9f052011-06-06 16:57:03 +02002499{
2500 struct intel_shared_regs *regs;
2501 int i;
2502
2503 regs = kzalloc_node(sizeof(struct intel_shared_regs),
2504 GFP_KERNEL, cpu_to_node(cpu));
2505 if (regs) {
2506 /*
2507 * initialize the locks to keep lockdep happy
2508 */
2509 for (i = 0; i < EXTRA_REG_MAX; i++)
2510 raw_spin_lock_init(&regs->regs[i].lock);
2511
2512 regs->core_id = -1;
2513 }
2514 return regs;
2515}
2516
Maria Dimakopoulou6f6539c2014-11-17 20:06:57 +01002517static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
2518{
2519 struct intel_excl_cntrs *c;
Maria Dimakopoulou6f6539c2014-11-17 20:06:57 +01002520
2521 c = kzalloc_node(sizeof(struct intel_excl_cntrs),
2522 GFP_KERNEL, cpu_to_node(cpu));
2523 if (c) {
2524 raw_spin_lock_init(&c->lock);
Maria Dimakopoulou6f6539c2014-11-17 20:06:57 +01002525 c->core_id = -1;
2526 }
2527 return c;
2528}
2529
Andi Kleena7e3ed12011-03-03 10:34:47 +08002530static int intel_pmu_cpu_prepare(int cpu)
2531{
2532 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2533
Maria Dimakopoulou6f6539c2014-11-17 20:06:57 +01002534 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
2535 cpuc->shared_regs = allocate_shared_regs(cpu);
2536 if (!cpuc->shared_regs)
2537 return NOTIFY_BAD;
2538 }
Lin Ming69092622011-03-03 10:34:50 +08002539
Maria Dimakopoulou6f6539c2014-11-17 20:06:57 +01002540 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
2541 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
2542
2543 cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
2544 if (!cpuc->constraint_list)
2545 return NOTIFY_BAD;
2546
2547 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
2548 if (!cpuc->excl_cntrs) {
2549 kfree(cpuc->constraint_list);
2550 kfree(cpuc->shared_regs);
2551 return NOTIFY_BAD;
2552 }
2553 cpuc->excl_thread_id = 0;
2554 }
Andi Kleena7e3ed12011-03-03 10:34:47 +08002555
Andi Kleena7e3ed12011-03-03 10:34:47 +08002556 return NOTIFY_OK;
2557}
2558
Peter Zijlstra74846d32010-03-05 13:49:35 +01002559static void intel_pmu_cpu_starting(int cpu)
2560{
Andi Kleena7e3ed12011-03-03 10:34:47 +08002561 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2562 int core_id = topology_core_id(cpu);
2563 int i;
2564
Lin Ming69092622011-03-03 10:34:50 +08002565 init_debug_store_on_cpu(cpu);
2566 /*
2567 * Deal with CPUs that don't clear their LBRs on power-up.
2568 */
2569 intel_pmu_lbr_reset();
2570
Stephane Eranianb36817e2012-02-09 23:20:53 +01002571 cpuc->lbr_sel = NULL;
2572
2573 if (!cpuc->shared_regs)
Lin Ming69092622011-03-03 10:34:50 +08002574 return;
2575
Stephane Eranian9a5e3fb2014-11-17 20:06:53 +01002576 if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
Stephane Eranian90413462014-11-17 20:06:54 +01002577 void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
2578
Stephane Eranianb36817e2012-02-09 23:20:53 +01002579 for_each_cpu(i, topology_thread_cpumask(cpu)) {
2580 struct intel_shared_regs *pc;
Andi Kleena7e3ed12011-03-03 10:34:47 +08002581
Stephane Eranianb36817e2012-02-09 23:20:53 +01002582 pc = per_cpu(cpu_hw_events, i).shared_regs;
2583 if (pc && pc->core_id == core_id) {
Stephane Eranian90413462014-11-17 20:06:54 +01002584 *onln = cpuc->shared_regs;
Stephane Eranianb36817e2012-02-09 23:20:53 +01002585 cpuc->shared_regs = pc;
2586 break;
2587 }
Andi Kleena7e3ed12011-03-03 10:34:47 +08002588 }
Stephane Eranianb36817e2012-02-09 23:20:53 +01002589 cpuc->shared_regs->core_id = core_id;
2590 cpuc->shared_regs->refcnt++;
Andi Kleena7e3ed12011-03-03 10:34:47 +08002591 }
2592
Stephane Eranianb36817e2012-02-09 23:20:53 +01002593 if (x86_pmu.lbr_sel_map)
2594 cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
Maria Dimakopoulou6f6539c2014-11-17 20:06:57 +01002595
2596 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
2597 for_each_cpu(i, topology_thread_cpumask(cpu)) {
2598 struct intel_excl_cntrs *c;
2599
2600 c = per_cpu(cpu_hw_events, i).excl_cntrs;
2601 if (c && c->core_id == core_id) {
2602 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
2603 cpuc->excl_cntrs = c;
2604 cpuc->excl_thread_id = 1;
2605 break;
2606 }
2607 }
2608 cpuc->excl_cntrs->core_id = core_id;
2609 cpuc->excl_cntrs->refcnt++;
2610 }
Peter Zijlstra74846d32010-03-05 13:49:35 +01002611}
2612
Stephane Eranianb37609c2014-11-17 20:07:04 +01002613static void free_excl_cntrs(int cpu)
Peter Zijlstra74846d32010-03-05 13:49:35 +01002614{
Andi Kleena7e3ed12011-03-03 10:34:47 +08002615 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
Maria Dimakopoulou6f6539c2014-11-17 20:06:57 +01002616 struct intel_excl_cntrs *c;
Andi Kleena7e3ed12011-03-03 10:34:47 +08002617
Maria Dimakopoulou6f6539c2014-11-17 20:06:57 +01002618 c = cpuc->excl_cntrs;
2619 if (c) {
2620 if (c->core_id == -1 || --c->refcnt == 0)
2621 kfree(c);
2622 cpuc->excl_cntrs = NULL;
2623 kfree(cpuc->constraint_list);
2624 cpuc->constraint_list = NULL;
2625 }
Stephane Eranianb37609c2014-11-17 20:07:04 +01002626}
Andi Kleena7e3ed12011-03-03 10:34:47 +08002627
Stephane Eranianb37609c2014-11-17 20:07:04 +01002628static void intel_pmu_cpu_dying(int cpu)
2629{
2630 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2631 struct intel_shared_regs *pc;
2632
2633 pc = cpuc->shared_regs;
2634 if (pc) {
2635 if (pc->core_id == -1 || --pc->refcnt == 0)
2636 kfree(pc);
2637 cpuc->shared_regs = NULL;
Maria Dimakopouloue9791212014-11-17 20:06:58 +01002638 }
2639
Stephane Eranianb37609c2014-11-17 20:07:04 +01002640 free_excl_cntrs(cpu);
2641
Peter Zijlstra74846d32010-03-05 13:49:35 +01002642 fini_debug_store_on_cpu(cpu);
2643}
2644
Yan, Zheng9c964ef2015-05-06 15:33:51 -04002645static void intel_pmu_sched_task(struct perf_event_context *ctx,
2646 bool sched_in)
2647{
2648 if (x86_pmu.pebs_active)
2649 intel_pmu_pebs_sched_task(ctx, sched_in);
2650 if (x86_pmu.lbr_nr)
2651 intel_pmu_lbr_sched_task(ctx, sched_in);
2652}
2653
Jiri Olsa641cc932012-03-15 20:09:14 +01002654PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
2655
Stephane Eraniana63fcab2013-01-24 16:10:33 +01002656PMU_FORMAT_ATTR(ldlat, "config1:0-15");
2657
Jiri Olsa641cc932012-03-15 20:09:14 +01002658static struct attribute *intel_arch3_formats_attr[] = {
2659 &format_attr_event.attr,
2660 &format_attr_umask.attr,
2661 &format_attr_edge.attr,
2662 &format_attr_pc.attr,
2663 &format_attr_any.attr,
2664 &format_attr_inv.attr,
2665 &format_attr_cmask.attr,
Andi Kleen3a632cb2013-06-17 17:36:48 -07002666 &format_attr_in_tx.attr,
2667 &format_attr_in_tx_cp.attr,
Jiri Olsa641cc932012-03-15 20:09:14 +01002668
2669 &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
Stephane Eraniana63fcab2013-01-24 16:10:33 +01002670 &format_attr_ldlat.attr, /* PEBS load latency */
Jiri Olsa641cc932012-03-15 20:09:14 +01002671 NULL,
2672};
2673
Jiri Olsa3b6e0422015-04-21 17:26:23 +02002674static __initconst const struct x86_pmu core_pmu = {
2675 .name = "core",
2676 .handle_irq = x86_pmu_handle_irq,
2677 .disable_all = x86_pmu_disable_all,
2678 .enable_all = core_pmu_enable_all,
2679 .enable = core_pmu_enable_event,
2680 .disable = x86_pmu_disable_event,
2681 .hw_config = x86_pmu_hw_config,
2682 .schedule_events = x86_schedule_events,
2683 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2684 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2685 .event_map = intel_pmu_event_map,
2686 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2687 .apic = 1,
2688 /*
2689 * Intel PMCs cannot be accessed sanely above 32-bit width,
2690 * so we install an artificial 1<<31 period regardless of
2691 * the generic event period:
2692 */
2693 .max_period = (1ULL<<31) - 1,
2694 .get_event_constraints = intel_get_event_constraints,
2695 .put_event_constraints = intel_put_event_constraints,
2696 .event_constraints = intel_core_event_constraints,
2697 .guest_get_msrs = core_guest_get_msrs,
2698 .format_attrs = intel_arch_formats_attr,
2699 .events_sysfs_show = intel_event_sysfs_show,
2700
2701 /*
2702 * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
2703 * together with PMU version 1 and thus be using core_pmu with
2704	 * shared_regs. We need the following callbacks here to allocate
2705 * it properly.
2706 */
2707 .cpu_prepare = intel_pmu_cpu_prepare,
2708 .cpu_starting = intel_pmu_cpu_starting,
2709 .cpu_dying = intel_pmu_cpu_dying,
2710};
2711
Peter Zijlstracaaa8be2010-03-29 13:09:53 +02002712static __initconst const struct x86_pmu intel_pmu = {
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002713 .name = "Intel",
2714 .handle_irq = intel_pmu_handle_irq,
2715 .disable_all = intel_pmu_disable_all,
2716 .enable_all = intel_pmu_enable_all,
2717 .enable = intel_pmu_enable_event,
2718 .disable = intel_pmu_disable_event,
Peter Zijlstrab4cdc5c2010-03-30 17:00:06 +02002719 .hw_config = intel_pmu_hw_config,
Cyrill Gorcunova0727382010-03-11 19:54:39 +03002720 .schedule_events = x86_schedule_events,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002721 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2722 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2723 .event_map = intel_pmu_event_map,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002724 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2725 .apic = 1,
2726 /*
2727 * Intel PMCs cannot be accessed sanely above 32 bit width,
2728 * so we install an artificial 1<<31 period regardless of
2729 * the generic event period:
2730 */
2731 .max_period = (1ULL << 31) - 1,
Peter Zijlstra3f6da392010-03-05 13:01:18 +01002732 .get_event_constraints = intel_get_event_constraints,
Andi Kleena7e3ed12011-03-03 10:34:47 +08002733 .put_event_constraints = intel_put_event_constraints,
Peter Zijlstra0780c922012-06-05 10:26:43 +02002734 .pebs_aliases = intel_pebs_aliases_core2,
Peter Zijlstra3f6da392010-03-05 13:01:18 +01002735
Jiri Olsa641cc932012-03-15 20:09:14 +01002736 .format_attrs = intel_arch3_formats_attr,
Jiri Olsa0bf79d42012-10-10 14:53:14 +02002737 .events_sysfs_show = intel_event_sysfs_show,
Jiri Olsa641cc932012-03-15 20:09:14 +01002738
Andi Kleena7e3ed12011-03-03 10:34:47 +08002739 .cpu_prepare = intel_pmu_cpu_prepare,
Peter Zijlstra74846d32010-03-05 13:49:35 +01002740 .cpu_starting = intel_pmu_cpu_starting,
2741 .cpu_dying = intel_pmu_cpu_dying,
Gleb Natapov144d31e2011-10-05 14:01:21 +02002742 .guest_get_msrs = intel_guest_get_msrs,
Yan, Zheng9c964ef2015-05-06 15:33:51 -04002743 .sched_task = intel_pmu_sched_task,
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01002744};
2745
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01002746static __init void intel_clovertown_quirk(void)
Peter Zijlstra3c447802010-03-04 21:49:01 +01002747{
2748 /*
2749 * PEBS is unreliable due to:
2750 *
2751 * AJ67 - PEBS may experience CPL leaks
2752 * AJ68 - PEBS PMI may be delayed by one event
2753 * AJ69 - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
2754 * AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
2755 *
2756 * AJ67 could be worked around by restricting the OS/USR flags.
2757 * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
2758 *
2759 * AJ106 could possibly be worked around by not allowing LBR
2760 * usage from PEBS, including the fixup.
2761 * AJ68 could possibly be worked around by always programming
Ingo Molnarec75a712011-04-27 11:51:41 +02002762 * a pebs_event_reset[0] value and coping with the lost events.
Peter Zijlstra3c447802010-03-04 21:49:01 +01002763 *
2764 * But taken together it might just make sense to not enable PEBS on
2765 * these chips.
2766 */
Joe Perchesc767a542012-05-21 19:50:07 -07002767 pr_warn("PEBS disabled due to CPU errata\n");
Peter Zijlstra3c447802010-03-04 21:49:01 +01002768 x86_pmu.pebs = 0;
2769 x86_pmu.pebs_constraints = NULL;
2770}
2771
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002772static int intel_snb_pebs_broken(int cpu)
2773{
2774 u32 rev = UINT_MAX; /* default to broken for unknown models */
2775
	switch (cpu_data(cpu).x86_model) {
	case 42: /* SNB */
		rev = 0x28;
		break;

	case 45: /* SNB-EP */
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;
		}
	}

	return (cpu_data(cpu).microcode < rev);
}

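/*
 * Re-evaluate the PEBS erratum state whenever the microcode changes;
 * installed as the x86_pmu.check_microcode callback by
 * intel_sandybridge_quirk() below.
 */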
static void intel_snb_check_microcode(void)
{
	int pebs_broken = 0;
	int cpu;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		if ((pebs_broken = intel_snb_pebs_broken(cpu)))
			break;
	}
	put_online_cpus();

	if (pebs_broken == x86_pmu.pebs_broken)
		return;

	/*
	 * Serialized by the microcode lock.
	 */
	if (x86_pmu.pebs_broken) {
		pr_info("PEBS enabled due to microcode update\n");
		x86_pmu.pebs_broken = 0;
	} else {
		pr_info("PEBS disabled due to CPU errata, please upgrade microcode\n");
		x86_pmu.pebs_broken = 1;
	}
}

/*
 * Under certain circumstances, accessing certain MSRs may cause #GP.
 * This function tests whether the given MSR can be safely accessed.
 */
static bool check_msr(unsigned long msr, u64 mask)
{
	u64 val_old, val_new, val_tmp;

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches; this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	if (rdmsrl_safe(msr, &val_old))
		return false;

	/*
	 * Only change the bits which can be updated by wrmsrl.
	 */
	val_tmp = val_old ^ mask;
	if (wrmsrl_safe(msr, val_tmp) ||
	    rdmsrl_safe(msr, &val_new))
		return false;

	if (val_new != val_tmp)
		return false;

	/*
	 * At this point the MSR is known to be safely accessible.
	 * Restore the old value and return.
	 */
	wrmsrl(msr, val_old);

	return true;
}

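/*
 * Typical use (as in intel_pmu_init() below): probe an MSR with a
 * conservative flip mask before relying on it:
 *
 *	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
 *		x86_pmu.lbr_nr = 0;
 */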
static __init void intel_sandybridge_quirk(void)
{
	x86_pmu.check_microcode = intel_snb_check_microcode;
	intel_snb_check_microcode();
}

static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
	{ PERF_COUNT_HW_CPU_CYCLES, "cpu cycles" },
	{ PERF_COUNT_HW_INSTRUCTIONS, "instructions" },
	{ PERF_COUNT_HW_BUS_CYCLES, "bus cycles" },
	{ PERF_COUNT_HW_CACHE_REFERENCES, "cache references" },
	{ PERF_COUNT_HW_CACHE_MISSES, "cache misses" },
	{ PERF_COUNT_HW_BRANCH_INSTRUCTIONS, "branch instructions" },
	{ PERF_COUNT_HW_BRANCH_MISSES, "branch misses" },
};

static __init void intel_arch_events_quirk(void)
{
	int bit;

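	/*
	 * x86_pmu.events_mask comes from CPUID.0xA:EBX, where a set bit
	 * means the corresponding architectural event is *not* available
	 * on this CPU.
	 */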
	/* disable events reported as not present by CPUID */
	for_each_set_bit(bit, x86_pmu.events_mask, ARRAY_SIZE(intel_arch_events_map)) {
		intel_perfmon_event_map[intel_arch_events_map[bit].id] = 0;
		pr_warn("CPUID marked event: \'%s\' unavailable\n",
			intel_arch_events_map[bit].name);
	}
}

static __init void intel_nehalem_quirk(void)
{
	union cpuid10_ebx ebx;

	ebx.full = x86_pmu.events_maskl;
	if (ebx.split.no_branch_misses_retired) {
		/*
		 * Erratum AAJ80 detected; we work around it by using
		 * the BR_MISP_EXEC.ANY event. This will over-count
		 * branch-misses, but it's still much better than the
		 * architectural event which is often completely bogus:
		 */
		intel_perfmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x7f89;
		ebx.split.no_branch_misses_retired = 0;
		x86_pmu.events_maskl = ebx.full;
		pr_info("CPU erratum AAJ80 worked around\n");
	}
}

/*
 * Enable software workaround for errata:
 * SNB: BJ122
 * IVB: BV98
 * HSW: HSD29
 *
 * Only needed when HT is enabled. However, detecting whether HT is
 * enabled is difficult (model specific). So instead, we enable the
 * workaround at early boot and verify whether it is needed in a later
 * initcall phase, once we have valid topology information to check
 * whether HT is actually enabled (see fixup_ht_bug() below).
 */
2912static __init void intel_ht_bug(void)
2913{
Stephane Eranianb37609c2014-11-17 20:07:04 +01002914 x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
Maria Dimakopoulou93fcf722014-11-17 20:06:59 +01002915
Maria Dimakopoulou93fcf722014-11-17 20:06:59 +01002916 x86_pmu.start_scheduling = intel_start_scheduling;
Peter Zijlstra0c41e752015-05-21 10:57:32 +02002917 x86_pmu.commit_scheduling = intel_commit_scheduling;
Maria Dimakopoulou93fcf722014-11-17 20:06:59 +01002918 x86_pmu.stop_scheduling = intel_stop_scheduling;
Maria Dimakopoulou93fcf722014-11-17 20:06:59 +01002919}
2920
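/*
 * EVENT_ATTR_STR() pairs a sysfs event name with the raw perf config
 * string exported for it, e.g. "mem-loads" maps to event 0xcd, umask 0x1,
 * ldlat 3.
 */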
EVENT_ATTR_STR(mem-loads,	mem_ld_hsw,	"event=0xcd,umask=0x1,ldlat=3");
EVENT_ATTR_STR(mem-stores,	mem_st_hsw,	"event=0xd0,umask=0x82");

/* Haswell special events */
EVENT_ATTR_STR(tx-start,	tx_start,	"event=0xc9,umask=0x1");
EVENT_ATTR_STR(tx-commit,	tx_commit,	"event=0xc9,umask=0x2");
EVENT_ATTR_STR(tx-abort,	tx_abort,	"event=0xc9,umask=0x4");
EVENT_ATTR_STR(tx-capacity,	tx_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(tx-conflict,	tx_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(el-start,	el_start,	"event=0xc8,umask=0x1");
EVENT_ATTR_STR(el-commit,	el_commit,	"event=0xc8,umask=0x2");
EVENT_ATTR_STR(el-abort,	el_abort,	"event=0xc8,umask=0x4");
EVENT_ATTR_STR(el-capacity,	el_capacity,	"event=0x54,umask=0x2");
EVENT_ATTR_STR(el-conflict,	el_conflict,	"event=0x54,umask=0x1");
EVENT_ATTR_STR(cycles-t,	cycles_t,	"event=0x3c,in_tx=1");
EVENT_ATTR_STR(cycles-ct,	cycles_ct,	"event=0x3c,in_tx=1,in_tx_cp=1");

static struct attribute *hsw_events_attrs[] = {
	EVENT_PTR(tx_start),
	EVENT_PTR(tx_commit),
	EVENT_PTR(tx_abort),
	EVENT_PTR(tx_capacity),
	EVENT_PTR(tx_conflict),
	EVENT_PTR(el_start),
	EVENT_PTR(el_commit),
	EVENT_PTR(el_abort),
	EVENT_PTR(el_capacity),
	EVENT_PTR(el_conflict),
	EVENT_PTR(cycles_t),
	EVENT_PTR(cycles_ct),
	EVENT_PTR(mem_ld_hsw),
	EVENT_PTR(mem_st_hsw),
	NULL
};

__init int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	union cpuid10_ebx ebx;
	struct event_constraint *c;
	unsigned int unused;
	struct extra_reg *er;
	int version, i;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
		switch (boot_cpu_data.x86) {
		case 0x6:
			return p6_pmu_init();
		case 0xb:
			return knc_pmu_init();
		case 0xf:
			return p4_pmu_init();
		}
		return -ENODEV;
	}

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired hw_event or not.
	 */
	cpuid(10, &eax.full, &ebx.full, &unused, &edx.full);
	if (eax.split.mask_length < ARCH_PERFMON_EVENTS_COUNT)
		return -ENODEV;

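	/*
	 * Architectural perfmon v1 lacks the global control/status MSRs,
	 * so it is served by the simpler core PMU; v2 and later get the
	 * full Intel PMU.
	 */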
	version = eax.split.version_id;
	if (version < 2)
		x86_pmu = core_pmu;
	else
		x86_pmu = intel_pmu;

	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.cntval_bits = eax.split.bit_width;
	x86_pmu.cntval_mask = (1ULL << eax.split.bit_width) - 1;

	x86_pmu.events_maskl = ebx.full;
	x86_pmu.events_mask_len = eax.split.mask_length;

	x86_pmu.max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);

	/*
	 * Quirk: v2 perfmon does not report fixed-purpose events, so
	 * assume at least 3 events:
	 */
	if (version > 1)
		x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

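	/*
	 * PERF_CAPABILITIES enumerates, among other things, the LBR and
	 * PEBS record formats and the full_width_write bit used further
	 * down.
	 */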
	if (boot_cpu_has(X86_FEATURE_PDCM)) {
		u64 capabilities;

		rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
		x86_pmu.intel_cap.capabilities = capabilities;
	}

	intel_ds_init();

	x86_add_quirk(intel_arch_events_quirk); /* Install first, so it runs last */

	/*
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
	case 14: /* 65nm Core "Yonah" */
		pr_cont("Core events, ");
		break;

	case 15: /* 65nm Core2 "Merom" */
		x86_add_quirk(intel_clovertown_quirk);
	case 22: /* 65nm Core2 "Merom-L" */
	case 23: /* 45nm Core2 "Penryn" */
	case 29: /* 45nm Core2 "Dunnington" (MP) */
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_core();

		x86_pmu.event_constraints = intel_core2_event_constraints;
		x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints;
		pr_cont("Core2 events, ");
		break;

	case 30: /* 45nm Nehalem */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_nehalem_event_constraints;
		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.extra_regs = intel_nehalem_extra_regs;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
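		/*
		 * The .inv=1/.cmask=1 encoding makes these count cycles in
		 * which fewer than one uop issued/executed, i.e. stall
		 * cycles.
		 */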

		x86_add_quirk(intel_nehalem_quirk);

		pr_cont("Nehalem events, ");
		break;

	case 28: /* 45nm Atom "Pineview" */
	case 38: /* 45nm Atom "Lincroft" */
	case 39: /* 32nm Atom "Penwell" */
	case 53: /* 32nm Atom "Cloverview" */
	case 54: /* 32nm Atom "Cedarview" */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_gen_event_constraints;
		x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints;
		pr_cont("Atom events, ");
		break;

	case 55: /* 22nm Atom "Silvermont" */
	case 76: /* 14nm Atom "Airmont" */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_atom();

		x86_pmu.event_constraints = intel_slm_event_constraints;
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		pr_cont("Silvermont events, ");
		break;

	case 37: /* 32nm Westmere */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_nhm();

		x86_pmu.event_constraints = intel_westmere_event_constraints;
		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
		x86_pmu.pebs_constraints = intel_westmere_pebs_event_constraints;
		x86_pmu.extra_regs = intel_westmere_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;

		x86_pmu.cpu_events = nhm_events_attrs;

		/* UOPS_ISSUED.STALLED_CYCLES */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_EXECUTED.CORE_ACTIVE_CYCLES,c=1,i=1 */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);

		pr_cont("Westmere events, ");
		break;

	case 42: /* 32nm SandyBridge */
	case 45: /* 32nm SandyBridge-E/EN/EP */
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 45)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
		/* UOPS_DISPATCHED.THREAD,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("SandyBridge events, ");
		break;

	case 58: /* 22nm IvyBridge */
	case 62: /* 22nm IvyBridge-EP/EX */
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		/* dTLB-load-misses on IVB is different than SNB */
		hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */

		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
		       sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_snb();

		x86_pmu.event_constraints = intel_ivb_event_constraints;
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		if (boot_cpu_data.x86_model == 62)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.cpu_events = snb_events_attrs;

		/* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */
		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);

		pr_cont("IvyBridge events, ");
		break;

	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
		x86_add_quirk(intel_ht_bug);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_hsw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.lbr_double_abort = true;
		pr_cont("Haswell events, ");
		break;

	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

		/* L3_MISS_LOCAL_DRAM is BIT(26) in Broadwell */
		hw_cache_extra_regs[C(LL)][C(OP_READ)][C(RESULT_MISS)] =
			HSW_DEMAND_READ|BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(LL)][C(OP_WRITE)][C(RESULT_MISS)] =
			HSW_DEMAND_WRITE|BDW_L3_MISS|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)] =
			HSW_DEMAND_READ|BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;
		hw_cache_extra_regs[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] =
			HSW_DEMAND_WRITE|BDW_L3_MISS_LOCAL|HSW_SNOOP_DRAM;

		intel_pmu_lbr_init_hsw();

		x86_pmu.event_constraints = intel_bdw_event_constraints;
		x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
		x86_pmu.extra_regs = intel_snbep_extra_regs;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
		/* all extra regs are per-cpu when HT is on */
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;

		x86_pmu.hw_config = hsw_hw_config;
		x86_pmu.get_event_constraints = hsw_get_event_constraints;
		x86_pmu.cpu_events = hsw_events_attrs;
		x86_pmu.limit_period = bdw_limit_period;
		pr_cont("Broadwell events, ");
		break;

	default:
		switch (x86_pmu.version) {
		case 1:
			x86_pmu.event_constraints = intel_v1_event_constraints;
			pr_cont("generic architected perfmon v1, ");
			break;
		default:
			/*
			 * default constraints for v2 and up
			 */
			x86_pmu.event_constraints = intel_gen_event_constraints;
			pr_cont("generic architected perfmon, ");
			break;
		}
	}

	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
	}
	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;

	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
	}

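	/*
	 * intel_ctrl mirrors the MSR_CORE_PERF_GLOBAL_CTRL layout: one
	 * enable bit per generic counter in the low bits, one per fixed
	 * counter starting at bit INTEL_PMC_IDX_FIXED (32).
	 */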
	x86_pmu.intel_ctrl |=
		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;

	if (x86_pmu.event_constraints) {
		/*
		 * event on fixed counter2 (REF_CYCLES) only works on this
		 * counter, so do not extend mask to generic counters
		 */
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask == FIXED_EVENT_FLAGS
			    && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
				c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
			}
			c->idxmsk64 &=
				~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
			c->weight = hweight64(c->idxmsk64);
		}
	}

	/*
	 * Accessing the LBR MSRs may cause #GP under certain circumstances,
	 * e.g. KVM doesn't support the LBR MSRs. Check all LBR MSRs here
	 * and disable LBR access if any of them cannot be accessed.
	 */
	if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
		x86_pmu.lbr_nr = 0;
	for (i = 0; i < x86_pmu.lbr_nr; i++) {
		if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
		      check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
			x86_pmu.lbr_nr = 0;
	}

	/*
	 * Accessing extra registers may cause #GP under certain
	 * circumstances, e.g. KVM doesn't support offcore events.
	 * Check all extra_regs here.
	 */
	if (x86_pmu.extra_regs) {
		for (er = x86_pmu.extra_regs; er->msr; er++) {
			er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
			/* Disable LBR select mapping */
			if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
				x86_pmu.lbr_sel_map = NULL;
		}
	}

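	/*
	 * Writes to the legacy perfctr MSRs are sign-extended from bit 31,
	 * hence the artificial 1<<31 max_period above; the MSR_IA32_PMCx
	 * aliases accept the full counter width.
	 */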
	/* Support full width counters using alternative MSR range */
	if (x86_pmu.intel_cap.full_width_write) {
		x86_pmu.max_period = x86_pmu.cntval_mask;
		x86_pmu.perfctr = MSR_IA32_PMC0;
		pr_cont("full-width counters, ");
	}

	return 0;
}

/*
 * HT bug: phase 2 init
 * Called once we have valid topology information to check
 * whether or not HT is enabled.
 * If HT is off, then we disable the workaround.
 */
static __init int fixup_ht_bug(void)
{
	int cpu = smp_processor_id();
	int w, c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

	w = cpumask_weight(topology_thread_cpumask(cpu));
	if (w > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}

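	/*
	 * The NMI watchdog is a perf user; keep it quiesced while the
	 * scheduling callbacks are torn down.
	 */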
	watchdog_nmi_disable_all();

	x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);

	x86_pmu.start_scheduling = NULL;
	x86_pmu.commit_scheduling = NULL;
	x86_pmu.stop_scheduling = NULL;

	watchdog_nmi_enable_all();

	get_online_cpus();

	for_each_online_cpu(c) {
		free_excl_cntrs(c);
	}

	put_online_cpus();
	pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
	return 0;
}
subsys_initcall(fixup_ht_bug);