/*
 * Copyright (C) 2001 Mike Corrigan IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/hv_call_event.h>
#include "it_lp_naca.h"

/*
 * The LpQueue is used to pass event data from the hypervisor to
 * the partition. This is where I/O interrupt events are communicated.
 *
 * It is written to by the hypervisor so cannot end up in the BSS.
 */
struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));

DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);

static char *event_types[HvLpEvent_Type_NumTypes] = {
	"Hypervisor",
	"Machine Facilities",
	"Session Manager",
	"SPD I/O",
	"Virtual Bus",
	"PCI I/O",
	"RIO I/O",
	"Virtual Lan",
	"Virtual I/O"
};

/* Array of LpEvent handler functions */
static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];

static struct HvLpEvent *get_next_hvlpevent(void)
{
	struct HvLpEvent *event;
	event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

	if (hvlpevent_is_valid(event)) {
		/* rmb() needed only for weakly consistent machines (regatta) */
		rmb();
		/* Set pointer to next potential event */
		hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
				IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
				IT_LP_EVENT_ALIGN;

		/* Wrap to beginning if no room at end */
		if (hvlpevent_queue.hq_current_event >
				hvlpevent_queue.hq_last_event) {
			hvlpevent_queue.hq_current_event =
					hvlpevent_queue.hq_event_stack;
		}
	} else {
		event = NULL;
	}

	return event;
}

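/*
 * A worked example of the advance arithmetic above, assuming
 * IT_LP_EVENT_ALIGN is 64 (events are laid out on 64-byte boundaries,
 * as the comment in hvlpevent_clear_valid() below notes): an event with
 * xSizeMinus1 = 79, i.e. 80 bytes, advances hq_current_event by
 * ((79 + 64) / 64) * 64 = 128 bytes, two full 64-byte slots.
 */
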
static unsigned long spread_lpevents = NR_CPUS;

int hvlpevent_is_pending(void)
{
	struct HvLpEvent *next_event;

	if (smp_processor_id() >= spread_lpevents)
		return 0;

	next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;

	return hvlpevent_is_valid(next_event) ||
		hvlpevent_queue.hq_overflow_pending;
}

static void hvlpevent_clear_valid(struct HvLpEvent *event)
{
	/* Tell the Hypervisor that we're done with this event.
	 * Also clear bits within this event that might look like valid bits,
	 * i.e. bits on 64-byte boundaries.
	 */
	struct HvLpEvent *tmp;
	unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
		IT_LP_EVENT_ALIGN) - 1;

	switch (extra) {
	case 3:
		tmp = (struct HvLpEvent *)((char *)event + 3 * IT_LP_EVENT_ALIGN);
		hvlpevent_invalidate(tmp);
		/* fall through */
	case 2:
		tmp = (struct HvLpEvent *)((char *)event + 2 * IT_LP_EVENT_ALIGN);
		hvlpevent_invalidate(tmp);
		/* fall through */
	case 1:
		tmp = (struct HvLpEvent *)((char *)event + 1 * IT_LP_EVENT_ALIGN);
		hvlpevent_invalidate(tmp);
	}

	mb();

	hvlpevent_invalidate(event);
}

void process_hvlpevents(void)
{
	struct HvLpEvent *event;

restart:
	/* If we have recursed, just return */
	if (!spin_trylock(&hvlpevent_queue.hq_lock))
		return;

	for (;;) {
		event = get_next_hvlpevent();
		if (event) {
			/* Call the appropriate handler here, passing
			 * a pointer to the LpEvent. The handler
			 * must make a copy of the LpEvent if it
			 * needs it in a bottom half (perhaps for
			 * an ACK).
			 *
			 * Handlers are responsible for ACK processing.
			 *
			 * The Hypervisor guarantees that LpEvents will
			 * only be delivered with types that we have
			 * registered for, so no type check is necessary
			 * here!
			 */
			if (event->xType < HvLpEvent_Type_NumTypes)
				__get_cpu_var(hvlpevent_counts)[event->xType]++;
			if (event->xType < HvLpEvent_Type_NumTypes &&
					lpEventHandler[event->xType])
				lpEventHandler[event->xType](event);
			else {
				u8 type = event->xType;

				/*
				 * Don't printk while holding the spinlock,
				 * as printk may require ACK events from the
				 * HV in order to send any characters there.
				 */
				hvlpevent_clear_valid(event);
				spin_unlock(&hvlpevent_queue.hq_lock);
				printk(KERN_INFO
					"Unexpected Lp Event type=%d\n", type);
				goto restart;
			}

			hvlpevent_clear_valid(event);
		} else if (hvlpevent_queue.hq_overflow_pending)
			/*
			 * No more valid events. If overflow events are
			 * pending, process them.
			 */
			HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
		else
			break;
	}

	spin_unlock(&hvlpevent_queue.hq_lock);
}

static int set_spread_lpevents(char *str)
{
	unsigned long val = simple_strtoul(str, NULL, 0);

	/*
	 * The parameter is the number of processors to share in processing
	 * lp events.
	 */
	if ((val > 0) && (val <= NR_CPUS)) {
		spread_lpevents = val;
		printk("lpevent processing spread over %ld processors\n", val);
	} else {
		printk("invalid spread_lpevents %ld\n", val);
	}

	return 1;
}
__setup("spread_lpevents=", set_spread_lpevents);

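/*
 * For example, booting with "spread_lpevents=2" on the kernel command
 * line limits LpEvent processing to CPUs 0 and 1, because
 * hvlpevent_is_pending() above returns 0 on any CPU whose id is greater
 * than or equal to spread_lpevents.
 */
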
void __init setup_hvlpevent_queue(void)
{
	void *eventStack;

	spin_lock_init(&hvlpevent_queue.hq_lock);

	/* Allocate a page for the Event Stack. */
	eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
	memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);

	/* Invoke the hypervisor to initialize the event stack */
	HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);

	hvlpevent_queue.hq_event_stack = eventStack;
	hvlpevent_queue.hq_current_event = eventStack;
	hvlpevent_queue.hq_last_event = (char *)eventStack +
		(IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
	hvlpevent_queue.hq_index = 0;
}

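/*
 * Note the layout this establishes: hq_last_event marks the last
 * address at which a maximum-size event (IT_LP_EVENT_MAX_SIZE) can
 * start and still fit in the stack, and get_next_hvlpevent() wraps
 * hq_current_event back to hq_event_stack once it advances past
 * hq_last_event, so the event stack behaves as a ring buffer.
 */
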
/* Register a handler for an LpEvent type */
int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
{
	if (eventType < HvLpEvent_Type_NumTypes) {
		lpEventHandler[eventType] = handler;
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_registerHandler);

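/*
 * A minimal usage sketch (hypothetical driver code; the handler name is
 * illustrative, but the signature matches the call made from
 * process_hvlpevents() above):
 *
 *	static void my_event_handler(struct HvLpEvent *event)
 *	{
 *		// Copy anything needed later: the event is invalidated
 *		// by process_hvlpevents() once this handler returns.
 *	}
 *
 *	if (HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
 *			my_event_handler) != 0)
 *		printk("handler registration failed\n");
 */
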
int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
{
	might_sleep();

	if (eventType < HvLpEvent_Type_NumTypes) {
		if (!lpEventHandlerPaths[eventType]) {
			lpEventHandler[eventType] = NULL;
			/*
			 * We now sleep until all other CPUs have scheduled.
			 * This ensures that the deletion is seen by all
			 * other CPUs, and that the deleted handler isn't
			 * still running on another CPU when we return.
			 */
			synchronize_sched();
			return 0;
		}
	}
	return 1;
}
EXPORT_SYMBOL(HvLpEvent_unregisterHandler);

/*
 * lpIndex is the partition index of the target partition; it is needed
 * only for the VirtualIo, VirtualLan and SessionMgr event types. For
 * the other types, pass zero to use our own partition index.
 */
int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_openLpEventPath(lpIndex, eventType);
		++lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}

int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
{
	if ((eventType < HvLpEvent_Type_NumTypes) &&
			lpEventHandler[eventType] &&
			lpEventHandlerPaths[eventType]) {
		if (lpIndex == 0)
			lpIndex = itLpNaca.xLpIndex;
		HvCallEvent_closeLpEventPath(lpIndex, eventType);
		--lpEventHandlerPaths[eventType];
		return 0;
	}
	return 1;
}

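/*
 * Taken together, the checks above imply the intended call order for a
 * driver: register a handler first, then open one or more paths
 * (HvLpEvent_openPath() fails when no handler is registered); close
 * every path before unregistering (HvLpEvent_unregisterHandler() fails
 * while lpEventHandlerPaths[eventType] is non-zero).
 */
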
static int proc_lpevents_show(struct seq_file *m, void *v)
{
	int cpu, i;
	unsigned long sum;
	static unsigned long cpu_totals[NR_CPUS];

	/* FIXME: do we care that there's no locking here? */
	sum = 0;
	for_each_online_cpu(cpu) {
		cpu_totals[cpu] = 0;
		for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
			cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
		}
		sum += cpu_totals[cpu];
	}

	seq_printf(m, "LpEventQueue 0\n");
	seq_printf(m, " events processed:\t%lu\n", sum);

	for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
		sum = 0;
		for_each_online_cpu(cpu) {
			sum += per_cpu(hvlpevent_counts, cpu)[i];
		}

		seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
	}

	seq_printf(m, "\n events processed by processor:\n");

	for_each_online_cpu(cpu) {
		seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
	}

	return 0;
}

static int proc_lpevents_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_lpevents_show, NULL);
}

static const struct file_operations proc_lpevents_operations = {
	.open		= proc_lpevents_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init proc_lpevents_init(void)
{
	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return 0;

	proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
		    &proc_lpevents_operations);
	return 0;
}
__initcall(proc_lpevents_init);