/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <hv/drv_pcie_rc_intf.h>
#include <arch/spr_def.h>
#include <asm/traps.h>

/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1
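
/*
 * (Cross-reference: the flag is set via irq_set_chip_data() in
 * tile_irq_activate() below, and tested in tile_irq_chip_ack() so
 * that hardware-cleared irqs skip the software clear_irqs() call.)
 */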

/*
 * The set of interrupts we enable for arch_local_irq_enable().
 * This is initialized to have just a single interrupt (a sentinel
 * that the kernel doesn't actually use).  During kernel init,
 * interrupts are added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
	INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define per-tile irq disable mask; the hardware/HV only has a single
 * mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
	____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we enable newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);
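
/*
 * (Clarifying note, grounded in the code below: tile_dev_intr()
 * increments irq_depth on entry, and only the outermost level
 * (depth == 1) re-unmasks everything not in irq_disable_mask before
 * decrementing, so an irq enabled by a nested handler is still
 * unmasked when the outermost interrupt finally exits.)
 */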

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
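
/*
 * Illustrative sketch (not in the original source): all three macros
 * take a one-hot bitmask rather than an irq number, so callers build
 * the mask themselves, e.g.:
 *
 *	mask_irqs(1UL << irq);		suppress delivery of 'irq'
 *	clear_irqs(1UL << irq);		drop any pending assertion
 *	unmask_irqs(1UL << irq);	allow delivery again
 *
 * This is exactly the pattern the tile_irq_chip_* callbacks below use.
 */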

/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILEPro, and IPI hardware on TILE-Gx.
 * Entered with interrupts disabled.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	int depth = __this_cpu_inc_return(irq_depth);
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("%s: stack overflow: %ld\n",
				 __func__, sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__this_cpu_inc(irq_stat.irq_dev_intr_count);

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 1)
		unmask_irqs(~__this_cpu_read(irq_disable_mask));

	__this_cpu_dec(irq_depth);

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}


/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
	if (__this_cpu_read(irq_depth) == 0)
		unmask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}

/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
	mask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}

/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
{
	mask_irqs(1UL << d->irq);
}

/* Unmask an interrupt. */
static void tile_irq_chip_unmask(struct irq_data *d)
{
	unmask_irqs(1UL << d->irq);
}

/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
	if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
		clear_irqs(1UL << d->irq);
}

/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
	if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
		unmask_irqs(1UL << d->irq);
}

static struct irq_chip tile_irq_chip = {
	.name = "tile_irq_chip",
	.irq_enable = tile_irq_chip_enable,
	.irq_disable = tile_irq_chip_disable,
	.irq_ack = tile_irq_chip_ack,
	.irq_eoi = tile_irq_chip_eoi,
	.irq_mask = tile_irq_chip_mask,
	.irq_unmask = tile_irq_chip_unmask,
};

void __init init_IRQ(void)
{
	ipi_init();
}

void setup_irq_regs(void)
{
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	arch_local_irq_unmask(INT_IPI_K);
#endif
}

void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
	/*
	 * We use handle_level_irq() by default because the pending
	 * interrupt vector (whether modeled by the HV on
	 * TILEPro or implemented in hardware on TILE-Gx) has
	 * level-style semantics for each bit.  An interrupt fires
	 * whenever a bit is high, not just at edges.
	 */
	irq_flow_handler_t handle = handle_level_irq;
	if (tile_irq_type == TILE_IRQ_PERCPU)
		handle = handle_percpu_irq;
	irq_set_chip_and_handler(irq, &tile_irq_chip, handle);

	/*
	 * Flag interrupts that are hardware-cleared so that ack()
	 * won't clear them.
	 */
	if (tile_irq_type == TILE_IRQ_HW_CLEAR)
		irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);
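
/*
 * Illustrative usage (hypothetical driver, not part of this file): a
 * device interrupt whose hardware clears the pending bit itself would
 * be wired up roughly as
 *
 *	tile_irq_activate(irq, TILE_IRQ_HW_CLEAR);
 *	rc = request_irq(irq, my_handler, 0, "mydev", dev);
 *
 * where 'my_handler', "mydev", and 'dev' are placeholder names.
 */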

void ack_bad_irq(unsigned int irq)
{
	pr_err("unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_PERF_EVENTS
	int i;

	seq_printf(p, "%*s: ", prec, "PMI");

	for_each_online_cpu(i)
		seq_printf(p, "%10llu ", per_cpu(perf_irqs, i));
	seq_puts(p, " perf_events\n");
#endif
	return 0;
}
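
/*
 * With CONFIG_PERF_EVENTS, the code above contributes one line to
 * /proc/interrupts shaped like (counts illustrative, one %10llu
 * field per online cpu):
 *
 *	PMI:       1234       5678  perf_events
 */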

#if CHIP_HAS_IPI()
int arch_setup_hwirq(unsigned int irq, int node)
{
	return irq >= NR_IRQS ? -EINVAL : 0;
}

void arch_teardown_hwirq(unsigned int irq) { }
#endif