[SPARC64]: Add clocksource/clockevents support.

I'd like to thank John Stultz and others for helping
me along the way.

A lot of cleanups fell out of this.  For example, the get_compare()
tick_op was totally unused, so it was deleted.  And the most often
used tick_op members were grouped together for cache-friendliness.
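
A rough sketch of the reorganized ops structure follows; the exact
field set and ordering here are illustrative assumptions, not a copy
of the final layout:

	struct sparc64_tick_ops {
		/* Hot members grouped up front so the common timer
		 * paths touch a single cache line. */
		unsigned long (*get_tick)(void);
		int (*add_compare)(unsigned long);
		unsigned long softint_mask;
		void (*disable_irq)(void);

		/* Cold members, mostly used once at boot. */
		void (*init_tick)(void);
		unsigned long (*add_tick)(unsigned long);

		char *name;
	};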

The sparc64 TSC is given to the kernel as a one-shot timer.

tick_ops->init_tick() simply turns off the privileged bit in
the tick register (when possible) and disables the interrupt
by setting bit 63 in the compare register.  The ->disable_irq()
op also sets this bit.
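
A minimal sketch of the interrupt-disable path, assuming bit 63 of
%tick_cmpr is the mask bit; the TICKCMP_IRQ_BIT name and the exact
asm sequence are illustrative and omit the chip-errata workarounds
the real code needs:

	#define TICKCMP_IRQ_BIT	(1UL << 63)

	static void tick_disable_irq(void)
	{
		/* Program only the IRQ-disable bit into the compare
		 * register so the one-shot timer cannot fire until a
		 * real event is set up. */
		__asm__ __volatile__("wr	%0, 0x0, %%tick_cmpr"
				     : /* no outputs */
				     : "r" (TICKCMP_IRQ_BIT));
	}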

tick_ops->add_compare() is changed to:

1) Add the given delta to "tick" not to "compare"
2) Return a boolean which, if true, means that the tick
   value read back after writing the compare value had
   already advanced past the newly programmed compare
   value, i.e. the event was missed.  This mirrors the
   logic used in the HPET driver's ->next_event() method;
   a sketch follows below.
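
A minimal sketch of that flow, assuming plain reads of %tick and a
plain write of %tick_cmpr (again omitting errata workarounds), just
to show the tick-based arithmetic and the return value:

	static int tick_add_compare(unsigned long adj)
	{
		unsigned long orig_tick, new_compare, new_tick;

		/* Base the compare value on the current tick, not on
		 * the previous compare value. */
		__asm__ __volatile__("rd	%%tick, %0" : "=r" (orig_tick));
		new_compare = orig_tick + adj;

		__asm__ __volatile__("wr	%0, 0x0, %%tick_cmpr"
				     : /* no outputs */
				     : "r" (new_compare));

		/* Re-read the tick; if it has already moved past the
		 * value just programmed, report the missed event so
		 * the caller can retry, as the HPET driver does. */
		__asm__ __volatile__("rd	%%tick, %0" : "=r" (new_tick));

		return ((long)(new_tick - new_compare)) > 0L;
	}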

Each tick_ops implementation also now provides a name string.
And we feed this into the clocksource and clockevents layers.
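
A minimal sketch of that wiring; the helper name
sparc64_register_clock_frameworks(), the ratings, and the trimmed-down
field sets are assumptions (a working clock_event_device also needs
its ops, mult/shift, and cpumask filled in), not the actual
registration code:

	#include <linux/clocksource.h>
	#include <linux/clockchips.h>

	static struct clocksource clocksource_tick = {
		.rating		= 100,
		.mask		= CLOCKSOURCE_MASK(64),
		.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	};

	static struct clock_event_device sparc64_clockevent = {
		.features	= CLOCK_EVT_FEAT_ONESHOT,
		.rating		= 100,
	};

	static void __init sparc64_register_clock_frameworks(void)
	{
		/* One name string from the active tick_ops identifies
		 * the timer in both frameworks. */
		clocksource_tick.name = tick_ops->name;
		sparc64_clockevent.name = tick_ops->name;

		clocksource_register(&clocksource_tick);
		clockevents_register_device(&sparc64_clockevent);
	}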

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 39deb03..d4f0a70 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -123,7 +123,7 @@
 	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
 }
 
-static void smp_setup_percpu_timer(void);
+extern void setup_sparc64_timer(void);
 
 static volatile unsigned long callin_flag = 0;
 
@@ -138,7 +138,7 @@
 
 	__flush_tlb_all();
 
-	smp_setup_percpu_timer();
+	setup_sparc64_timer();
 
 	if (cheetah_pcache_forced_on)
 		cheetah_enable_pcache();
@@ -175,8 +175,6 @@
 	panic("SMP bolixed\n");
 }
 
-static unsigned long current_tick_offset __read_mostly;
-
 /* This tick register synchronization scheme is taken entirely from
  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
  *
@@ -259,7 +257,7 @@
 				} else
 					adj = -delta;
 
-				tick_ops->add_tick(adj, current_tick_offset);
+				tick_ops->add_tick(adj);
 			}
 #if DEBUG_TICK_SYNC
 			t[i].rt = rt;
@@ -1178,30 +1176,9 @@
 	preempt_enable();
 }
 
-static void __init smp_setup_percpu_timer(void)
-{
-	unsigned long pstate;
-
-	/* Guarantee that the following sequences execute
-	 * uninterrupted.
-	 */
-	__asm__ __volatile__("rdpr	%%pstate, %0\n\t"
-			     "wrpr	%0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-
-	tick_ops->init_tick(current_tick_offset);
-
-	/* Restore PSTATE_IE. */
-	__asm__ __volatile__("wrpr	%0, 0x0, %%pstate"
-			     : /* no outputs */
-			     : "r" (pstate));
-}
-
 void __init smp_tick_init(void)
 {
 	boot_cpu_id = hard_smp_processor_id();
-	current_tick_offset = timer_tick_offset;
 }
 
 /* /proc/profile writes can call this, don't __init it please. */