sched: eliminate __rq_clock() use

Eliminate __rq_clock() use by changing each call site to:

   __update_rq_clock(rq)
   now = rq->clock;

This is an identity transformation — no change in behavior.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/kernel/sched.c b/kernel/sched.c
index 8932110..d673451 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1967,9 +1967,12 @@
 	unsigned long total_load = this_rq->ls.load.weight;
 	unsigned long this_load =  total_load;
 	struct load_stat *ls = &this_rq->ls;
-	u64 now = __rq_clock(this_rq);
+	u64 now;
 	int i, scale;
 
+	__update_rq_clock(this_rq);
+	now = this_rq->clock;
+
 	this_rq->nr_load_updates++;
 	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
 		goto do_avg;
@@ -3458,7 +3461,8 @@
 
 	spin_lock_irq(&rq->lock);
 	clear_tsk_need_resched(prev);
-	now = __rq_clock(rq);
+	__update_rq_clock(rq);
+	now = rq->clock;
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 969f08c..bd20fad 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -672,7 +672,10 @@
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct sched_entity *next;
-	u64 now = __rq_clock(rq);
+	u64 now;
+
+	__update_rq_clock(rq);
+	now = rq->clock;
 
 	/*
 	 * Dequeue and enqueue the task to update its
@@ -824,8 +827,10 @@
 static void yield_task_fair(struct rq *rq, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-	u64 now = __rq_clock(rq);
+	u64 now;
 
+	__update_rq_clock(rq);
+	now = rq->clock;
 	/*
 	 * Dequeue and enqueue the task to update its
 	 * position within the tree: