cpu: Style fixes in the AtomicSimpleCPU.

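The fixes applied here follow the usual gem5 conventions: bind "&" and
"*" to the variable name rather than the type, use fatal_if/panic_if in
place of explicit if-plus-fatal/panic blocks, put the return type of
inline method definitions on its own line, brace multi-line conditional
bodies, rewrap over-long comment lines, and correct comment typos.
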
Change-Id: I42391e5a75c55022077f1ef78df97c54fa70f198
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/36976
Reviewed-by: Gabe Black <gabe.black@gmail.com>
Maintainer: Gabe Black <gabe.black@gmail.com>
Tested-by: kokoro <noreply+kokoro@google.com>
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 70162c9..bceed39 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -202,9 +202,9 @@
 
 
 void
-AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
+AtomicSimpleCPU::takeOverFrom(BaseCPU *old_cpu)
 {
-    BaseSimpleCPU::takeOverFrom(oldCPU);
+    BaseSimpleCPU::takeOverFrom(old_cpu);
 
     // The tick event should have been descheduled by drain()
     assert(!tickEvent.scheduled());
@@ -213,10 +213,9 @@
 void
 AtomicSimpleCPU::verifyMemoryMode() const
 {
-    if (!system->isAtomicMode()) {
-        fatal("The atomic CPU requires the memory system to be in "
-              "'atomic' mode.\n");
-    }
+    fatal_if(!system->isAtomicMode(),
+            "The atomic CPU requires the memory system to be in "
+              "'atomic' mode.");
 }
 
 void
@@ -236,8 +235,8 @@
         schedule(tickEvent, clockEdge(Cycles(0)));
     }
     _status = BaseSimpleCPU::Running;
-    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
-        == activeThreads.end()) {
+    if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) ==
+        activeThreads.end()) {
         activeThreads.push_back(thread_num);
     }
 
@@ -332,17 +331,17 @@
 }
 
 bool
-AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr& req, Addr frag_addr,
+AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
                                        int size, Request::Flags flags,
-                                       const std::vector<bool>& byte_enable,
-                                       int& frag_size, int& size_left) const
+                                       const std::vector<bool> &byte_enable,
+                                       int &frag_size, int &size_left) const
 {
     bool predicate = true;
     Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr();
 
     frag_size = std::min(
         cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
-        (Addr) size_left);
+        (Addr)size_left);
     size_left -= frag_size;
 
     // Set up byte-enable mask for the current fragment
@@ -360,12 +359,12 @@
 }
 
 Fault
-AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
+AtomicSimpleCPU::readMem(Addr addr, uint8_t *data, unsigned size,
                          Request::Flags flags,
-                         const std::vector<bool>& byte_enable)
+                         const std::vector<bool> &byte_enable)
 {
-    SimpleExecContext& t_info = *threadInfo[curThread];
-    SimpleThread* thread = t_info.thread;
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread *thread = t_info.thread;
 
     // use the CPU's statically allocated read request and packet objects
     const RequestPtr &req = data_read_req;
@@ -414,13 +413,8 @@
         }
 
         //If there's a fault, return it
-        if (fault != NoFault) {
-            if (req->isPrefetch()) {
-                return NoFault;
-            } else {
-                return fault;
-            }
-        }
+        if (fault != NoFault)
+            return req->isPrefetch() ? NoFault : fault;
 
         // If we don't need to access further cache lines, stop now.
         if (size_left == 0) {
@@ -446,8 +440,8 @@
                           Request::Flags flags, uint64_t *res,
                           const std::vector<bool>& byte_enable)
 {
-    SimpleExecContext& t_info = *threadInfo[curThread];
-    SimpleThread* thread = t_info.thread;
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread *thread = t_info.thread;
     static uint8_t zero_array[64] = {};
 
     if (data == NULL) {
@@ -529,18 +523,14 @@
 
         //If there's a fault or we don't need to access a second cache line,
         //stop now.
-        if (fault != NoFault || size_left == 0)
-        {
+        if (fault != NoFault || size_left == 0) {
             if (req->isLockedRMW() && fault == NoFault) {
                 assert(!req->isMasked());
                 locked = false;
             }
 
-            if (fault != NoFault && req->isPrefetch()) {
-                return NoFault;
-            } else {
-                return fault;
-            }
+            //Suppress faults from prefetches.
+            return req->isPrefetch() ? NoFault : fault;
         }
 
         /*
@@ -559,8 +549,8 @@
 AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
                         Request::Flags flags, AtomicOpFunctorPtr amo_op)
 {
-    SimpleExecContext& t_info = *threadInfo[curThread];
-    SimpleThread* thread = t_info.thread;
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread *thread = t_info.thread;
 
     // use the CPU's statically allocated amo request and packet objects
     const RequestPtr &req = data_amo_req;
@@ -579,9 +569,8 @@
     // accesses that cross cache-line boundaries, the cache needs to be
     // modified to support locking both cache lines to guarantee the
     // atomicity.
-    if (secondAddr > addr) {
-        panic("AMO request should not access across a cache line boundary\n");
-    }
+    panic_if(secondAddr > addr,
+        "AMO request should not access across a cache line boundary.");
 
     dcache_latency = 0;
 
@@ -600,9 +589,9 @@
         Packet pkt(req, Packet::makeWriteCmd(req));
         pkt.dataStatic(data);
 
-        if (req->isLocalAccess())
+        if (req->isLocalAccess()) {
             dcache_latency += req->localAccessor(thread->getTC(), &pkt);
-        else {
+        } else {
             dcache_latency += sendPacket(dcachePort, &pkt);
         }
 
@@ -628,7 +617,7 @@
     // Change thread if multi-threaded
     swapActiveThread();
 
-    // Set memroy request ids to current thread
+    // Set memory request ids to current thread
     if (numThreads > 1) {
         ContextID cid = threadContexts[curThread]->contextId();
 
@@ -638,8 +627,8 @@
         data_amo_req->setContext(cid);
     }
 
-    SimpleExecContext& t_info = *threadInfo[curThread];
-    SimpleThread* thread = t_info.thread;
+    SimpleExecContext &t_info = *threadInfo[curThread];
+    SimpleThread *thread = t_info.thread;
 
     Tick latency = 0;
 
@@ -692,8 +681,8 @@
 
                     assert(!ifetch_pkt.isError());
 
-                    // ifetch_req is initialized to read the instruction directly
-                    // into the CPU object's inst field.
+                    // ifetch_req is initialized to read the instruction
+                    // directly into the CPU object's inst field.
                 //}
             }
 
@@ -724,8 +713,9 @@
 
             // @todo remove me after debugging with legion done
             if (curStaticInst && (!curStaticInst->isMicroop() ||
-                        curStaticInst->isFirstMicroop()))
+                        curStaticInst->isFirstMicroop())) {
                 instCnt++;
+            }
 
             if (simulate_inst_stalls && icache_access)
                 stall_ticks += icache_latency;
diff --git a/src/cpu/simple/atomic.hh b/src/cpu/simple/atomic.hh
index 26f4c0c..3ff2580 100644
--- a/src/cpu/simple/atomic.hh
+++ b/src/cpu/simple/atomic.hh
@@ -86,12 +86,12 @@
      * <li>Stay at PC is true.
      * </ul>
      */
-    bool isCpuDrained() const {
+    bool
+    isCpuDrained() const
+    {
         SimpleExecContext &t_info = *threadInfo[curThread];
-
         return t_info.thread->microPC() == 0 &&
-            !locked &&
-            !t_info.stayAtPC;
+            !locked && !t_info.stayAtPC;
     }
 
     /**
@@ -120,13 +120,14 @@
 
       protected:
 
-        bool recvTimingResp(PacketPtr pkt)
+        bool
+        recvTimingResp(PacketPtr pkt)
         {
             panic("Atomic CPU doesn't expect recvTimingResp!\n");
-            return true;
         }
 
-        void recvReqRetry()
+        void
+        recvReqRetry()
         {
             panic("Atomic CPU doesn't expect recvRetry!\n");
         }
@@ -137,7 +138,7 @@
     {
 
       public:
-        AtomicCPUDPort(const std::string &_name, BaseSimpleCPU* _cpu)
+        AtomicCPUDPort(const std::string &_name, BaseSimpleCPU *_cpu)
             : AtomicCPUPort(_name, _cpu), cpu(_cpu)
         {
             cacheBlockMask = ~(cpu->cacheLineSize() - 1);
@@ -167,7 +168,7 @@
     Tick dcache_latency;
 
     /** Probe Points. */
-    ProbePointArg<std::pair<SimpleThread*, const StaticInstPtr>> *ppCommit;
+    ProbePointArg<std::pair<SimpleThread *, const StaticInstPtr>> *ppCommit;
 
   protected:
 
@@ -186,7 +187,7 @@
     void drainResume() override;
 
     void switchOut() override;
-    void takeOverFrom(BaseCPU *oldCPU) override;
+    void takeOverFrom(BaseCPU *old_cpu) override;
 
     void verifyMemoryMode() const override;
 
@@ -209,23 +210,25 @@
      * @param[in,out] size_left Size left to be processed in the memory access.
      * @return True if the byte-enable mask for the fragment is not all-false.
      */
-    bool genMemFragmentRequest(const RequestPtr& req, Addr frag_addr,
+    bool genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
                                int size, Request::Flags flags,
-                               const std::vector<bool>& byte_enable,
-                               int& frag_size, int& size_left) const;
+                               const std::vector<bool> &byte_enable,
+                               int &frag_size, int &size_left) const;
 
     Fault readMem(Addr addr, uint8_t *data, unsigned size,
                   Request::Flags flags,
-                  const std::vector<bool>& byte_enable = std::vector<bool>())
+                  const std::vector<bool> &byte_enable=std::vector<bool>())
         override;
 
-    Fault initiateHtmCmd(Request::Flags flags) override
+    Fault
+    initiateHtmCmd(Request::Flags flags) override
     {
         panic("initiateHtmCmd() is for timing accesses, and should "
               "never be called on AtomicSimpleCPU.\n");
     }
 
-    void htmSendAbortSignal(HtmFailureFaultCause cause) override
+    void
+    htmSendAbortSignal(HtmFailureFaultCause cause) override
     {
         panic("htmSendAbortSignal() is for timing accesses, and should "
               "never be called on AtomicSimpleCPU.\n");
@@ -233,10 +236,10 @@
 
     Fault writeMem(uint8_t *data, unsigned size,
                    Addr addr, Request::Flags flags, uint64_t *res,
-                   const std::vector<bool>& byte_enable = std::vector<bool>())
+                   const std::vector<bool> &byte_enable=std::vector<bool>())
         override;
 
-    Fault amoMem(Addr addr, uint8_t* data, unsigned size,
+    Fault amoMem(Addr addr, uint8_t *data, unsigned size,
                  Request::Flags flags, AtomicOpFunctorPtr amo_op) override;
 
     void regProbePoints() override;