resources: Update gapbs' gem5 system configs to run newer Linux kernel

The current gapbs system config runs into a kernel panic due to SWIOTLB.

This commit replaces the gapbs system config with the boot-exit system
config, which is known to work with most currently supported Linux kernels.

Signed-off-by: Hoa Nguyen <hoanguyen@ucdavis.edu>
Change-Id: I7468bf7c429bf897bb716e7fd82fc007b6ea19dd
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5-resources/+/41557
Reviewed-by: Bobby R. Bruce <bbruce@ucdavis.edu>
Maintainer: Bobby R. Bruce <bbruce@ucdavis.edu>
Tested-by: Bobby R. Bruce <bbruce@ucdavis.edu>
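
A minimal sketch of how the new MOESI_CMP_directory option is selected
(assuming gem5 is built with PROTOCOL=MOESI_CMP_directory and the
configs/ directory is on the Python path; the kernel and disk paths are
placeholders, not part of this change):

    from system import MyRubySystem

    # Build the full-system Ruby config with the new protocol option.
    system = MyRubySystem(kernel='/path/to/vmlinux',
                          disk='/path/to/disk.img',
                          cpu_type='kvm',
                          mem_sys='MOESI_CMP_directory',
                          num_cpus=4)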
diff --git a/src/gapbs/configs/system/MESI_Two_Level.py b/src/gapbs/configs/system/MESI_Two_Level.py
index dc868ab..d5c00bd 100644
--- a/src/gapbs/configs/system/MESI_Two_Level.py
+++ b/src/gapbs/configs/system/MESI_Two_Level.py
@@ -29,11 +29,13 @@
 """ This file creates a set of Ruby caches for the MESI TWO Level protocol
 This protocol models two level cache hierarchy. The L1 cache is split into
 instruction and data cache.
+
 This system support the memory size of up to 3GB.
+
 """
 
-
-
+from __future__ import print_function
+from __future__ import absolute_import
 
 import math
 
@@ -68,13 +70,14 @@
         # There is a single global list of all of the controllers to make it
         # easier to connect everything to the global network. This can be
         # customized depending on the topology/network requirements.
-        # L1 caches are private to a core, hence there are one L1 cache per CPU core.
-        # The number of L2 caches are dependent to the architecture.
+        # L1 caches are private to a core, hence there is one L1 cache per CPU
+        # core. The number of L2 caches depends on the architecture.
         self.controllers = \
             [L1Cache(system, self, cpu, self._numL2Caches) for cpu in cpus] + \
-            [L2Cache(system, self, self._numL2Caches) for num in range(self._numL2Caches)] + \
-            [DirController(self, system.mem_ranges, mem_ctrls)] + \
-            [DMAController(self) for i in range(len(dma_ports))]
+            [L2Cache(system, self, self._numL2Caches)
+                for num in range(self._numL2Caches)] + \
+            [DirController(self, system.mem_ranges, mem_ctrls)] + \
+            [DMAController(self) for i in range(len(dma_ports))]
 
         # Create one sequencer per CPU and dma controller.
         # Sequencers for other controllers can be here here.
@@ -220,7 +223,8 @@
         # This is the cache memory object that stores the cache data and tags
         self.L2cache = RubyCache(size = '1 MB',
                                 assoc = 16,
-                                start_index_bit = self.getBlockSizeBits(system, num_l2Caches))
+                                start_index_bit = self.getBlockSizeBits(system,
+                                num_l2Caches))
 
         self.transitions_per_cycle = '4'
         self.ruby_system = ruby_system
@@ -269,7 +273,7 @@
         self.ruby_system = ruby_system
         self.directory = RubyDirectoryMemory()
         # Connect this directory to the memory side.
-        self.memory = mem_ctrls[0].port
+        self.memory_out_port = mem_ctrls[0].port
         self.connectQueues(ruby_system)
 
     def connectQueues(self, ruby_system):
diff --git a/src/gapbs/configs/system/MI_example_caches.py b/src/gapbs/configs/system/MI_example_caches.py
index 79ca7a6..2fb8433 100644
--- a/src/gapbs/configs/system/MI_example_caches.py
+++ b/src/gapbs/configs/system/MI_example_caches.py
@@ -38,8 +38,8 @@
 
 """
 
-
-
+from __future__ import print_function
+from __future__ import absolute_import
 
 import math
 
@@ -205,7 +205,7 @@
         self.ruby_system = ruby_system
         self.directory = RubyDirectoryMemory()
         # Connect this directory to the memory side.
-        self.memory = mem_ctrls[0].port
+        self.memory_out_port = mem_ctrls[0].port
         self.connectQueues(ruby_system)
 
     def connectQueues(self, ruby_system):
diff --git a/src/gapbs/configs/system/MOESI_CMP_directory.py b/src/gapbs/configs/system/MOESI_CMP_directory.py
new file mode 100644
index 0000000..f24022a
--- /dev/null
+++ b/src/gapbs/configs/system/MOESI_CMP_directory.py
@@ -0,0 +1,351 @@
+# Copyright (c) 2020 The Regents of the University of California.
+# All Rights Reserved
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+""" This file creates a set of Ruby caches for the MOESI CMP directory
+protocol.
+This protocol models two level cache hierarchy. The L1 cache is split into
+instruction and data cache.
+
+This system support the memory size of up to 3GB.
+
+"""
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import math
+
+from m5.defines import buildEnv
+from m5.util import fatal, panic
+
+from m5.objects import *
+
+class MOESICMPDirCache(RubySystem):
+
+    def __init__(self):
+        if buildEnv['PROTOCOL'] != 'MOESI_CMP_directory':
+            fatal("This system assumes MOESI_CMP_directory!")
+
+        super(MOESICMPDirCache, self).__init__()
+
+        self._numL2Caches = 8
+
+    def setup(self, system, cpus, mem_ctrls, dma_ports, iobus):
+        """Set up the Ruby cache subsystem. Note: This can't be done in the
+           constructor because many of these items require a pointer to the
+           ruby system (self). This causes infinite recursion in initialize()
+           if we do this in the __init__.
+        """
+        # Ruby's global network.
+        self.network = MyNetwork(self)
+
+        # MOESI_CMP_directory example uses 3 virtual networks
+        self.number_of_virtual_networks = 3
+        self.network.number_of_virtual_networks = 3
+
+        # There is a single global list of all of the controllers to make it
+        # easier to connect everything to the global network. This can be
+        # customized depending on the topology/network requirements.
+        # L1 caches are private to a core, hence there is one L1 cache per CPU
+        # core. The number of L2 caches depends on the architecture.
+        self.controllers = \
+            [L1Cache(system, self, cpu, self._numL2Caches) for cpu in cpus] + \
+            [L2Cache(system, self, self._numL2Caches)
+                for num in range(self._numL2Caches)] + \
+            [DirController(self, system.mem_ranges, mem_ctrls)] + \
+            [DMAController(self) for i in range(len(dma_ports))]
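+        # Note: this ordering is relied on below; controllers[:len(cpus)]
+        # must be the L1 controllers and controllers[-len(dma_ports):] the
+        # DMA controllers.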
+
+        # Create one sequencer per CPU and dma controller.
+        # Sequencers for other controllers can be added here.
+        self.sequencers = [RubySequencer(version = i,
+                                # Grab the I/D caches from the controller
+                                icache = self.controllers[i].L1Icache,
+                                dcache = self.controllers[i].L1Dcache,
+                                clk_domain = self.controllers[i].clk_domain,
+                                pio_request_port = iobus.cpu_side_ports,
+                                mem_request_port = iobus.cpu_side_ports,
+                                pio_response_port = iobus.mem_side_ports
+                                ) for i in range(len(cpus))] + \
+                          [DMASequencer(version = i,
+                                        in_ports = port)
+                            for i,port in enumerate(dma_ports)
+                          ]
+
+        for i,c in enumerate(self.controllers[:len(cpus)]):
+            c.sequencer = self.sequencers[i]
+
+        # Connect each DMA sequencer to its DMA controller. The DMA
+        # sequencers come after the CPU sequencers in self.sequencers,
+        # hence the len(cpus) offset below.
+        for i,d in enumerate(self.controllers[-len(dma_ports):]):
+            i += len(cpus)
+            d.dma_sequencer = self.sequencers[i]
+
+        self.num_of_sequencers = len(self.sequencers)
+
+        # Create the network and connect the controllers.
+        # NOTE: This is quite different if using Garnet!
+        self.network.connectControllers(self.controllers)
+        self.network.setup_buffers()
+
+        # Set up a proxy port for the system_port. Used for load binaries and
+        # other functional-only things.
+        self.sys_port_proxy = RubyPortProxy()
+        system.system_port = self.sys_port_proxy.in_ports
+        self.sys_port_proxy.pio_request_port = iobus.cpu_side_ports
+
+        # Connect the cpu's cache, interrupt, and TLB ports to Ruby
+        for i,cpu in enumerate(cpus):
+            cpu.icache_port = self.sequencers[i].in_ports
+            cpu.dcache_port = self.sequencers[i].in_ports
+            isa = buildEnv['TARGET_ISA']
+            if isa == 'x86':
+                cpu.interrupts[0].pio = self.sequencers[i].interrupt_out_port
+                cpu.interrupts[0].int_requestor = self.sequencers[i].in_ports
+                cpu.interrupts[0].int_responder = self.sequencers[i].interrupt_out_port
+            if isa == 'x86' or isa == 'arm':
+                cpu.itb.walker.port = self.sequencers[i].in_ports
+                cpu.dtb.walker.port = self.sequencers[i].in_ports
+
+
+class L1Cache(L1Cache_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, system, ruby_system, cpu, num_l2Caches):
+        """Creating L1 cache controller. Consist of both instruction
+           and data cache. The size of data cache is 512KB and
+           8-way set associative. The instruction cache is 32KB,
+           2-way set associative.
+        """
+        super(L1Cache, self).__init__()
+
+        self.version = self.versionCount()
+        block_size_bits = int(math.log(system.cache_line_size, 2))
+        l1i_size = '32kB'
+        l1i_assoc = '2'
+        l1d_size = '512kB'
+        l1d_assoc = '8'
+        # This is the cache memory object that stores the cache data and tags
+        self.L1Icache = RubyCache(size = l1i_size,
+                                assoc = l1i_assoc,
+                                start_index_bit = block_size_bits,
+                                is_icache = True,
+                                dataAccessLatency = 1,
+                                tagAccessLatency = 1)
+        self.L1Dcache = RubyCache(size = l1d_size,
+                            assoc = l1d_assoc,
+                            start_index_bit = block_size_bits,
+                            is_icache = False,
+                            dataAccessLatency = 1,
+                            tagAccessLatency = 1)
+        self.clk_domain = cpu.clk_domain
+        self.prefetcher = RubyPrefetcher()
+        self.send_evictions = self.sendEvicts(cpu)
+        self.transitions_per_cycle = 4
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def getBlockSizeBits(self, system):
+        bits = int(math.log(system.cache_line_size, 2))
+        if 2**bits != system.cache_line_size.value:
+            panic("Cache line size not a power of 2!")
+        return bits
+
+    def sendEvicts(self, cpu):
+        """True if the CPU model or ISA requires sending evictions from caches
+           to the CPU. Three scenarios warrant forwarding evictions:
+           1. The O3 model must keep the LSQ coherent with the caches
+           2. The x86 mwait instruction is built on top of coherence
+           3. The local exclusive monitor in ARM systems
+        """
+        if type(cpu) is DerivO3CPU or \
+           buildEnv['TARGET_ISA'] in ('x86', 'arm'):
+            return True
+        return False
+
+    def connectQueues(self, ruby_system):
+        """Connect all of the queues for this controller.
+        """
+        self.mandatoryQueue = MessageBuffer()
+        self.requestFromL1Cache = MessageBuffer()
+        self.requestFromL1Cache.out_port = ruby_system.network.in_port
+        self.responseFromL1Cache = MessageBuffer()
+        self.responseFromL1Cache.out_port = ruby_system.network.in_port
+        self.requestToL1Cache = MessageBuffer()
+        self.requestToL1Cache.in_port = ruby_system.network.out_port
+        self.responseToL1Cache = MessageBuffer()
+        self.responseToL1Cache.in_port = ruby_system.network.out_port
+        self.triggerQueue = MessageBuffer(ordered = True)
+
+class L2Cache(L2Cache_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, system, ruby_system, num_l2Caches):
+
+        super(L2Cache, self).__init__()
+
+        self.version = self.versionCount()
+        # This is the cache memory object that stores the cache data and tags
+        self.L2cache = RubyCache(size = '1 MB',
+                                assoc = 16,
+                                start_index_bit = self.getL2StartIdx(system,
+                                num_l2Caches),
+                                dataAccessLatency = 20,
+                                tagAccessLatency = 20)
+
+        self.transitions_per_cycle = '4'
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def getL2StartIdx(self, system, num_l2caches):
+        l2_bits = int(math.log(num_l2caches, 2))
+        bits = int(math.log(system.cache_line_size, 2)) + l2_bits
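+        # e.g., with the default 64B cache line and the 8 L2 banks used
+        # here: log2(64) + log2(8) = 6 + 3 = 9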
+        return bits
+
+
+    def connectQueues(self, ruby_system):
+        """Connect all of the queues for this controller.
+        """
+        self.GlobalRequestFromL2Cache = MessageBuffer()
+        self.GlobalRequestFromL2Cache.out_port = ruby_system.network.in_port
+        self.L1RequestFromL2Cache = MessageBuffer()
+        self.L1RequestFromL2Cache.out_port = ruby_system.network.in_port
+        self.responseFromL2Cache = MessageBuffer()
+        self.responseFromL2Cache.out_port = ruby_system.network.in_port
+
+        self.GlobalRequestToL2Cache = MessageBuffer()
+        self.GlobalRequestToL2Cache.in_port = ruby_system.network.out_port
+        self.L1RequestToL2Cache = MessageBuffer()
+        self.L1RequestToL2Cache.in_port = ruby_system.network.out_port
+        self.responseToL2Cache = MessageBuffer()
+        self.responseToL2Cache.in_port = ruby_system.network.out_port
+        self.triggerQueue = MessageBuffer(ordered = True)
+
+
+
+class DirController(Directory_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, ruby_system, ranges, mem_ctrls):
+        """ranges are the memory ranges assigned to this controller.
+        """
+        if len(mem_ctrls) > 1:
+            panic("This cache system can only be connected to one mem ctrl")
+        super(DirController, self).__init__()
+        self.version = self.versionCount()
+        self.addr_ranges = ranges
+        self.ruby_system = ruby_system
+        self.directory = RubyDirectoryMemory()
+        # Connect this directory to the memory side.
+        self.memory_out_port = mem_ctrls[0].port
+        self.connectQueues(ruby_system)
+
+    def connectQueues(self, ruby_system):
+        self.requestToDir = MessageBuffer()
+        self.requestToDir.in_port = ruby_system.network.out_port
+        self.responseToDir = MessageBuffer()
+        self.responseToDir.in_port = ruby_system.network.out_port
+        self.responseFromDir = MessageBuffer()
+        self.responseFromDir.out_port = ruby_system.network.in_port
+        self.forwardFromDir = MessageBuffer()
+        self.forwardFromDir.out_port = ruby_system.network.in_port
+        self.requestToMemory = MessageBuffer()
+        self.responseFromMemory = MessageBuffer()
+        self.triggerQueue = MessageBuffer(ordered = True)
+
+class DMAController(DMA_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, ruby_system):
+        super(DMAController, self).__init__()
+        self.version = self.versionCount()
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def connectQueues(self, ruby_system):
+        self.mandatoryQueue = MessageBuffer()
+        self.responseFromDir = MessageBuffer()
+        self.responseFromDir.in_port = ruby_system.network.out_port
+        self.reqToDir = MessageBuffer()
+        self.reqToDir.out_port = ruby_system.network.in_port
+        self.respToDir = MessageBuffer()
+        self.respToDir.out_port = ruby_system.network.in_port
+        self.triggerQueue = MessageBuffer(ordered = True)
+
+
+class MyNetwork(SimpleNetwork):
+    """A simple point-to-point network. This doesn't not use garnet.
+    """
+
+    def __init__(self, ruby_system):
+        super(MyNetwork, self).__init__()
+        self.netifs = []
+        self.ruby_system = ruby_system
+
+    def connectControllers(self, controllers):
+        """Connect all of the controllers to routers and connec the routers
+           together in a point-to-point network.
+        """
+        # Create one router/switch per controller in the system
+        self.routers = [Switch(router_id = i) for i in range(len(controllers))]
+
+        # Make a link from each controller to the router. The link goes
+        # externally to the network.
+        self.ext_links = [SimpleExtLink(link_id=i, ext_node=c,
+                                        int_node=self.routers[i])
+                          for i, c in enumerate(controllers)]
+
+        # Make an "internal" link (internal to the network) between every pair
+        # of routers.
+        link_count = 0
+        self.int_links = []
+        for ri in self.routers:
+            for rj in self.routers:
+                if ri == rj: continue # Don't connect a router to itself!
+                link_count += 1
+                self.int_links.append(SimpleIntLink(link_id = link_count,
+                                                    src_node = ri,
+                                                    dst_node = rj))
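+
+        # Note: this fully connected topology creates N*(N-1) internal
+        # links for N routers.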
diff --git a/src/gapbs/configs/system/__init__.py b/src/gapbs/configs/system/__init__.py
old mode 100755
new mode 100644
index 94e676f..1bce258
--- a/src/gapbs/configs/system/__init__.py
+++ b/src/gapbs/configs/system/__init__.py
@@ -29,3 +29,4 @@
 
 from .system import MySystem
 from .ruby_system import MyRubySystem
+
diff --git a/src/gapbs/configs/system/caches.py b/src/gapbs/configs/system/caches.py
old mode 100755
new mode 100644
index c13f099..6f82baa
--- a/src/gapbs/configs/system/caches.py
+++ b/src/gapbs/configs/system/caches.py
@@ -45,11 +45,8 @@
 
 class PrefetchCache(Cache):
 
-
-    def __init__(self, options):
+    def __init__(self):
         super(PrefetchCache, self).__init__()
-        if not options or options.no_prefetchers:
-            return
         self.prefetcher = StridePrefetcher()
 
 class L1Cache(PrefetchCache):
@@ -63,9 +60,8 @@
     tgts_per_mshr = 20
     writeback_clean = True
 
-    def __init__(self, options=None):
-        super(L1Cache, self).__init__(options)
-        pass
+    def __init__(self):
+        super(L1Cache, self).__init__()
 
     def connectBus(self, bus):
         """Connect this cache to a memory-side bus"""
@@ -82,12 +78,8 @@
     # Set the default size
     size = '32kB'
 
-
-    def __init__(self, opts=None):
-        super(L1ICache, self).__init__(opts)
-        if not opts or not opts.l1i_size:
-            return
-        self.size = opts.l1i_size
+    def __init__(self):
+        super(L1ICache, self).__init__()
 
     def connectCPU(self, cpu):
         """Connect this cache's port to a CPU icache port"""
@@ -99,12 +91,8 @@
     # Set the default size
     size = '32kB'
 
-
-    def __init__(self, opts=None):
-        super(L1DCache, self).__init__(opts)
-        if not opts or not opts.l1d_size:
-            return
-        self.size = opts.l1d_size
+    def __init__(self):
+        super(L1DCache, self).__init__()
 
     def connectCPU(self, cpu):
         """Connect this cache's port to a CPU dcache port"""
@@ -150,39 +138,8 @@
     tgts_per_mshr = 12
     writeback_clean = True
 
-
-    def __init__(self, opts=None):
-        super(L2Cache, self).__init__(opts)
-        if not opts or not opts.l2_size:
-            return
-        self.size = opts.l2_size
-
-    def connectCPUSideBus(self, bus):
-        self.cpu_side = bus.mem_side_ports
-
-    def connectMemSideBus(self, bus):
-        self.mem_side = bus.cpu_side_ports
-
-class L3Cache(Cache):
-    """Simple L3 Cache bank with default values
-       This assumes that the L3 is made up of multiple banks. This cannot
-       be used as a standalone L3 cache.
-    """
-
-
-    # Default parameters
-    assoc = 32
-    tag_latency = 40
-    data_latency = 40
-    response_latency = 10
-    mshrs = 256
-    tgts_per_mshr = 12
-    clusivity = 'mostly_excl'
-    # size = '4MB'
-
-    def __init__(self, opts=None):
-        super(L3Cache, self).__init__()
-        self.size = ('4MB')
+    def __init__(self):
+        super(L2Cache, self).__init__()
 
     def connectCPUSideBus(self, bus):
         self.cpu_side = bus.mem_side_ports
diff --git a/src/gapbs/configs/system/fs_tools.py b/src/gapbs/configs/system/fs_tools.py
old mode 100755
new mode 100644
diff --git a/src/gapbs/configs/system/ruby_system.py b/src/gapbs/configs/system/ruby_system.py
old mode 100755
new mode 100644
index e5c602a..e7c1135
--- a/src/gapbs/configs/system/ruby_system.py
+++ b/src/gapbs/configs/system/ruby_system.py
@@ -32,6 +32,7 @@
 from m5.util import convert
 from .fs_tools import *
 
+
 class MyRubySystem(System):
 
     def __init__(self, kernel, disk, cpu_type, mem_sys, num_cpus):
@@ -69,17 +70,17 @@
         self.createMemoryControllersDDR3()
 
         # Create the cache hierarchy for the system.
-
         if mem_sys == 'MI_example':
             from .MI_example_caches import MIExampleSystem
             self.caches = MIExampleSystem()
         elif mem_sys == 'MESI_Two_Level':
             from .MESI_Two_Level import MESITwoLevelCache
             self.caches = MESITwoLevelCache()
-
+        elif mem_sys == 'MOESI_CMP_directory':
+            from .MOESI_CMP_directory import MOESICMPDirCache
+            self.caches = MOESICMPDirCache()
         self.caches.setup(self, self.cpu, self.mem_cntrls,
-                          [self.pc.south_bridge.ide.dma,
-                           self.iobus.mem_side_ports],
+                          [self.pc.south_bridge.ide.dma, self.iobus.mem_side_ports],
                           self.iobus)
 
         if self._host_parallel:
@@ -96,43 +97,32 @@
     def totalInsts(self):
         return sum([cpu.totalInsts() for cpu in self.cpu])
 
-    def createCPUThreads(self, cpu):
-        for c in cpu:
-            c.createThreads()
-
     def createCPU(self, cpu_type, num_cpus):
-        self.cpu = [X86KvmCPU(cpu_id = i)
-                        for i in range(num_cpus)]
-        self.kvm_vm = KvmVM()
-        self.mem_mode = 'atomic_noncaching'
         if cpu_type == "atomic":
-            self.timingCpu = [AtomicSimpleCPU(cpu_id = i,
-                                        switched_out = True)
+            self.cpu = [AtomicSimpleCPU(cpu_id = i)
                               for i in range(num_cpus)]
-            self.createCPUThreads(self.timingCpu)
-        elif cpu_type == "o3":
-            self.timingCpu = [DerivO3CPU(cpu_id = i,
-                                        switched_out = True)
-                        for i in range(num_cpus)]
-            self.createCPUThreads(self.timingCpu)
-        elif cpu_type == "simple":
-            self.timingCpu = [TimingSimpleCPU(cpu_id = i,
-                                        switched_out = True)
-                        for i in range(num_cpus)]
-            self.createCPUThreads(self.timingCpu)
+            self.mem_mode = 'atomic'
         elif cpu_type == "kvm":
-            pass
+            # Note KVM needs a VM and atomic_noncaching
+            self.cpu = [X86KvmCPU(cpu_id = i)
+                        for i in range(num_cpus)]
+            self.kvm_vm = KvmVM()
+            self.mem_mode = 'atomic_noncaching'
+        elif cpu_type == "o3":
+            self.cpu = [DerivO3CPU(cpu_id = i)
+                        for i in range(num_cpus)]
+            self.mem_mode = 'timing'
+        elif cpu_type == "simple":
+            self.cpu = [TimingSimpleCPU(cpu_id = i)
+                        for i in range(num_cpus)]
+            self.mem_mode = 'timing'
         else:
             m5.fatal("No CPU type {}".format(cpu_type))
 
-        self.createCPUThreads(self.cpu)
         for cpu in self.cpu:
+            cpu.createThreads()
             cpu.createInterruptController()
 
-    def switchCpus(self, old, new):
-        assert(new[0].switchedOut())
-        m5.switchCpus(self, list(zip(old, new)))
-
     def setDiskImages(self, img_path_1, img_path_2):
         disk0 = CowDisk(img_path_1)
         disk2 = CowDisk(img_path_2)
diff --git a/src/gapbs/configs/system/system.py b/src/gapbs/configs/system/system.py
old mode 100755
new mode 100644
index 9a4cb4d..3287c3e
--- a/src/gapbs/configs/system/system.py
+++ b/src/gapbs/configs/system/system.py
@@ -35,10 +35,10 @@
 
 class MySystem(System):
 
-
-    def __init__(self, kernel, disk, cpu_type, num_cpus):
+    def __init__(self, kernel, disk, cpu_type, num_cpus, no_kvm = False):
         super(MySystem, self).__init__()
-        no_kvm=False
+
+        self._no_kvm = no_kvm
         self._host_parallel = cpu_type == "kvm"
 
         # Set up the clock domain and the voltage domain
@@ -46,10 +46,8 @@
         self.clk_domain.clock = '3GHz'
         self.clk_domain.voltage_domain = VoltageDomain()
 
-        mem_size = '16GB'
-        self.mem_ranges = [AddrRange('100MB'), # For kernel
+        self.mem_ranges = [AddrRange(Addr('3GB')), # All data
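+                           # memory is capped at 3GB so it stays below
+                           # the I/O range at 0xC0000000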
                            AddrRange(0xC0000000, size=0x100000), # For I/0
-                           AddrRange(Addr('4GB'), size = mem_size) # All data
                            ]
 
         # Create the main memory bus
@@ -61,13 +59,12 @@
         # Set up the system port for functional access from the simulator
         self.system_port = self.membus.cpu_side_ports
 
-        self.initFS(self.membus,num_cpus)
+        self.initFS(self.membus, num_cpus)
 
         # Replace these paths with the path to your disk images.
         # The first disk is the root disk. The second could be used for swap
         # or anything else.
-        imagepath = disk
-        self.setDiskImages(imagepath, imagepath)
+        self.setDiskImages(disk, disk)
 
         # Change this path to point to the kernel you want to use
         self.workload.object_file = kernel
@@ -102,37 +99,47 @@
     def totalInsts(self):
         return sum([cpu.totalInsts() for cpu in self.cpu])
 
-    def createCPUThreads(self, cpu):
-        for c in cpu:
-            c.createThreads()
-
     def createCPU(self, cpu_type, num_cpus):
-        self.cpu = [X86KvmCPU(cpu_id = i)
-                        for i in range(num_cpus)]
-        self.kvm_vm = KvmVM()
-        self.mem_mode = 'atomic_noncaching'
-        if cpu_type == "atomic":
-            self.timingCpu = [AtomicSimpleCPU(cpu_id = i,
-                                        switched_out = True)
+        # set up a kvm core or an atomic core to boot
+        if self._no_kvm:
+            self.cpu = [AtomicSimpleCPU(cpu_id = i, switched_out = False)
                               for i in range(num_cpus)]
-            self.createCPUThreads(self.timingCpu)
-        elif cpu_type == "o3":
-            self.timingCpu = [DerivO3CPU(cpu_id = i,
-                                        switched_out = True)
+            self.mem_mode = 'atomic'
+        else:
+            # Note KVM needs a VM and atomic_noncaching
+            self.cpu = [X86KvmCPU(cpu_id = i, switched_out = False)
                         for i in range(num_cpus)]
-            self.createCPUThreads(self.timingCpu)
-        elif cpu_type == "simple":
-            self.timingCpu = [TimingSimpleCPU(cpu_id = i,
-                                        switched_out = True)
-                        for i in range(num_cpus)]
-            self.createCPUThreads(self.timingCpu)
+            self.kvm_vm = KvmVM()
+            self.mem_mode = 'atomic_noncaching'
+
+        for cpu in self.cpu:
+            cpu.createThreads()
+            cpu.createInterruptController()
+
+        # set up the detailed cpu or a kvm model with more cores
+        if cpu_type == "atomic":
+            self.detailedCpu = [AtomicSimpleCPU(cpu_id = i, switched_out = True)
+                                 for i in range(num_cpus)]
         elif cpu_type == "kvm":
-            pass
+            # Note KVM needs a VM and atomic_noncaching
+            self.detailedCpu = [X86KvmCPU(cpu_id = i, switched_out = True)
+                                 for i in range(num_cpus)]
+            self.kvm_vm = KvmVM()
+        elif cpu_type == "o3":
+            self.detailedCpu = [DerivO3CPU(cpu_id = i, switched_out = True)
+                                 for i in range(num_cpus)]
+        elif cpu_type == "simple" or cpu_type == "timing":
+            self.detailedCpu = [TimingSimpleCPU(cpu_id = i, switched_out = True)
+                                 for i in range(num_cpus)]
         else:
             m5.fatal("No CPU type {}".format(cpu_type))
 
-        self.createCPUThreads(self.cpu)
-        for cpu in self.cpu:
+        for cpu in self.detailedCpu:
+            cpu.createThreads()
             cpu.createInterruptController()
 
     def switchCpus(self, old, new):
@@ -145,10 +152,6 @@
         self.pc.south_bridge.ide.disks = [disk0, disk2]
 
     def createCacheHierarchy(self):
-        # Create an L3 cache (with crossbar)
-        self.l3bus = L2XBar(width = 64,
-                            snoop_filter = SnoopFilter(max_capacity='32MB'))
-
         for cpu in self.cpu:
             # Create a memory bus, a coherent crossbar, in this case
             cpu.l2bus = L2XBar()
@@ -173,13 +176,7 @@
             cpu.l2cache.connectCPUSideBus(cpu.l2bus)
 
             # Connect the L2 cache to the L3 bus
-            cpu.l2cache.connectMemSideBus(self.l3bus)
-
-        self.l3cache = L3Cache()
-        self.l3cache.connectCPUSideBus(self.l3bus)
-
-        # Connect the L3 cache to the membus
-        self.l3cache.connectMemSideBus(self.membus)
+            cpu.l2cache.connectMemSideBus(self.membus)
 
     def setupInterrupts(self):
         for cpu in self.cpu:
@@ -195,41 +192,14 @@
 
 
     def createMemoryControllersDDR3(self):
-        self._createMemoryControllers(2, DDR3_1600_8x8)
+        self._createMemoryControllers(1, DDR3_1600_8x8)
 
     def _createMemoryControllers(self, num, cls):
-        kernel_controller = self._createKernelMemoryController(cls)
-
-        ranges = self._getInterleaveRanges(self.mem_ranges[-1], num, 7, 20)
-
         self.mem_cntrls = [
-            MemCtrl(dram = cls(range = ranges[i]),
+            MemCtrl(dram = cls(range = self.mem_ranges[0]),
                     port = self.membus.mem_side_ports)
             for i in range(num)
-        ] + [kernel_controller]
-
-    def _createKernelMemoryController(self, cls):
-        return MemCtrl(dram = cls(range = self.mem_ranges[0]),
-                       port = self.membus.mem_side_ports)
-
-    def _getInterleaveRanges(self, rng, num, intlv_low_bit, xor_low_bit):
-        from math import log
-        bits = int(log(num, 2))
-        if 2**bits != num:
-            m5.fatal("Non-power of two number of memory controllers")
-
-        intlv_bits = bits
-        ranges = [
-            AddrRange(start=rng.start,
-                      end=rng.end,
-                      intlvHighBit = intlv_low_bit + intlv_bits - 1,
-                      xorHighBit = xor_low_bit + intlv_bits - 1,
-                      intlvBits = intlv_bits,
-                      intlvMatch = i)
-                for i in range(num)
-            ]
-
-        return ranges
+        ]
 
     def initFS(self, membus, cpus):
         self.pc = Pc()
@@ -240,7 +210,7 @@
         IO_address_space_base = 0x8000000000000000
         pci_config_address_space_base = 0xc000000000000000
         interrupts_address_space_base = 0xa000000000000000
-        APIC_range_size = 1 << 12
+        APIC_range_size = 1 << 12;
 
         # North Bridge
         self.iobus = IOXBar()
@@ -365,20 +335,9 @@
                     size = '%dB' % (self.mem_ranges[0].size() - 0x100000),
                     range_type = 1),
             ]
-        # Mark [mem_size, 3GB) as reserved if memory less than 3GB, which
-        # force IO devices to be mapped to [0xC0000000, 0xFFFF0000). Requests
-        # to this specific range can pass though bridge to iobus.
-        entries.append(X86E820Entry(addr = self.mem_ranges[0].size(),
-            size='%dB' % (0xC0000000 - self.mem_ranges[0].size()),
-            range_type=2))
 
         # Reserve the last 16kB of the 32-bit address space for m5ops
         entries.append(X86E820Entry(addr = 0xFFFF0000, size = '64kB',
                                     range_type=2))
 
-        # Add the rest of memory. This is where all the actual data is
-        entries.append(X86E820Entry(addr = self.mem_ranges[-1].start,
-            size='%dB' % (self.mem_ranges[-1].size()),
-            range_type=1))
-
         self.workload.e820_table.entries = entries