resources: Update parsec gem5 system config to run newer Linux kernels

The current parsec system config runs into a SWIOTLB-related kernel panic
when booting Linux kernel versions 5.4.49 and 5.4.51.

This commit replaces the parsec system config with the boot-exit system
config, which is known to work with most of the currently supported Linux
kernels, including version 5.4.49.

Signed-off-by: Hoa Nguyen <hoanguyen@ucdavis.edu>
Change-Id: I1c6d3fb8c6d2d21e02a2eda9aa3c789a3b9968d5
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5-resources/+/42261
Reviewed-by: Bobby R. Bruce <bbruce@ucdavis.edu>
Maintainer: Bobby R. Bruce <bbruce@ucdavis.edu>
Tested-by: Bobby R. Bruce <bbruce@ucdavis.edu>
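
Note: the updated run_parsec.py mirrors the boot-exit run script: the
classic-memory-system path builds MySystem, while the Ruby protocols go
through the new MyRubySystem. A minimal sketch of that dispatch, assuming
a mem_sys argument as in boot-exit (the script's full argument handling is
only partially shown in this diff):

    # Sketch only -- 'mem_sys' and the surrounding argument parsing are
    # assumptions based on the constructors added in this change.
    if mem_sys == 'classic':
        system = MySystem(kernel, disk, cpu, int(num_cpus))
    else:
        system = MyRubySystem(kernel, disk, cpu, mem_sys, int(num_cpus))
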
diff --git a/src/parsec/configs/run_parsec.py b/src/parsec/configs/run_parsec.py
old mode 100755
new mode 100644
index 22bbece..e654f3d
--- a/src/parsec/configs/run_parsec.py
+++ b/src/parsec/configs/run_parsec.py
@@ -75,8 +75,8 @@
     if not cpu in ['kvm', 'timing']:
         m5.fatal("cpu not supported")
 
-    # create the system we are going to simulate
-    system = MySystem(kernel, disk, int(num_cpus), opts, no_kvm=False)
+    # create the system
+    system = MySystem(kernel, disk, cpu, int(num_cpus))
 
     # Exit from guest on workbegin/workend
     system.exit_on_work_items = True
@@ -123,7 +123,7 @@
         start_insts = system.totalInsts()
         # switching to timing cpu if argument cpu == timing
         if cpu == 'timing':
-            system.switchCpus(system.cpu, system.timingCpu)
+            system.switchCpus(system.cpu, system.detailedCpu)
     else:
         print("Unexpected termination of simulation!")
         print()
diff --git a/src/parsec/configs/system/MESI_Two_Level.py b/src/parsec/configs/system/MESI_Two_Level.py
new file mode 100644
index 0000000..314f640
--- /dev/null
+++ b/src/parsec/configs/system/MESI_Two_Level.py
@@ -0,0 +1,341 @@
+#Copyright (c) 2020 The Regents of the University of California.
+#All Rights Reserved
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+""" This file creates a set of Ruby caches for the MESI TWO Level protocol
+This protocol models two level cache hierarchy. The L1 cache is split into
+instruction and data cache.
+
+This system support the memory size of up to 3GB.
+
+"""
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import math
+
+from m5.defines import buildEnv
+from m5.util import fatal, panic
+
+from m5.objects import *
+
+class MESITwoLevelCache(RubySystem):
+
+    def __init__(self):
+        if buildEnv['PROTOCOL'] != 'MESI_Two_Level':
+            fatal("This system assumes MESI_Two_Level!")
+
+        super(MESITwoLevelCache, self).__init__()
+
+        self._numL2Caches = 8
+
+    def setup(self, system, cpus, mem_ctrls, dma_ports, iobus):
+        """Set up the Ruby cache subsystem. Note: This can't be done in the
+           constructor because many of these items require a pointer to the
+           ruby system (self). This causes infinite recursion in initialize()
+           if we do this in the __init__.
+        """
+        # Ruby's global network.
+        self.network = MyNetwork(self)
+
+        # MESI_Two_Level example uses 5 virtual networks
+        self.number_of_virtual_networks = 5
+        self.network.number_of_virtual_networks = 5
+
+        # There is a single global list of all of the controllers to make it
+        # easier to connect everything to the global network. This can be
+        # customized depending on the topology/network requirements.
+        # L1 caches are private to a core, hence there is one L1 cache per
+        # CPU core. The number of L2 caches depends on the architecture.
+        self.controllers = \
+            [L1Cache(system, self, cpu, self._numL2Caches) for cpu in cpus] + \
+            [L2Cache(system, self, self._numL2Caches) for num in \
+            range(self._numL2Caches)] + [DirController(self, \
+            system.mem_ranges, mem_ctrls)] + [DMAController(self) for i \
+            in range(len(dma_ports))]
+
+        # Create one sequencer per CPU and DMA controller.
+        # Sequencers for other controllers can be added here.
+        self.sequencers = [RubySequencer(version = i,
+                                # Grab the I and D caches from the ctrl
+                                icache = self.controllers[i].L1Icache,
+                                dcache = self.controllers[i].L1Dcache,
+                                clk_domain = self.controllers[i].clk_domain,
+                                pio_request_port = iobus.cpu_side_ports,
+                                mem_request_port = iobus.cpu_side_ports,
+                                pio_response_port = iobus.mem_side_ports
+                                ) for i in range(len(cpus))] + \
+                          [DMASequencer(version = i,
+                                        in_ports = port)
+                            for i,port in enumerate(dma_ports)
+                          ]
+
+        for i,c in enumerate(self.controllers[:len(cpus)]):
+            c.sequencer = self.sequencers[i]
+
+        # Connect each DMA sequencer to its DMA controller
+        for i,d in enumerate(self.controllers[-len(dma_ports):]):
+            i += len(cpus)
+            d.dma_sequencer = self.sequencers[i]
+
+        self.num_of_sequencers = len(self.sequencers)
+
+        # Create the network and connect the controllers.
+        # NOTE: This is quite different if using Garnet!
+        self.network.connectControllers(self.controllers)
+        self.network.setup_buffers()
+
+        # Set up a proxy port for the system_port. Used for loading
+        # binaries and other functional-only things.
+        self.sys_port_proxy = RubyPortProxy()
+        system.system_port = self.sys_port_proxy.in_ports
+        self.sys_port_proxy.pio_request_port = iobus.cpu_side_ports
+
+        # Connect the cpu's cache, interrupt, and TLB ports to Ruby
+        for i,cpu in enumerate(cpus):
+            cpu.icache_port = self.sequencers[i].in_ports
+            cpu.dcache_port = self.sequencers[i].in_ports
+            isa = buildEnv['TARGET_ISA']
+            if isa == 'x86':
+                cpu.interrupts[0].pio = self.sequencers[i].interrupt_out_port
+                cpu.interrupts[0].int_requestor = self.sequencers[i].in_ports
+                cpu.interrupts[0].int_responder = \
+                                        self.sequencers[i].interrupt_out_port
+            if isa == 'x86' or isa == 'arm':
+                cpu.itb.walker.port = self.sequencers[i].in_ports
+                cpu.dtb.walker.port = self.sequencers[i].in_ports
+
+class L1Cache(L1Cache_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, system, ruby_system, cpu, num_l2Caches):
+        """Creating L1 cache controller. Consist of both instruction
+           and data cache. The size of data cache is 512KB and
+           8-way set associative. The instruction cache is 32KB,
+           2-way set associative.
+        """
+        super(L1Cache, self).__init__()
+
+        self.version = self.versionCount()
+        block_size_bits = int(math.log(system.cache_line_size, 2))
+        l1i_size = '32kB'
+        l1i_assoc = '2'
+        l1d_size = '512kB'
+        l1d_assoc = '8'
+        # This is the cache memory object that stores the cache data and tags
+        self.L1Icache = RubyCache(size = l1i_size,
+                                assoc = l1i_assoc,
+                                start_index_bit = block_size_bits ,
+                                is_icache = True)
+        self.L1Dcache = RubyCache(size = l1d_size,
+                            assoc = l1d_assoc,
+                            start_index_bit = block_size_bits,
+                            is_icache = False)
+        self.l2_select_num_bits = int(math.log(num_l2Caches , 2))
+        self.clk_domain = cpu.clk_domain
+        self.prefetcher = RubyPrefetcher()
+        self.send_evictions = self.sendEvicts(cpu)
+        self.transitions_per_cycle = 4
+        self.enable_prefetch = False
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def getBlockSizeBits(self, system):
+        bits = int(math.log(system.cache_line_size, 2))
+        if 2**bits != system.cache_line_size.value:
+            panic("Cache line size not a power of 2!")
+        return bits
+
+    def sendEvicts(self, cpu):
+        """True if the CPU model or ISA requires sending evictions from caches
+           to the CPU. Three scenarios warrant forwarding evictions:
+           1. The O3 model must keep the LSQ coherent with the caches
+           2. The x86 mwait instruction is built on top of coherence
+           3. The local exclusive monitor in ARM systems
+        """
+        if type(cpu) is DerivO3CPU or \
+           buildEnv['TARGET_ISA'] in ('x86', 'arm'):
+            return True
+        return False
+
+    def connectQueues(self, ruby_system):
+        """Connect all of the queues for this controller.
+        """
+        self.mandatoryQueue = MessageBuffer()
+        self.requestFromL1Cache = MessageBuffer()
+        self.requestFromL1Cache.out_port = ruby_system.network.in_port
+        self.responseFromL1Cache = MessageBuffer()
+        self.responseFromL1Cache.out_port = ruby_system.network.in_port
+        self.unblockFromL1Cache = MessageBuffer()
+        self.unblockFromL1Cache.out_port = ruby_system.network.in_port
+
+        self.optionalQueue = MessageBuffer()
+
+        self.requestToL1Cache = MessageBuffer()
+        self.requestToL1Cache.in_port = ruby_system.network.out_port
+        self.responseToL1Cache = MessageBuffer()
+        self.responseToL1Cache.in_port = ruby_system.network.out_port
+
+class L2Cache(L2Cache_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, system, ruby_system, num_l2Caches):
+
+        super(L2Cache, self).__init__()
+
+        self.version = self.versionCount()
+        # This is the cache memory object that stores the cache data and tags
+        self.L2cache = RubyCache(size = '1 MB',
+                                assoc = 16,
+                                start_index_bit = self.getBlockSizeBits(system,
+                                num_l2Caches))
+
+        self.transitions_per_cycle = '4'
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def getBlockSizeBits(self, system, num_l2caches):
+        l2_bits = int(math.log(num_l2caches, 2))
+        bits = int(math.log(system.cache_line_size, 2)) + l2_bits
+        return bits
+
+
+    def connectQueues(self, ruby_system):
+        """Connect all of the queues for this controller.
+        """
+        self.DirRequestFromL2Cache = MessageBuffer()
+        self.DirRequestFromL2Cache.out_port = ruby_system.network.in_port
+        self.L1RequestFromL2Cache = MessageBuffer()
+        self.L1RequestFromL2Cache.out_port = ruby_system.network.in_port
+        self.responseFromL2Cache = MessageBuffer()
+        self.responseFromL2Cache.out_port = ruby_system.network.in_port
+        self.unblockToL2Cache = MessageBuffer()
+        self.unblockToL2Cache.in_port = ruby_system.network.out_port
+        self.L1RequestToL2Cache = MessageBuffer()
+        self.L1RequestToL2Cache.in_port = ruby_system.network.out_port
+        self.responseToL2Cache = MessageBuffer()
+        self.responseToL2Cache.in_port = ruby_system.network.out_port
+
+
+class DirController(Directory_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, ruby_system, ranges, mem_ctrls):
+        """ranges are the memory ranges assigned to this controller.
+        """
+        if len(mem_ctrls) > 1:
+            panic("This cache system can only be connected to one mem ctrl")
+        super(DirController, self).__init__()
+        self.version = self.versionCount()
+        self.addr_ranges = ranges
+        self.ruby_system = ruby_system
+        self.directory = RubyDirectoryMemory()
+        # Connect this directory to the memory side.
+        self.memory_out_port = mem_ctrls[0].port
+        self.connectQueues(ruby_system)
+
+    def connectQueues(self, ruby_system):
+        self.requestToDir = MessageBuffer()
+        self.requestToDir.in_port = ruby_system.network.out_port
+        self.responseToDir = MessageBuffer()
+        self.responseToDir.in_port = ruby_system.network.out_port
+        self.responseFromDir = MessageBuffer()
+        self.responseFromDir.out_port = ruby_system.network.in_port
+        self.requestToMemory = MessageBuffer()
+        self.responseFromMemory = MessageBuffer()
+
+class DMAController(DMA_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, ruby_system):
+        super(DMAController, self).__init__()
+        self.version = self.versionCount()
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def connectQueues(self, ruby_system):
+        self.mandatoryQueue = MessageBuffer()
+        self.responseFromDir = MessageBuffer(ordered = True)
+        self.responseFromDir.in_port = ruby_system.network.out_port
+        self.requestToDir = MessageBuffer()
+        self.requestToDir.out_port = ruby_system.network.in_port
+
+
+class MyNetwork(SimpleNetwork):
+    """A simple point-to-point network. This doesn't not use garnet.
+    """
+
+    def __init__(self, ruby_system):
+        super(MyNetwork, self).__init__()
+        self.netifs = []
+        self.ruby_system = ruby_system
+
+    def connectControllers(self, controllers):
+        """Connect all of the controllers to routers and connec the routers
+           together in a point-to-point network.
+        """
+        # Create one router/switch per controller in the system
+        self.routers = [Switch(router_id = i) for i in range(len(controllers))]
+
+        # Make a link from each controller to the router. The link goes
+        # externally to the network.
+        self.ext_links = [SimpleExtLink(link_id=i, ext_node=c,
+                                        int_node=self.routers[i])
+                          for i, c in enumerate(controllers)]
+
+        # Make an "internal" link (internal to the network) between every pair
+        # of routers.
+        link_count = 0
+        self.int_links = []
+        for ri in self.routers:
+            for rj in self.routers:
+                if ri == rj: continue # Don't connect a router to itself!
+                link_count += 1
+                self.int_links.append(SimpleIntLink(link_id = link_count,
+                                                    src_node = ri,
+                                                    dst_node = rj))
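
Note: a standalone sketch (plain Python, no gem5 imports; the counts are
hypothetical) of the controller/sequencer list layout that setup() in
MESI_Two_Level.py relies on:

    # Layout of self.controllers / self.sequencers built in setup().
    # MESI_Two_Level.py fixes _numL2Caches = 8; other counts vary.
    num_cpus, num_l2, num_dma = 2, 8, 2
    controllers = (['L1'] * num_cpus + ['L2'] * num_l2
                   + ['Dir'] + ['DMA'] * num_dma)
    sequencers = ['RubySequencer'] * num_cpus + ['DMASequencer'] * num_dma

    # CPU sequencers pair with the leading L1 controllers ...
    assert all(c == 'L1' for c in controllers[:num_cpus])
    # ... and DMA sequencers pair with the trailing DMA controllers,
    # offset by len(cpus), matching the 'i += len(cpus)' loop in setup().
    for i, c in enumerate(controllers[-num_dma:]):
        assert c == 'DMA' and sequencers[i + num_cpus] == 'DMASequencer'
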
diff --git a/src/parsec/configs/system/MI_example_caches.py b/src/parsec/configs/system/MI_example_caches.py
new file mode 100644
index 0000000..a9a171c
--- /dev/null
+++ b/src/parsec/configs/system/MI_example_caches.py
@@ -0,0 +1,277 @@
+# Copyright (c) 2021 The Regents of the University of California
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+""" This file creates a set of Ruby caches, the Ruby network, and a simple
+point-to-point topology.
+See Part 3 in the Learning gem5 book: learning.gem5.org/book/part3
+You can change simple_ruby to import from this file instead of from msi_caches
+to use the MI_example protocol instead of MSI.
+
+IMPORTANT: If you modify this file, it's likely that the Learning gem5 book
+           also needs to be updated. For now, email Jason <jason@lowepower.com>
+
+"""
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import math
+
+from m5.defines import buildEnv
+from m5.util import fatal, panic
+
+from m5.objects import *
+
+class MIExampleSystem(RubySystem):
+
+    def __init__(self):
+        if buildEnv['PROTOCOL'] != 'MI_example':
+            fatal("This system assumes MI_example!")
+
+        super(MIExampleSystem, self).__init__()
+
+    def setup(self, system, cpus, mem_ctrls, dma_ports, iobus):
+        """Set up the Ruby cache subsystem. Note: This can't be done in the
+           constructor because many of these items require a pointer to the
+           ruby system (self). This causes infinite recursion in initialize()
+           if we do this in the __init__.
+        """
+        # Ruby's global network.
+        self.network = MyNetwork(self)
+
+        # MI example uses 5 virtual networks
+        self.number_of_virtual_networks = 5
+        self.network.number_of_virtual_networks = 5
+
+        # There is a single global list of all of the controllers to make it
+        # easier to connect everything to the global network. This can be
+        # customized depending on the topology/network requirements.
+        # Create one controller for each L1 cache (and the cache mem obj.)
+        # Create a single directory controller (Really the memory cntrl)
+        self.controllers = \
+            [L1Cache(system, self, cpu) for cpu in cpus] + \
+            [DirController(self, system.mem_ranges, mem_ctrls)] + \
+            [DMAController(self) for i in range(len(dma_ports))]
+
+        # Create one sequencer per CPU. In many systems this is more
+        # complicated since you have to create sequencers for DMA controllers
+        # and other controllers, too.
+        self.sequencers = [RubySequencer(version = i,
+                                # I/D cache is combined; grab it from ctrl
+                                icache = self.controllers[i].cacheMemory,
+                                dcache = self.controllers[i].cacheMemory,
+                                clk_domain = self.controllers[i].clk_domain,
+                                pio_request_port = iobus.cpu_side_ports,
+                                mem_request_port = iobus.cpu_side_ports,
+                                pio_response_port = iobus.mem_side_ports
+                                ) for i in range(len(cpus))] + \
+                          [DMASequencer(version = i,
+                                        in_ports = port)
+                            for i,port in enumerate(dma_ports)
+                          ]
+
+        for i,c in enumerate(self.controllers[0:len(cpus)]):
+            c.sequencer = self.sequencers[i]
+
+        for i,d in enumerate(self.controllers[-len(dma_ports):]):
+            i += len(cpus)
+            d.dma_sequencer = self.sequencers[i]
+
+        self.num_of_sequencers = len(self.sequencers)
+
+        # Create the network and connect the controllers.
+        # NOTE: This is quite different if using Garnet!
+        self.network.connectControllers(self.controllers)
+        self.network.setup_buffers()
+
+        # Set up a proxy port for the system_port. Used for loading
+        # binaries and other functional-only things.
+        self.sys_port_proxy = RubyPortProxy()
+        system.system_port = self.sys_port_proxy.in_ports
+        self.sys_port_proxy.pio_request_port = iobus.cpu_side_ports
+
+        # Connect the cpu's cache, interrupt, and TLB ports to Ruby
+        for i,cpu in enumerate(cpus):
+            cpu.icache_port = self.sequencers[i].in_ports
+            cpu.dcache_port = self.sequencers[i].in_ports
+            isa = buildEnv['TARGET_ISA']
+            if isa == 'x86':
+                cpu.interrupts[0].pio = self.sequencers[i].interrupt_out_port
+                cpu.interrupts[0].int_requestor = self.sequencers[i].in_ports
+                cpu.interrupts[0].int_responder = \
+                                        self.sequencers[i].interrupt_out_port
+            if isa == 'x86' or isa == 'arm':
+                cpu.itb.walker.port = self.sequencers[i].in_ports
+                cpu.dtb.walker.port = self.sequencers[i].in_ports
+
+
+class L1Cache(L1Cache_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, system, ruby_system, cpu):
+        """CPUs are needed to grab the clock domain and system is needed for
+           the cache block size.
+        """
+        super(L1Cache, self).__init__()
+
+        self.version = self.versionCount()
+        # This is the cache memory object that stores the cache data and tags
+        self.cacheMemory = RubyCache(size = '16kB',
+                               assoc = 8,
+                               start_index_bit = self.getBlockSizeBits(system))
+        self.clk_domain = cpu.clk_domain
+        self.send_evictions = self.sendEvicts(cpu)
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def getBlockSizeBits(self, system):
+        bits = int(math.log(system.cache_line_size, 2))
+        if 2**bits != system.cache_line_size.value:
+            panic("Cache line size not a power of 2!")
+        return bits
+
+    def sendEvicts(self, cpu):
+        """True if the CPU model or ISA requires sending evictions from caches
+           to the CPU. Three scenarios warrant forwarding evictions:
+           1. The O3 model must keep the LSQ coherent with the caches
+           2. The x86 mwait instruction is built on top of coherence
+           3. The local exclusive monitor in ARM systems
+        """
+        if type(cpu) is DerivO3CPU or \
+           buildEnv['TARGET_ISA'] in ('x86', 'arm'):
+            return True
+        return False
+
+    def connectQueues(self, ruby_system):
+        """Connect all of the queues for this controller.
+        """
+        self.mandatoryQueue = MessageBuffer()
+        self.requestFromCache = MessageBuffer(ordered = True)
+        self.requestFromCache.out_port = ruby_system.network.in_port
+        self.responseFromCache = MessageBuffer(ordered = True)
+        self.responseFromCache.out_port = ruby_system.network.in_port
+        self.forwardToCache = MessageBuffer(ordered = True)
+        self.forwardToCache.in_port = ruby_system.network.out_port
+        self.responseToCache = MessageBuffer(ordered = True)
+        self.responseToCache.in_port = ruby_system.network.out_port
+
+class DirController(Directory_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, ruby_system, ranges, mem_ctrls):
+        """ranges are the memory ranges assigned to this controller.
+        """
+        if len(mem_ctrls) > 1:
+            panic("This cache system can only be connected to one mem ctrl")
+        super(DirController, self).__init__()
+        self.version = self.versionCount()
+        self.addr_ranges = ranges
+        self.ruby_system = ruby_system
+        self.directory = RubyDirectoryMemory()
+        # Connect this directory to the memory side.
+        self.memory_out_port = mem_ctrls[0].port
+        self.connectQueues(ruby_system)
+
+    def connectQueues(self, ruby_system):
+        self.requestToDir = MessageBuffer(ordered = True)
+        self.requestToDir.in_port = ruby_system.network.out_port
+        self.dmaRequestToDir = MessageBuffer(ordered = True)
+        self.dmaRequestToDir.in_port = ruby_system.network.out_port
+
+        self.responseFromDir = MessageBuffer()
+        self.responseFromDir.out_port = ruby_system.network.in_port
+        self.dmaResponseFromDir = MessageBuffer(ordered = True)
+        self.dmaResponseFromDir.out_port = ruby_system.network.in_port
+        self.forwardFromDir = MessageBuffer()
+        self.forwardFromDir.out_port = ruby_system.network.in_port
+        self.requestToMemory = MessageBuffer()
+        self.responseFromMemory = MessageBuffer()
+
+class DMAController(DMA_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, ruby_system):
+        super(DMAController, self).__init__()
+        self.version = self.versionCount()
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def connectQueues(self, ruby_system):
+        self.mandatoryQueue = MessageBuffer()
+        self.requestToDir = MessageBuffer()
+        self.requestToDir.out_port = ruby_system.network.in_port
+        self.responseFromDir = MessageBuffer(ordered = True)
+        self.responseFromDir.in_port = ruby_system.network.out_port
+
+
+class MyNetwork(SimpleNetwork):
+    """A simple point-to-point network. This doesn't not use garnet.
+    """
+
+    def __init__(self, ruby_system):
+        super(MyNetwork, self).__init__()
+        self.netifs = []
+        self.ruby_system = ruby_system
+
+    def connectControllers(self, controllers):
+        """Connect all of the controllers to routers and connec the routers
+           together in a point-to-point network.
+        """
+        # Create one router/switch per controller in the system
+        self.routers = [Switch(router_id = i) for i in range(len(controllers))]
+
+        # Make a link from each controller to the router. The link goes
+        # externally to the network.
+        self.ext_links = [SimpleExtLink(link_id=i, ext_node=c,
+                                        int_node=self.routers[i])
+                          for i, c in enumerate(controllers)]
+
+        # Make an "internal" link (internal to the network) between every pair
+        # of routers.
+        link_count = 0
+        self.int_links = []
+        for ri in self.routers:
+            for rj in self.routers:
+                if ri == rj: continue # Don't connect a router to itself!
+                link_count += 1
+                self.int_links.append(SimpleIntLink(link_id = link_count,
+                                                    src_node = ri,
+                                                    dst_node = rj))
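
Note: the getBlockSizeBits() helper above appears in all of these protocol
files; a standalone sketch of its arithmetic (plain Python, hypothetical
line size):

    import math

    def block_size_bits(line_size):
        # Mirrors L1Cache.getBlockSizeBits(): log2 of the cache line
        # size, rejecting sizes that are not a power of two.
        bits = int(math.log(line_size, 2))
        if 2 ** bits != line_size:
            raise ValueError("Cache line size not a power of 2!")
        return bits

    assert block_size_bits(64) == 6  # a 64-byte line needs 6 offset bits
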
diff --git a/src/parsec/configs/system/MOESI_CMP_directory.py b/src/parsec/configs/system/MOESI_CMP_directory.py
new file mode 100644
index 0000000..f24022a
--- /dev/null
+++ b/src/parsec/configs/system/MOESI_CMP_directory.py
@@ -0,0 +1,351 @@
+#Copyright (c) 2020 The Regents of the University of California.
+#All Rights Reserved
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+
+""" This file creates a set of Ruby caches for the MOESI CMP directory
+protocol.
+This protocol models a two-level cache hierarchy. The L1 cache is split into
+instruction and data caches.
+
+This system supports memory sizes of up to 3GB.
+
+"""
+
+from __future__ import print_function
+from __future__ import absolute_import
+
+import math
+
+from m5.defines import buildEnv
+from m5.util import fatal, panic
+
+from m5.objects import *
+
+class MOESICMPDirCache(RubySystem):
+
+    def __init__(self):
+        if buildEnv['PROTOCOL'] != 'MOESI_CMP_directory':
+            fatal("This system assumes MOESI_CMP_directory!")
+
+        super(MOESICMPDirCache, self).__init__()
+
+        self._numL2Caches = 8
+
+    def setup(self, system, cpus, mem_ctrls, dma_ports, iobus):
+        """Set up the Ruby cache subsystem. Note: This can't be done in the
+           constructor because many of these items require a pointer to the
+           ruby system (self). This causes infinite recursion in initialize()
+           if we do this in the __init__.
+        """
+        # Ruby's global network.
+        self.network = MyNetwork(self)
+
+        # MOESI_CMP_directory example uses 3 virtual networks
+        self.number_of_virtual_networks = 3
+        self.network.number_of_virtual_networks = 3
+
+        # There is a single global list of all of the controllers to make it
+        # easier to connect everything to the global network. This can be
+        # customized depending on the topology/network requirements.
+        # L1 caches are private to a core, hence there is one L1 cache per
+        # CPU core. The number of L2 caches depends on the architecture.
+        self.controllers = \
+            [L1Cache(system, self, cpu, self._numL2Caches) for cpu in cpus] + \
+            [L2Cache(system, self, self._numL2Caches) for num in \
+            range(self._numL2Caches)] + [DirController(self, \
+            system.mem_ranges, mem_ctrls)] + [DMAController(self) for i \
+            in range(len(dma_ports))]
+
+        # Create one sequencer per CPU and DMA controller.
+        # Sequencers for other controllers can be added here.
+        self.sequencers = [RubySequencer(version = i,
+                                # Grab the I and D caches from the ctrl
+                                icache = self.controllers[i].L1Icache,
+                                dcache = self.controllers[i].L1Dcache,
+                                clk_domain = self.controllers[i].clk_domain,
+                                pio_request_port = iobus.cpu_side_ports,
+                                mem_request_port = iobus.cpu_side_ports,
+                                pio_response_port = iobus.mem_side_ports
+                                ) for i in range(len(cpus))] + \
+                          [DMASequencer(version = i,
+                                        in_ports = port)
+                            for i,port in enumerate(dma_ports)
+                          ]
+
+        for i,c in enumerate(self.controllers[:len(cpus)]):
+            c.sequencer = self.sequencers[i]
+
+        # Connect each DMA sequencer to its DMA controller
+        for i,d in enumerate(self.controllers[-len(dma_ports):]):
+            i += len(cpus)
+            d.dma_sequencer = self.sequencers[i]
+
+        self.num_of_sequencers = len(self.sequencers)
+
+        # Create the network and connect the controllers.
+        # NOTE: This is quite different if using Garnet!
+        self.network.connectControllers(self.controllers)
+        self.network.setup_buffers()
+
+        # Set up a proxy port for the system_port. Used for loading
+        # binaries and other functional-only things.
+        self.sys_port_proxy = RubyPortProxy()
+        system.system_port = self.sys_port_proxy.in_ports
+        self.sys_port_proxy.pio_request_port = iobus.cpu_side_ports
+
+        # Connect the cpu's cache, interrupt, and TLB ports to Ruby
+        for i,cpu in enumerate(cpus):
+            cpu.icache_port = self.sequencers[i].in_ports
+            cpu.dcache_port = self.sequencers[i].in_ports
+            isa = buildEnv['TARGET_ISA']
+            if isa == 'x86':
+                cpu.interrupts[0].pio = self.sequencers[i].interrupt_out_port
+                cpu.interrupts[0].int_requestor = self.sequencers[i].in_ports
+                cpu.interrupts[0].int_responder = self.sequencers[i].interrupt_out_port
+            if isa == 'x86' or isa == 'arm':
+                cpu.itb.walker.port = self.sequencers[i].in_ports
+                cpu.dtb.walker.port = self.sequencers[i].in_ports
+
+
+class L1Cache(L1Cache_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, system, ruby_system, cpu, num_l2Caches):
+        """Creating L1 cache controller. Consist of both instruction
+           and data cache. The size of data cache is 512KB and
+           8-way set associative. The instruction cache is 32KB,
+           2-way set associative.
+        """
+        super(L1Cache, self).__init__()
+
+        self.version = self.versionCount()
+        block_size_bits = int(math.log(system.cache_line_size, 2))
+        l1i_size = '32kB'
+        l1i_assoc = '2'
+        l1d_size = '512kB'
+        l1d_assoc = '8'
+        # This is the cache memory object that stores the cache data and tags
+        self.L1Icache = RubyCache(size = l1i_size,
+                                assoc = l1i_assoc,
+                                start_index_bit = block_size_bits ,
+                                is_icache = True,
+                                dataAccessLatency = 1,
+                                tagAccessLatency = 1)
+        self.L1Dcache = RubyCache(size = l1d_size,
+                            assoc = l1d_assoc,
+                            start_index_bit = block_size_bits,
+                            is_icache = False,
+                            dataAccessLatency = 1,
+                            tagAccessLatency = 1)
+        self.clk_domain = cpu.clk_domain
+        self.prefetcher = RubyPrefetcher()
+        self.send_evictions = self.sendEvicts(cpu)
+        self.transitions_per_cycle = 4
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def getBlockSizeBits(self, system):
+        bits = int(math.log(system.cache_line_size, 2))
+        if 2**bits != system.cache_line_size.value:
+            panic("Cache line size not a power of 2!")
+        return bits
+
+    def sendEvicts(self, cpu):
+        """True if the CPU model or ISA requires sending evictions from caches
+           to the CPU. Three scenarios warrant forwarding evictions:
+           1. The O3 model must keep the LSQ coherent with the caches
+           2. The x86 mwait instruction is built on top of coherence
+           3. The local exclusive monitor in ARM systems
+        """
+        if type(cpu) is DerivO3CPU or \
+           buildEnv['TARGET_ISA'] in ('x86', 'arm'):
+            return True
+        return False
+
+    def connectQueues(self, ruby_system):
+        """Connect all of the queues for this controller.
+        """
+        self.mandatoryQueue = MessageBuffer()
+        self.requestFromL1Cache = MessageBuffer()
+        self.requestFromL1Cache.out_port = ruby_system.network.in_port
+        self.responseFromL1Cache = MessageBuffer()
+        self.responseFromL1Cache.out_port = ruby_system.network.in_port
+        self.requestToL1Cache = MessageBuffer()
+        self.requestToL1Cache.in_port = ruby_system.network.out_port
+        self.responseToL1Cache = MessageBuffer()
+        self.responseToL1Cache.in_port = ruby_system.network.out_port
+        self.triggerQueue = MessageBuffer(ordered = True)
+
+class L2Cache(L2Cache_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, system, ruby_system, num_l2Caches):
+
+        super(L2Cache, self).__init__()
+
+        self.version = self.versionCount()
+        # This is the cache memory object that stores the cache data and tags
+        self.L2cache = RubyCache(size = '1 MB',
+                                assoc = 16,
+                                start_index_bit = self.getL2StartIdx(system,
+                                num_l2Caches),
+                                dataAccessLatency = 20,
+                                tagAccessLatency = 20)
+
+        self.transitions_per_cycle = '4'
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def getL2StartIdx(self, system, num_l2caches):
+        l2_bits = int(math.log(num_l2caches, 2))
+        bits = int(math.log(system.cache_line_size, 2)) + l2_bits
+        return bits
+
+
+    def connectQueues(self, ruby_system):
+        """Connect all of the queues for this controller.
+        """
+        self.GlobalRequestFromL2Cache = MessageBuffer()
+        self.GlobalRequestFromL2Cache.out_port = ruby_system.network.in_port
+        self.L1RequestFromL2Cache = MessageBuffer()
+        self.L1RequestFromL2Cache.out_port = ruby_system.network.in_port
+        self.responseFromL2Cache = MessageBuffer()
+        self.responseFromL2Cache.out_port = ruby_system.network.in_port
+
+        self.GlobalRequestToL2Cache = MessageBuffer()
+        self.GlobalRequestToL2Cache.in_port = ruby_system.network.out_port
+        self.L1RequestToL2Cache = MessageBuffer()
+        self.L1RequestToL2Cache.in_port = ruby_system.network.out_port
+        self.responseToL2Cache = MessageBuffer()
+        self.responseToL2Cache.in_port = ruby_system.network.out_port
+        self.triggerQueue = MessageBuffer(ordered = True)
+
+
+
+class DirController(Directory_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, ruby_system, ranges, mem_ctrls):
+        """ranges are the memory ranges assigned to this controller.
+        """
+        if len(mem_ctrls) > 1:
+            panic("This cache system can only be connected to one mem ctrl")
+        super(DirController, self).__init__()
+        self.version = self.versionCount()
+        self.addr_ranges = ranges
+        self.ruby_system = ruby_system
+        self.directory = RubyDirectoryMemory()
+        # Connect this directory to the memory side.
+        self.memory_out_port = mem_ctrls[0].port
+        self.connectQueues(ruby_system)
+
+    def connectQueues(self, ruby_system):
+        self.requestToDir = MessageBuffer()
+        self.requestToDir.in_port = ruby_system.network.out_port
+        self.responseToDir = MessageBuffer()
+        self.responseToDir.in_port = ruby_system.network.out_port
+        self.responseFromDir = MessageBuffer()
+        self.responseFromDir.out_port = ruby_system.network.in_port
+        self.forwardFromDir = MessageBuffer()
+        self.forwardFromDir.out_port = ruby_system.network.in_port
+        self.requestToMemory = MessageBuffer()
+        self.responseFromMemory = MessageBuffer()
+        self.triggerQueue = MessageBuffer(ordered = True)
+
+class DMAController(DMA_Controller):
+
+    _version = 0
+    @classmethod
+    def versionCount(cls):
+        cls._version += 1 # Use count for this particular type
+        return cls._version - 1
+
+    def __init__(self, ruby_system):
+        super(DMAController, self).__init__()
+        self.version = self.versionCount()
+        self.ruby_system = ruby_system
+        self.connectQueues(ruby_system)
+
+    def connectQueues(self, ruby_system):
+        self.mandatoryQueue = MessageBuffer()
+        self.responseFromDir = MessageBuffer()
+        self.responseFromDir.in_port = ruby_system.network.out_port
+        self.reqToDir = MessageBuffer()
+        self.reqToDir.out_port = ruby_system.network.in_port
+        self.respToDir = MessageBuffer()
+        self.respToDir.out_port = ruby_system.network.in_port
+        self.triggerQueue = MessageBuffer(ordered = True)
+
+
+class MyNetwork(SimpleNetwork):
+    """A simple point-to-point network. This doesn't not use garnet.
+    """
+
+    def __init__(self, ruby_system):
+        super(MyNetwork, self).__init__()
+        self.netifs = []
+        self.ruby_system = ruby_system
+
+    def connectControllers(self, controllers):
+        """Connect all of the controllers to routers and connec the routers
+           together in a point-to-point network.
+        """
+        # Create one router/switch per controller in the system
+        self.routers = [Switch(router_id = i) for i in range(len(controllers))]
+
+        # Make a link from each controller to the router. The link goes
+        # externally to the network.
+        self.ext_links = [SimpleExtLink(link_id=i, ext_node=c,
+                                        int_node=self.routers[i])
+                          for i, c in enumerate(controllers)]
+
+        # Make an "internal" link (internal to the network) between every pair
+        # of routers.
+        link_count = 0
+        self.int_links = []
+        for ri in self.routers:
+            for rj in self.routers:
+                if ri == rj: continue # Don't connect a router to itself!
+                link_count += 1
+                self.int_links.append(SimpleIntLink(link_id = link_count,
+                                                    src_node = ri,
+                                                    dst_node = rj))
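
Note: connectControllers() (identical in all three protocol files) builds a
fully connected point-to-point topology, so the internal link count grows
quadratically with the number of controllers; a quick check of that count:

    # One SimpleIntLink per ordered pair of distinct routers:
    # n routers -> n * (n - 1) unidirectional internal links.
    def int_link_count(n):
        return n * (n - 1)

    # e.g., 2 CPUs + 8 L2s + 1 directory + 2 DMA controllers = 13 routers
    assert int_link_count(13) == 156
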
diff --git a/src/parsec/configs/system/__init__.py b/src/parsec/configs/system/__init__.py
old mode 100755
new mode 100644
index 994e0e4..5b02b9a
--- a/src/parsec/configs/system/__init__.py
+++ b/src/parsec/configs/system/__init__.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2016 Jason Lowe-Power
+# Copyright (c) 2021 The Regents of the University of California
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -24,6 +23,7 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
 
-from .system import MySystem
\ No newline at end of file
+from .system import MySystem
+from .ruby_system import MyRubySystem
+
diff --git a/src/parsec/configs/system/caches.py b/src/parsec/configs/system/caches.py
old mode 100755
new mode 100644
index 15c9b14..497c7e4
--- a/src/parsec/configs/system/caches.py
+++ b/src/parsec/configs/system/caches.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2016 Jason Lowe-Power
+# Copyright (c) 2021 The Regents of the University of California
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -24,7 +23,6 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
 
 """ Caches with options for a simple gem5 configuration script
 
@@ -38,21 +36,13 @@
 from m5.params import AddrRange, AllMemory, MemorySize
 from m5.util.convert import toMemorySize
 
-import SimpleOpts
-
 # Some specific options for caches
 # For all options see src/mem/cache/BaseCache.py
 
 class PrefetchCache(Cache):
 
-    SimpleOpts.add_option("--no_prefetchers", default=False,
-                          action="store_true",
-                          help="Enable prefectchers on the caches")
-
-    def __init__(self, options):
+    def __init__(self):
         super(PrefetchCache, self).__init__()
-        if not options or options.no_prefetchers:
-            return
         self.prefetcher = StridePrefetcher()
 
 class L1Cache(PrefetchCache):
@@ -66,9 +56,8 @@
     tgts_per_mshr = 20
     writeback_clean = True
 
-    def __init__(self, options=None):
-        super(L1Cache, self).__init__(options)
-        pass
+    def __init__(self):
+        super(L1Cache, self).__init__()
 
     def connectBus(self, bus):
         """Connect this cache to a memory-side bus"""
@@ -85,14 +74,8 @@
     # Set the default size
     size = '32kB'
 
-    SimpleOpts.add_option('--l1i_size',
-                        help="L1 instruction cache size. Default: %s" % size)
-
-    def __init__(self, opts=None):
-        super(L1ICache, self).__init__(opts)
-        if not opts or not opts.l1i_size:
-            return
-        self.size = opts.l1i_size
+    def __init__(self):
+        super(L1ICache, self).__init__()
 
     def connectCPU(self, cpu):
         """Connect this cache's port to a CPU icache port"""
@@ -104,14 +87,8 @@
     # Set the default size
     size = '32kB'
 
-    SimpleOpts.add_option('--l1d_size',
-                          help="L1 data cache size. Default: %s" % size)
-
-    def __init__(self, opts=None):
-        super(L1DCache, self).__init__(opts)
-        if not opts or not opts.l1d_size:
-            return
-        self.size = opts.l1d_size
+    def __init__(self):
+        super(L1DCache, self).__init__()
 
     def connectCPU(self, cpu):
         """Connect this cache's port to a CPU dcache port"""
@@ -157,42 +134,8 @@
     tgts_per_mshr = 12
     writeback_clean = True
 
-    SimpleOpts.add_option('--l2_size',
-                          help="L2 cache size. Default: %s" % size)
-
-    def __init__(self, opts=None):
-        super(L2Cache, self).__init__(opts)
-        if not opts or not opts.l2_size:
-            return
-        self.size = opts.l2_size
-
-    def connectCPUSideBus(self, bus):
-        self.cpu_side = bus.mem_side_ports
-
-    def connectMemSideBus(self, bus):
-        self.mem_side = bus.cpu_side_ports
-
-class L3Cache(Cache):
-    """Simple L3 Cache bank with default values
-       This assumes that the L3 is made up of multiple banks. This cannot
-       be used as a standalone L3 cache.
-    """
-
-    SimpleOpts.add_option('--l3_size', default = '4MB',
-                          help="L3 cache size. Default: 4MB")
-
-    # Default parameters
-    assoc = 32
-    tag_latency = 40
-    data_latency = 40
-    response_latency = 10
-    mshrs = 256
-    tgts_per_mshr = 12
-    clusivity = 'mostly_excl'
-
-    def __init__(self, opts):
-        super(L3Cache, self).__init__()
-        self.size = (opts.l3_size)
+    def __init__(self):
+        super(L2Cache, self).__init__()
 
     def connectCPUSideBus(self, bus):
         self.cpu_side = bus.mem_side_ports
diff --git a/src/parsec/configs/system/fs_tools.py b/src/parsec/configs/system/fs_tools.py
old mode 100755
new mode 100644
index 91f6646..9e49ce7
--- a/src/parsec/configs/system/fs_tools.py
+++ b/src/parsec/configs/system/fs_tools.py
@@ -1,5 +1,4 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2016 Jason Lowe-Power
+# Copyright (c) 2021 The Regents of the University of California
 # All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
@@ -24,7 +23,6 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
 
 from m5.objects import IdeDisk, CowDiskImage, RawDiskImage
 
diff --git a/src/parsec/configs/system/ruby_system.py b/src/parsec/configs/system/ruby_system.py
new file mode 100644
index 0000000..30eebd4
--- /dev/null
+++ b/src/parsec/configs/system/ruby_system.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2021 The Regents of the University of California
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import m5
+from m5.objects import *
+from m5.util import convert
+from .fs_tools import *
+
+
+class MyRubySystem(System):
+
+    def __init__(self, kernel, disk, cpu_type, mem_sys, num_cpus):
+        super(MyRubySystem, self).__init__()
+
+        self._host_parallel = cpu_type == "kvm"
+
+        # Set up the clock domain and the voltage domain
+        self.clk_domain = SrcClockDomain()
+        self.clk_domain.clock = '3GHz'
+        self.clk_domain.voltage_domain = VoltageDomain()
+
+        self.mem_ranges = [AddrRange(Addr('3GB')), # All data
+                           AddrRange(0xC0000000, size=0x100000), # For I/O
+                           ]
+
+        self.initFS(num_cpus)
+
+        # Replace these paths with the path to your disk images.
+        # The first disk is the root disk. The second could be used for swap
+        # or anything else.
+        self.setDiskImages(disk, disk)
+
+        # Change this path to point to the kernel you want to use
+        self.workload.object_file = kernel
+        # Options specified on the kernel command line
+        boot_options = ['earlyprintk=ttyS0', 'console=ttyS0', 'lpj=7999923',
+                         'root=/dev/hda1']
+
+        self.workload.command_line = ' '.join(boot_options)
+
+        # Create the CPUs for our system.
+        self.createCPU(cpu_type, num_cpus)
+
+        self.createMemoryControllersDDR3()
+
+        # Create the cache hierarchy for the system.
+        if mem_sys == 'MI_example':
+            from .MI_example_caches import MIExampleSystem
+            self.caches = MIExampleSystem()
+        elif mem_sys == 'MESI_Two_Level':
+            from .MESI_Two_Level import MESITwoLevelCache
+            self.caches = MESITwoLevelCache()
+        elif mem_sys == 'MOESI_CMP_directory':
+            from .MOESI_CMP_directory import MOESICMPDirCache
+            self.caches = MOESICMPDirCache()
+        self.caches.setup(self, self.cpu, self.mem_cntrls,
+                          [self.pc.south_bridge.ide.dma, self.iobus.mem_side_ports],
+                          self.iobus)
+
+        if self._host_parallel:
+            # To get the KVM CPUs to run on different host CPUs
+            # Specify a different event queue for each CPU
+            for i,cpu in enumerate(self.cpu):
+                for obj in cpu.descendants():
+                    obj.eventq_index = 0
+                cpu.eventq_index = i + 1
+
+    def getHostParallel(self):
+        return self._host_parallel
+
+    def totalInsts(self):
+        return sum([cpu.totalInsts() for cpu in self.cpu])
+
+    def createCPU(self, cpu_type, num_cpus):
+        if cpu_type == "atomic":
+            self.cpu = [AtomicSimpleCPU(cpu_id = i)
+                              for i in range(num_cpus)]
+            self.mem_mode = 'atomic'
+        elif cpu_type == "kvm":
+            # Note KVM needs a VM and atomic_noncaching
+            self.cpu = [X86KvmCPU(cpu_id = i)
+                        for i in range(num_cpus)]
+            self.kvm_vm = KvmVM()
+            self.mem_mode = 'atomic_noncaching'
+        elif cpu_type == "o3":
+            self.cpu = [DerivO3CPU(cpu_id = i)
+                        for i in range(num_cpus)]
+            self.mem_mode = 'timing'
+        elif cpu_type == "simple":
+            self.cpu = [TimingSimpleCPU(cpu_id = i)
+                        for i in range(num_cpus)]
+            self.mem_mode = 'timing'
+        else:
+            m5.fatal("No CPU type {}".format(cpu_type))
+
+        for cpu in self.cpu:
+            cpu.createThreads()
+            cpu.createInterruptController()
+
+    def setDiskImages(self, img_path_1, img_path_2):
+        disk0 = CowDisk(img_path_1)
+        disk2 = CowDisk(img_path_2)
+        self.pc.south_bridge.ide.disks = [disk0, disk2]
+
+    def createMemoryControllersDDR3(self):
+        self._createMemoryControllers(1, DDR3_1600_8x8)
+
+    def _createMemoryControllers(self, num, cls):
+        self.mem_cntrls = [
+            MemCtrl(dram = cls(range = self.mem_ranges[0]))
+            for i in range(num)
+        ]
+
+    def initFS(self, cpus):
+        self.pc = Pc()
+
+        self.workload = X86FsLinux()
+
+        # North Bridge
+        self.iobus = IOXBar()
+
+        # connect the io bus
+        # Note: pass in a reference to the ports that Ruby will connect
+        # later, so those ports aren't connected twice.
+        self.pc.attachIO(self.iobus, [self.pc.south_bridge.ide.dma])
+
+        self.intrctrl = IntrControl()
+
+        ###############################################
+
+        # Add in a Bios information structure.
+        self.workload.smbios_table.structures = [X86SMBiosBiosInformation()]
+
+        # Set up the Intel MP table
+        base_entries = []
+        ext_entries = []
+        for i in range(cpus):
+            bp = X86IntelMPProcessor(
+                    local_apic_id = i,
+                    local_apic_version = 0x14,
+                    enable = True,
+                    bootstrap = (i == 0))
+            base_entries.append(bp)
+        io_apic = X86IntelMPIOAPIC(
+                id = cpus,
+                version = 0x11,
+                enable = True,
+                address = 0xfec00000)
+        self.pc.south_bridge.io_apic.apic_id = io_apic.id
+        base_entries.append(io_apic)
+        pci_bus = X86IntelMPBus(bus_id = 0, bus_type='PCI   ')
+        base_entries.append(pci_bus)
+        isa_bus = X86IntelMPBus(bus_id = 1, bus_type='ISA   ')
+        base_entries.append(isa_bus)
+        connect_busses = X86IntelMPBusHierarchy(bus_id=1,
+                subtractive_decode=True, parent_bus=0)
+        ext_entries.append(connect_busses)
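+        # Route PCI device 4, pin INTA, to IO APIC pin 16. Per the Intel
+        # MP spec, a PCI source bus IRQ encodes (device << 2) | INT pin,
+        # hence the 0 + (4 << 2) below.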
+        pci_dev4_inta = X86IntelMPIOIntAssignment(
+                interrupt_type = 'INT',
+                polarity = 'ConformPolarity',
+                trigger = 'ConformTrigger',
+                source_bus_id = 0,
+                source_bus_irq = 0 + (4 << 2),
+                dest_io_apic_id = io_apic.id,
+                dest_io_apic_intin = 16)
+        base_entries.append(pci_dev4_inta)
+        def assignISAInt(irq, apicPin):
+            assign_8259_to_apic = X86IntelMPIOIntAssignment(
+                    interrupt_type = 'ExtInt',
+                    polarity = 'ConformPolarity',
+                    trigger = 'ConformTrigger',
+                    source_bus_id = 1,
+                    source_bus_irq = irq,
+                    dest_io_apic_id = io_apic.id,
+                    dest_io_apic_intin = 0)
+            base_entries.append(assign_8259_to_apic)
+            assign_to_apic = X86IntelMPIOIntAssignment(
+                    interrupt_type = 'INT',
+                    polarity = 'ConformPolarity',
+                    trigger = 'ConformTrigger',
+                    source_bus_id = 1,
+                    source_bus_irq = irq,
+                    dest_io_apic_id = io_apic.id,
+                    dest_io_apic_intin = apicPin)
+            base_entries.append(assign_to_apic)
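+        # Legacy ISA IRQs map one-to-one onto IO APIC pins, except the
+        # PIT timer (IRQ0), which goes to pin 2 by PC convention; IRQ2
+        # is the 8259 cascade and is skipped.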
+        assignISAInt(0, 2)
+        assignISAInt(1, 1)
+        for i in range(3, 15):
+            assignISAInt(i, i)
+        self.workload.intel_mp_table.base_entries = base_entries
+        self.workload.intel_mp_table.ext_entries = ext_entries
+
+        entries = [
+            # Mark the first 639kB as usable and the rest of the first
+            # megabyte (the EBDA/VGA/BIOS hole) as reserved
+            X86E820Entry(addr = 0, size = '639kB', range_type = 1),
+            X86E820Entry(addr = 0x9fc00, size = '385kB', range_type = 2),
+            # Mark the rest of physical memory as available
+            X86E820Entry(addr = 0x100000,
+                    size = '%dB' % (self.mem_ranges[0].size() - 0x100000),
+                    range_type = 1),
+        ]
+
+        # Reserve the last 64kB of the 32-bit address space for m5ops
+        entries.append(X86E820Entry(addr = 0xFFFF0000, size = '64kB',
+                                    range_type=2))
+
+        self.workload.e820_table.entries = entries
diff --git a/src/parsec/configs/system/system.py b/src/parsec/configs/system/system.py
index 5becf46..fe4d198 100644
--- a/src/parsec/configs/system/system.py
+++ b/src/parsec/configs/system/system.py
@@ -1,6 +1,5 @@
-# -*- coding: utf-8 -*-
-# Copyright (c) 2018 The Regents of the University of California
-# All Rights Reserved.
+# Copyright (c) 2021 The Regents of the University of California
+# All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
@@ -24,8 +23,6 @@
 # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-# Authors: Jason Lowe-Power
 
 import m5
 from m5.objects import *
@@ -33,32 +30,21 @@
 from .fs_tools import *
 from .caches import *
 
-
 class MySystem(System):
 
-    SimpleOpts.add_option("--no_host_parallel", default=False,
-                action="store_true",
-                help="Do NOT run gem5 on multiple host threads (kvm only)")
-
-    SimpleOpts.add_option("--second_disk", default='',
-                          help="The second disk image to mount (/dev/hdb)")
-
-    def __init__(self, kernel, disk, num_cpus, opts, no_kvm=False):
+    def __init__(self, kernel, disk, cpu_type, num_cpus, no_kvm = False):
         super(MySystem, self).__init__()
-        self._opts = opts
-        self._no_kvm = no_kvm
 
-        self._host_parallel = not self._opts.no_host_parallel
+        self._no_kvm = no_kvm
+        self._host_parallel = cpu_type == "kvm"
 
         # Set up the clock domain and the voltage domain
         self.clk_domain = SrcClockDomain()
-        self.clk_domain.clock = '2.3GHz'
+        self.clk_domain.clock = '3GHz'
         self.clk_domain.voltage_domain = VoltageDomain()
 
-        mem_size = '16GB'
-        self.mem_ranges = [AddrRange('100MB'), # For kernel
+        self.mem_ranges = [AddrRange(Addr('3GB')), # All data
                            AddrRange(0xC0000000, size=0x100000), # For I/O
-                           AddrRange(Addr('4GB'), size = mem_size) # All data
                            ]
 
         # Create the main memory bus
@@ -72,18 +58,11 @@
 
         self.initFS(self.membus, num_cpus)
 
-
         # Replace these paths with the path to your disk images.
         # The first disk is the root disk. The second could be used for swap
         # or anything else.
-
         self.setDiskImages(disk, disk)
 
-        if opts.second_disk:
-            self.setDiskImages(disk, opts.second_disk)
-        else:
-            self.setDiskImages(disk, disk)
-
         # Change this path to point to the kernel you want to use
         self.workload.object_file = kernel
         # Options specified on the kernel command line
@@ -93,7 +72,7 @@
         self.workload.command_line = ' '.join(boot_options)
 
         # Create the CPUs for our system.
-        self.createCPU(num_cpus)
+        self.createCPU(cpu_type, num_cpus)
 
         # Create the cache hierarchy for the system.
         self.createCacheHierarchy()
@@ -101,7 +80,7 @@
         # Set up the interrupt controllers for the system (x86 specific)
         self.setupInterrupts()
 
-        self.createMemoryControllersDDR4()
+        self.createMemoryControllersDDR3()
 
         if self._host_parallel:
             # To get the KVM CPUs to run on different host CPUs
@@ -109,49 +88,50 @@
             for i,cpu in enumerate(self.cpu):
                 for obj in cpu.descendants():
                     obj.eventq_index = 0
+                cpu.eventq_index = i + 1
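+                # (Devices stay on queue 0; one private queue per KVM
+                # core lets the host run them on separate threads.)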
 
-                # the number of eventqs are set based
-                # on experiments with few benchmarks
-
-                if len(self.cpu) > 16:
-                    cpu.eventq_index = (i/4) + 1
-                else:
-                    cpu.eventq_index = (i/2) + 1
     def getHostParallel(self):
         return self._host_parallel
 
     def totalInsts(self):
         return sum([cpu.totalInsts() for cpu in self.cpu])
 
-    def createCPUThreads(self, cpu):
-        for c in cpu:
-            c.createThreads()
-
-    def createCPU(self, num_cpus):
+    def createCPU(self, cpu_type, num_cpus):
+        # set up a kvm core or an atomic core to boot
         if self._no_kvm:
             self.cpu = [AtomicSimpleCPU(cpu_id = i, switched_out = False)
                               for i in range(num_cpus)]
-            self.createCPUThreads(self.cpu)
-            self.mem_mode = 'timing'
-
+            self.mem_mode = 'atomic'
         else:
             # Note KVM needs a VM and atomic_noncaching
-            self.cpu = [X86KvmCPU(cpu_id = i)
+            self.cpu = [X86KvmCPU(cpu_id = i, switched_out = False)
                         for i in range(num_cpus)]
-            self.createCPUThreads(self.cpu)
             self.kvm_vm = KvmVM()
             self.mem_mode = 'atomic_noncaching'
 
-            self.atomicCpu = [AtomicSimpleCPU(cpu_id = i,
-                                    switched_out = True)
-                                for i in range(num_cpus)]
-            self.createCPUThreads(self.atomicCpu)
+        for cpu in self.cpu:
+            cpu.createThreads()
 
-        self.timingCpu = [TimingSimpleCPU(cpu_id = i,
-                                    switched_out = True)
-				                for i in range(num_cpus)]
+        # set up the detailed cpu or a kvm model with more cores
+        if cpu_type == "atomic":
+            self.detailedCpu = [AtomicSimpleCPU(cpu_id = i, switched_out = True)
+                                 for i in range(num_cpus)]
+        elif cpu_type == "kvm":
+            # Note KVM needs a VM and atomic_noncaching
+            self.detailedCpu = [X86KvmCPU(cpu_id = i, switched_out = True)
+                                 for i in range(num_cpus)]
+            self.kvm_vm = KvmVM()
+        elif cpu_type == "o3":
+            self.detailedCpu = [DerivO3CPU(cpu_id = i, switched_out = True)
+                                 for i in range(num_cpus)]
+        elif cpu_type == "simple" or cpu_type == "timing":
+            self.detailedCpu = [TimingSimpleCPU(cpu_id = i, switched_out = True)
+                                 for i in range(num_cpus)]
+        else:
+            m5.fatal("No CPU type {}".format(cpu_type))
 
-        self.createCPUThreads(self.timingCpu)
+        for cpu in self.detailedCpu:
+            cpu.createThreads()
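+        # These cores start switched out; the run script later calls
+        # switchCpus() to swap them in when the region of interest begins.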
 
     def switchCpus(self, old, new):
         assert(new[0].switchedOut())
@@ -163,17 +143,13 @@
         self.pc.south_bridge.ide.disks = [disk0, disk2]
 
     def createCacheHierarchy(self):
-        # Create an L3 cache (with crossbar)
-        self.l3bus = L2XBar(width = 64,
-                            snoop_filter = SnoopFilter(max_capacity='32MB'))
-
         for cpu in self.cpu:
             # Create a memory bus, a coherent crossbar, in this case
             cpu.l2bus = L2XBar()
 
             # Create an L1 instruction and data cache
-            cpu.icache = L1ICache(self._opts)
-            cpu.dcache = L1DCache(self._opts)
+            cpu.icache = L1ICache()
+            cpu.dcache = L1DCache()
             cpu.mmucache = MMUCache()
 
             # Connect the instruction and data caches to the CPU
@@ -187,17 +163,11 @@
             cpu.mmucache.connectBus(cpu.l2bus)
 
             # Create an L2 cache and connect it to the l2bus
-            cpu.l2cache = L2Cache(self._opts)
+            cpu.l2cache = L2Cache()
             cpu.l2cache.connectCPUSideBus(cpu.l2bus)
 
-            # Connect the L2 cache to the L3 bus
+            # Connect the L2 cache to the memory bus
-            cpu.l2cache.connectMemSideBus(self.l3bus)
-
-        self.l3cache = L3Cache(self._opts)
-        self.l3cache.connectCPUSideBus(self.l3bus)
-
-        # Connect the L3 cache to the membus
-        self.l3cache.connectMemSideBus(self.membus)
+            cpu.l2cache.connectMemSideBus(self.membus)
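+            # (The shared L3 and its crossbar are gone, so each private
+            # L2 now writes back straight to membus.)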
 
     def setupInterrupts(self):
         for cpu in self.cpu:
@@ -211,52 +181,20 @@
             cpu.interrupts[0].int_requestor = self.membus.cpu_side_ports
             cpu.interrupts[0].int_responder = self.membus.mem_side_ports
 
-    # Memory latency: Using the smaller number from [3]: 96ns
-    def createMemoryControllersDDR4(self):
-        self._createMemoryControllers(8, DDR4_2400_16x4)
+
+    def createMemoryControllersDDR3(self):
+        self._createMemoryControllers(1, DDR3_1600_8x8)
 
     def _createMemoryControllers(self, num, cls):
-        kernel_controller = self._createKernelMemoryController(cls)
-        ranges = self._getInterleaveRanges(self.mem_ranges[-1], num, 7, 20)
-        mem_ctrls = []
-        for i in range(num):
-          interface = cls()
-          interface.range = ranges[i]
-          ctrl = MemCtrl()
-          ctrl.dram = interface
-          ctrl.port = self.membus.mem_side_ports
-          mem_ctrls.append(ctrl)
-        self.mem_cntrls = mem_ctrls + [kernel_controller]
-
-    def _createKernelMemoryController(self, cls):
-        interface = cls()
-        interface.range = self.mem_ranges[0]
-        ctrl = MemCtrl()
-        ctrl.dram = interface
-        ctrl.port = self.membus.mem_side_ports
-        return ctrl
-
-    def _getInterleaveRanges(self, rng, num, intlv_low_bit, xor_low_bit):
-        from math import log
-        bits = int(log(num, 2))
-        if 2**bits != num:
-            m5.fatal("Non-power of two number of memory controllers")
-
-        intlv_bits = bits
-        ranges = [
-            AddrRange(start=rng.start,
-                      end=rng.end,
-                      intlvHighBit = intlv_low_bit + intlv_bits - 1,
-                      xorHighBit = xor_low_bit + intlv_bits - 1,
-                      intlvBits = intlv_bits,
-                      intlvMatch = i)
-                for i in range(num)
-            ]
-
-        return ranges
+        self.mem_cntrls = [
+            MemCtrl(dram = cls(range = self.mem_ranges[0]),
+                    port = self.membus.mem_side_ports)
+            for i in range(num)
+        ]
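+        # One DDR3-1600 channel spans mem_ranges[0]; with only 3GB of
+        # memory there is no need for address interleaving or a separate
+        # kernel memory controller.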
 
     def initFS(self, membus, cpus):
         self.pc = Pc()
+
         self.workload = X86FsLinux()
 
         # Constants similar to x86_traits.hh
@@ -388,20 +326,9 @@
                     size = '%dB' % (self.mem_ranges[0].size() - 0x100000),
                     range_type = 1),
             ]
-        # Mark [mem_size, 3GB) as reserved if memory less than 3GB, which
-        # force IO devices to be mapped to [0xC0000000, 0xFFFF0000). Requests
-        # to this specific range can pass though bridge to iobus.
-        entries.append(X86E820Entry(addr = self.mem_ranges[0].size(),
-            size='%dB' % (0xC0000000 - self.mem_ranges[0].size()),
-            range_type=2))
 
         # Reserve the last 64kB of the 32-bit address space for m5ops
         entries.append(X86E820Entry(addr = 0xFFFF0000, size = '64kB',
                                     range_type=2))
 
-        # Add the rest of memory. This is where all the actual data is
-        entries.append(X86E820Entry(addr = self.mem_ranges[-1].start,
-            size='%dB' % (self.mem_ranges[-1].size()),
-            range_type=1))
-
         self.workload.e820_table.entries = entries