mem: Clean up Memory Controller

Make the actual controller more generic
    - Rename DRAMCtrl to MemCtrl
    - Rename DRAMPacket to MemPacket
    - Rename dram_ctrl.cc to mem_ctrl.cc
    - Rename dram_ctrl.hh to mem_ctrl.hh
    - Create MemCtrl debug flag

Move the memory interface classes/functions to separate files
    - mem_interface.cc
    - mem_interface.hh
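
A minimal sketch of the resulting config idiom (mirroring the
learning_gem5 updates below); the controller and its media interface
are now instantiated separately:

    system.mem_ctrl = MemCtrl()
    system.mem_ctrl.dram = DDR3_1600_8x8()
    system.mem_ctrl.dram.range = system.mem_ranges[0]
    system.mem_ctrl.port = system.membus.master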

Change-Id: I1acba44c855776343e205e7733a7d8bbba92a82c
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/31654
Reviewed-by: Jason Lowe-Power <power.jg@gmail.com>
Maintainer: Jason Lowe-Power <power.jg@gmail.com>
Tested-by: kokoro <noreply+kokoro@google.com>
diff --git a/configs/common/MemConfig.py b/configs/common/MemConfig.py
index 7aa6761..941b381 100644
--- a/configs/common/MemConfig.py
+++ b/configs/common/MemConfig.py
@@ -224,11 +224,11 @@
                 if opt_mem_type == "HMC_2500_1x32":
                     # The static latency of the vault controllers is estimated
                     # to be smaller than a full DRAM channel controller
-                    mem_ctrl = m5.objects.DRAMCtrl(min_writes_per_switch = 8,
+                    mem_ctrl = m5.objects.MemCtrl(min_writes_per_switch = 8,
                                              static_backend_latency = '4ns',
                                              static_frontend_latency = '4ns')
                 else:
-                    mem_ctrl = m5.objects.DRAMCtrl()
+                    mem_ctrl = m5.objects.MemCtrl()
 
                 # Hookup the controller to the interface and add to the list
                 mem_ctrl.dram = dram_intf
@@ -246,7 +246,7 @@
                 # Create a controller if not sharing a channel with DRAM
                 # in which case the controller has already been created
                 if not opt_hybrid_channel:
-                    mem_ctrl = m5.objects.DRAMCtrl()
+                    mem_ctrl = m5.objects.MemCtrl()
                     mem_ctrl.nvm = nvm_intf
 
                     mem_ctrls.append(mem_ctrl)
diff --git a/configs/common/Options.py b/configs/common/Options.py
index 0409fb8..32f8dd9 100644
--- a/configs/common/Options.py
+++ b/configs/common/Options.py
@@ -109,7 +109,7 @@
                       default="512MB",
                       help="Specify the physical memory size (single memory)")
     parser.add_option("--enable-dram-powerdown", action="store_true",
-                       help="Enable low-power states in DRAMCtrl")
+                       help="Enable low-power states in DRAMInterface")
     parser.add_option("--mem-channels-intlv", type="int", default=0,
                       help="Memory channels interleave")
 
diff --git a/configs/dram/lat_mem_rd.py b/configs/dram/lat_mem_rd.py
index 9b04e4b..4183d4a 100644
--- a/configs/dram/lat_mem_rd.py
+++ b/configs/dram/lat_mem_rd.py
@@ -130,7 +130,7 @@
 
     # the following assumes that we are using the native DRAM
     # controller, check to be sure
-    if isinstance(ctrl, m5.objects.DRAMCtrl):
+    if isinstance(ctrl, m5.objects.MemCtrl):
         # make the DRAM refresh interval sufficiently infinite to avoid
         # latency spikes
         ctrl.tREFI = '100s'
diff --git a/configs/dram/low_power_sweep.py b/configs/dram/low_power_sweep.py
index 0da2b93..292b0fa 100644
--- a/configs/dram/low_power_sweep.py
+++ b/configs/dram/low_power_sweep.py
@@ -110,8 +110,8 @@
 MemConfig.config_mem(args, system)
 
 # Sanity check for memory controller class.
-if not isinstance(system.mem_ctrls[0], m5.objects.DRAMCtrl):
-    fatal("This script assumes the controller is a DRAMCtrl subclass")
+if not isinstance(system.mem_ctrls[0], m5.objects.MemCtrl):
+    fatal("This script assumes the controller is a MemCtrl subclass")
 if not isinstance(system.mem_ctrls[0].dram, m5.objects.DRAMInterface):
     fatal("This script assumes the memory is a DRAMInterface subclass")
 
diff --git a/configs/dram/sweep.py b/configs/dram/sweep.py
index a771c5c..2f38373 100644
--- a/configs/dram/sweep.py
+++ b/configs/dram/sweep.py
@@ -115,8 +115,8 @@
 
 # the following assumes that we are using the native DRAM
 # controller, check to be sure
-if not isinstance(system.mem_ctrls[0], m5.objects.DRAMCtrl):
-    fatal("This script assumes the controller is a DRAMCtrl subclass")
+if not isinstance(system.mem_ctrls[0], m5.objects.MemCtrl):
+    fatal("This script assumes the controller is a MemCtrl subclass")
 if not isinstance(system.mem_ctrls[0].dram, m5.objects.DRAMInterface):
     fatal("This script assumes the memory is a DRAMInterface subclass")
 
diff --git a/configs/example/memcheck.py b/configs/example/memcheck.py
index 6bccd54..bffd5a0 100644
--- a/configs/example/memcheck.py
+++ b/configs/example/memcheck.py
@@ -217,7 +217,7 @@
 proto_tester = TrafficGen(config_file = cfg_file_path)
 
 # Set up the system along with a DRAM controller
-system = System(physmem = DRAMCtrl(dram = DDR3_1600_8x8()))
+system = System(physmem = MemCtrl(dram = DDR3_1600_8x8()))
 
 system.voltage_domain = VoltageDomain(voltage = '1V')
 
diff --git a/configs/learning_gem5/part1/simple.py b/configs/learning_gem5/part1/simple.py
index cfd15be..22b2cf7 100644
--- a/configs/learning_gem5/part1/simple.py
+++ b/configs/learning_gem5/part1/simple.py
@@ -77,7 +77,7 @@
     system.cpu.interrupts[0].int_slave = system.membus.master
 
 # Create a DDR3 memory controller and connect it to the membus
-system.mem_ctrl = DRAMCtrl()
+system.mem_ctrl = MemCtrl()
 system.mem_ctrl.dram = DDR3_1600_8x8()
 system.mem_ctrl.dram.range = system.mem_ranges[0]
 system.mem_ctrl.port = system.membus.master
diff --git a/configs/learning_gem5/part1/two_level.py b/configs/learning_gem5/part1/two_level.py
index 0dbcfc7..53e1137 100644
--- a/configs/learning_gem5/part1/two_level.py
+++ b/configs/learning_gem5/part1/two_level.py
@@ -132,7 +132,7 @@
 system.system_port = system.membus.slave
 
 # Create a DDR3 memory controller
-system.mem_ctrl = DRAMCtrl()
+system.mem_ctrl = MemCtrl()
 system.mem_ctrl.dram = DDR3_1600_8x8()
 system.mem_ctrl.dram.range = system.mem_ranges[0]
 system.mem_ctrl.port = system.membus.master
diff --git a/configs/learning_gem5/part2/simple_cache.py b/configs/learning_gem5/part2/simple_cache.py
index fbea73d..533aa23 100644
--- a/configs/learning_gem5/part2/simple_cache.py
+++ b/configs/learning_gem5/part2/simple_cache.py
@@ -76,7 +76,7 @@
 system.cpu.interrupts[0].int_slave = system.membus.master
 
 # Create a DDR3 memory controller and connect it to the membus
-system.mem_ctrl = DRAMCtrl()
+system.mem_ctrl = MemCtrl()
 system.mem_ctrl.dram = DDR3_1600_8x8()
 system.mem_ctrl.dram.range = system.mem_ranges[0]
 system.mem_ctrl.port = system.membus.master
diff --git a/configs/learning_gem5/part2/simple_memobj.py b/configs/learning_gem5/part2/simple_memobj.py
index e792eb9..b7d2561 100644
--- a/configs/learning_gem5/part2/simple_memobj.py
+++ b/configs/learning_gem5/part2/simple_memobj.py
@@ -74,7 +74,7 @@
 system.cpu.interrupts[0].int_slave = system.membus.master
 
 # Create a DDR3 memory controller and connect it to the membus
-system.mem_ctrl = DRAMCtrl()
+system.mem_ctrl = MemCtrl()
 system.mem_ctrl.dram = DDR3_1600_8x8()
 system.mem_ctrl.dram.range = system.mem_ranges[0]
 system.mem_ctrl.port = system.membus.master
diff --git a/configs/learning_gem5/part3/simple_ruby.py b/configs/learning_gem5/part3/simple_ruby.py
index 7f70a8c..760a168 100644
--- a/configs/learning_gem5/part3/simple_ruby.py
+++ b/configs/learning_gem5/part3/simple_ruby.py
@@ -68,7 +68,7 @@
 system.cpu = [TimingSimpleCPU() for i in range(2)]
 
 # Create a DDR3 memory controller and connect it to the membus
-system.mem_ctrl = DRAMCtrl()
+system.mem_ctrl = MemCtrl()
 system.mem_ctrl.dram = DDR3_1600_8x8()
 system.mem_ctrl.dram.range = system.mem_ranges[0]
 
diff --git a/configs/nvm/sweep.py b/configs/nvm/sweep.py
index 5bc5819..7e0bd9e 100644
--- a/configs/nvm/sweep.py
+++ b/configs/nvm/sweep.py
@@ -113,8 +113,8 @@
 
 # the following assumes that we are using the native memory
 # controller with an NVM interface, check to be sure
-if not isinstance(system.mem_ctrls[0], m5.objects.DRAMCtrl):
-    fatal("This script assumes the controller is a DRAMCtrl subclass")
+if not isinstance(system.mem_ctrls[0], m5.objects.MemCtrl):
+    fatal("This script assumes the controller is a MemCtrl subclass")
 if not isinstance(system.mem_ctrls[0].nvm, m5.objects.NVMInterface):
     fatal("This script assumes the memory is a NVMInterface class")
 
diff --git a/configs/nvm/sweep_hybrid.py b/configs/nvm/sweep_hybrid.py
index a2513df..94edfd4 100644
--- a/configs/nvm/sweep_hybrid.py
+++ b/configs/nvm/sweep_hybrid.py
@@ -126,8 +126,8 @@
 
 # the following assumes that we are using the native controller
 # with NVM and DRAM interfaces, check to be sure
-if not isinstance(system.mem_ctrls[0], m5.objects.DRAMCtrl):
-    fatal("This script assumes the controller is a DRAMCtrl subclass")
+if not isinstance(system.mem_ctrls[0], m5.objects.MemCtrl):
+    fatal("This script assumes the controller is a MemCtrl subclass")
 if not isinstance(system.mem_ctrls[0].dram, m5.objects.DRAMInterface):
     fatal("This script assumes the first memory is a DRAMInterface subclass")
 if not isinstance(system.mem_ctrls[0].nvm, m5.objects.NVMInterface):
diff --git a/configs/ruby/Ruby.py b/configs/ruby/Ruby.py
index 4e382af..622771a 100644
--- a/configs/ruby/Ruby.py
+++ b/configs/ruby/Ruby.py
@@ -133,7 +133,7 @@
             dram_intf = MemConfig.create_mem_intf(mem_type, r, index,
                 options.num_dirs, int(math.log(options.num_dirs, 2)),
                 intlv_size, options.xor_low_bit)
-            mem_ctrl = m5.objects.DRAMCtrl(dram = dram_intf)
+            mem_ctrl = m5.objects.MemCtrl(dram = dram_intf)
 
             if options.access_backing_store:
                 mem_ctrl.kvm_map=False
diff --git a/src/mem/DRAMInterface.py b/src/mem/DRAMInterface.py
index aa415fe..85a6092 100644
--- a/src/mem/DRAMInterface.py
+++ b/src/mem/DRAMInterface.py
@@ -47,7 +47,7 @@
 
 class DRAMInterface(MemInterface):
     type = 'DRAMInterface'
-    cxx_header = "mem/dram_ctrl.hh"
+    cxx_header = "mem/mem_interface.hh"
 
     # scheduler page policy
     page_policy = Param.PageManage('open_adaptive', "Page management policy")
diff --git a/src/mem/DRAMCtrl.py b/src/mem/MemCtrl.py
similarity index 94%
rename from src/mem/DRAMCtrl.py
rename to src/mem/MemCtrl.py
index 4ef421b..e0f3424 100644
--- a/src/mem/DRAMCtrl.py
+++ b/src/mem/MemCtrl.py
@@ -46,13 +46,13 @@
 # First-Served and a First-Row Hit then First-Come First-Served
 class MemSched(Enum): vals = ['fcfs', 'frfcfs']
 
-# DRAMCtrl is a single-channel single-ported DRAM controller model
+# MemCtrl is a single-channel single-ported memory controller model
 # that aims to model the most important system-level performance
-# effects of a DRAM without getting into too much detail of the DRAM
-# itself.
-class DRAMCtrl(QoSMemCtrl):
-    type = 'DRAMCtrl'
-    cxx_header = "mem/dram_ctrl.hh"
+# effects of a memory controller, interfacing with media-specific
+# interfaces.
+class MemCtrl(QoSMemCtrl):
+    type = 'MemCtrl'
+    cxx_header = "mem/mem_ctrl.hh"
 
     # single-ported on the system interface side, instantiate with a
     # bus in front of the controller for multiple ports
diff --git a/src/mem/MemInterface.py b/src/mem/MemInterface.py
index 3a8b917..85fe0a0 100644
--- a/src/mem/MemInterface.py
+++ b/src/mem/MemInterface.py
@@ -54,7 +54,7 @@
 class MemInterface(AbstractMemory):
     type = 'MemInterface'
     abstract = True
-    cxx_header = "mem/dram_ctrl.hh"
+    cxx_header = "mem/mem_interface.hh"
 
     # Allow the interface to set required controller buffer sizes
     # each entry corresponds to a burst for the specific memory channel
diff --git a/src/mem/NVMInterface.py b/src/mem/NVMInterface.py
index f28dd81..3f6fbc4 100644
--- a/src/mem/NVMInterface.py
+++ b/src/mem/NVMInterface.py
@@ -43,7 +43,7 @@
 # are modeled without getting into too much detail of the media itself.
 class NVMInterface(MemInterface):
     type = 'NVMInterface'
-    cxx_header = "mem/dram_ctrl.hh"
+    cxx_header = "mem/mem_interface.hh"
 
     # NVM DIMM could have write buffer to offload writes
     # define buffer depth, which will limit the number of pending writes
diff --git a/src/mem/SConscript b/src/mem/SConscript
index 409116c..cf7adc8 100644
--- a/src/mem/SConscript
+++ b/src/mem/SConscript
@@ -46,7 +46,7 @@
 SimObject('AbstractMemory.py')
 SimObject('AddrMapper.py')
 SimObject('Bridge.py')
-SimObject('DRAMCtrl.py')
+SimObject('MemCtrl.py')
 SimObject('MemInterface.py')
 SimObject('DRAMInterface.py')
 SimObject('NVMInterface.py')
@@ -64,9 +64,10 @@
 Source('bridge.cc')
 Source('coherent_xbar.cc')
 Source('drampower.cc')
-Source('dram_ctrl.cc')
 Source('external_master.cc')
 Source('external_slave.cc')
+Source('mem_ctrl.cc')
+Source('mem_interface.cc')
 Source('noncoherent_xbar.cc')
 Source('packet.cc')
 Source('port.cc')
@@ -120,6 +121,7 @@
 DebugFlag('ExternalPort')
 DebugFlag('HtmMem', 'Hardware Transactional Memory (Mem side)')
 DebugFlag('LLSC')
+DebugFlag('MemCtrl')
 DebugFlag('MMU')
 DebugFlag('MemoryAccess')
 DebugFlag('PacketQueue')
diff --git a/src/mem/mem_ctrl.cc b/src/mem/mem_ctrl.cc
new file mode 100644
index 0000000..66d3c2a
--- /dev/null
+++ b/src/mem/mem_ctrl.cc
@@ -0,0 +1,1475 @@
+/*
+ * Copyright (c) 2010-2020 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder.  You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2013 Amin Farmahini-Farahani
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "mem/mem_ctrl.hh"
+
+#include "base/trace.hh"
+#include "debug/DRAM.hh"
+#include "debug/Drain.hh"
+#include "debug/MemCtrl.hh"
+#include "debug/NVM.hh"
+#include "debug/QOS.hh"
+#include "mem/mem_interface.hh"
+#include "sim/system.hh"
+
+using namespace std;
+
+MemCtrl::MemCtrl(const MemCtrlParams* p) :
+    QoS::MemCtrl(p),
+    port(name() + ".port", *this), isTimingMode(false),
+    retryRdReq(false), retryWrReq(false),
+    nextReqEvent([this]{ processNextReqEvent(); }, name()),
+    respondEvent([this]{ processRespondEvent(); }, name()),
+    dram(p->dram), nvm(p->nvm),
+    readBufferSize((dram ? dram->readBufferSize : 0) +
+                   (nvm ? nvm->readBufferSize : 0)),
+    writeBufferSize((dram ? dram->writeBufferSize : 0) +
+                    (nvm ? nvm->writeBufferSize : 0)),
+    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
+    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
+    minWritesPerSwitch(p->min_writes_per_switch),
+    writesThisTime(0), readsThisTime(0),
+    memSchedPolicy(p->mem_sched_policy),
+    frontendLatency(p->static_frontend_latency),
+    backendLatency(p->static_backend_latency),
+    commandWindow(p->command_window),
+    nextBurstAt(0), prevArrival(0),
+    nextReqTime(0),
+    stats(*this)
+{
+    DPRINTF(MemCtrl, "Setting up controller\n");
+    readQueue.resize(p->qos_priorities);
+    writeQueue.resize(p->qos_priorities);
+
+    // Hook up interfaces to the controller
+    if (dram)
+        dram->setCtrl(this, commandWindow);
+    if (nvm)
+        nvm->setCtrl(this, commandWindow);
+
+    fatal_if(!dram && !nvm, "Memory controller must have an interface");
+
+    // perform a basic check of the write thresholds
+    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
+        fatal("Write buffer low threshold %d must be smaller than the "
+              "high threshold %d\n", p->write_low_thresh_perc,
+              p->write_high_thresh_perc);
+}
+
+void
+MemCtrl::init()
+{
+    if (!port.isConnected()) {
+        fatal("MemCtrl %s is unconnected!\n", name());
+    } else {
+        port.sendRangeChange();
+    }
+}
+
+void
+MemCtrl::startup()
+{
+    // remember the memory system mode of operation
+    isTimingMode = system()->isTimingMode();
+
+    if (isTimingMode) {
+        // shift the bus busy time sufficiently far ahead that we never
+        // have to worry about negative values when computing the time for
+        // the next request; this will add an insignificant bubble at the
+        // start of simulation
+        nextBurstAt = curTick() + (dram ? dram->commandOffset() :
+                                          nvm->commandOffset());
+    }
+}
+
+Tick
+MemCtrl::recvAtomic(PacketPtr pkt)
+{
+    DPRINTF(MemCtrl, "recvAtomic: %s 0x%x\n",
+                     pkt->cmdString(), pkt->getAddr());
+
+    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
+             "is responding");
+
+    Tick latency = 0;
+    // do the actual memory access and turn the packet into a response
+    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
+        dram->access(pkt);
+
+        if (pkt->hasData()) {
+            // this value is not supposed to be accurate, just enough to
+            // keep things going, mimic a closed page
+            latency = dram->accessLatency();
+        }
+    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
+        nvm->access(pkt);
+
+        if (pkt->hasData()) {
+            // this value is not supposed to be accurate, just enough to
+            // keep things going, mimic a closed page
+            latency = nvm->accessLatency();
+        }
+    } else {
+        panic("Can't handle address range for packet %s\n",
+              pkt->print());
+    }
+
+    return latency;
+}
+
+bool
+MemCtrl::readQueueFull(unsigned int neededEntries) const
+{
+    DPRINTF(MemCtrl,
+            "Read queue limit %d, current size %d, entries needed %d\n",
+            readBufferSize, totalReadQueueSize + respQueue.size(),
+            neededEntries);
+
+    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
+    return rdsize_new > readBufferSize;
+}
+
+bool
+MemCtrl::writeQueueFull(unsigned int neededEntries) const
+{
+    DPRINTF(MemCtrl,
+            "Write queue limit %d, current size %d, entries needed %d\n",
+            writeBufferSize, totalWriteQueueSize, neededEntries);
+
+    auto wrsize_new = (totalWriteQueueSize + neededEntries);
+    return  wrsize_new > writeBufferSize;
+}
+
+void
+MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
+{
+    // only add to the read queue here. whenever the request is
+    // eventually done, set the readyTime, and call schedule()
+    assert(!pkt->isWrite());
+
+    assert(pkt_count != 0);
+
+    // if the request size is larger than burst size, the pkt is split into
+    // multiple packets
+    // Note that if the pkt starting address is not aligned to the burst
+    // size, the address of the first packet is kept unaligned. Subsequent
+    // packets are aligned to burst size boundaries. This is to ensure we
+    // accurately check read packets against packets in the write queue.
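+    // e.g. with 64-byte bursts, a 128-byte packet at addr 0x30 is split
+    // into three sub-packets: [0x30,0x40), [0x40,0x80) and [0x80,0xB0)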
+    const Addr base_addr = pkt->getAddr();
+    Addr addr = base_addr;
+    unsigned pktsServicedByWrQ = 0;
+    BurstHelper* burst_helper = NULL;
+
+    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
+                                    nvm->bytesPerBurst();
+    for (int cnt = 0; cnt < pkt_count; ++cnt) {
+        unsigned size = std::min((addr | (burst_size - 1)) + 1,
+                        base_addr + pkt->getSize()) - addr;
+        stats.readPktSize[ceilLog2(size)]++;
+        stats.readBursts++;
+        stats.masterReadAccesses[pkt->masterId()]++;
+
+        // First check write buffer to see if the data is already at
+        // the controller
+        bool foundInWrQ = false;
+        Addr burst_addr = burstAlign(addr, is_dram);
+        // if the burst address is not present then there is no need
+        // to look any further
+        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
+            for (const auto& vec : writeQueue) {
+                for (const auto& p : vec) {
+                    // check if the read is subsumed in the write queue
+                    // packet we are looking at
+                    if (p->addr <= addr &&
+                       ((addr + size) <= (p->addr + p->size))) {
+
+                        foundInWrQ = true;
+                        stats.servicedByWrQ++;
+                        pktsServicedByWrQ++;
+                        DPRINTF(MemCtrl,
+                                "Read to addr %lld with size %d serviced by "
+                                "write queue\n",
+                                addr, size);
+                        stats.bytesReadWrQ += burst_size;
+                        break;
+                    }
+                }
+            }
+        }
+
+        // If not found in the write q, make a memory packet and
+        // push it onto the read queue
+        if (!foundInWrQ) {
+
+            // Make the burst helper for split packets
+            if (pkt_count > 1 && burst_helper == NULL) {
+                DPRINTF(MemCtrl, "Read to addr %lld translates to %d "
+                        "memory requests\n", pkt->getAddr(), pkt_count);
+                burst_helper = new BurstHelper(pkt_count);
+            }
+
+            MemPacket* mem_pkt;
+            if (is_dram) {
+                mem_pkt = dram->decodePacket(pkt, addr, size, true, true);
+                // increment read entries of the rank
+                dram->setupRank(mem_pkt->rank, true);
+            } else {
+                mem_pkt = nvm->decodePacket(pkt, addr, size, true, false);
+                // Increment count to trigger issue of non-deterministic read
+                nvm->setupRank(mem_pkt->rank, true);
+                // Default readyTime to Max; will be reset once read is issued
+                mem_pkt->readyTime = MaxTick;
+            }
+            mem_pkt->burstHelper = burst_helper;
+
+            assert(!readQueueFull(1));
+            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;
+
+            DPRINTF(MemCtrl, "Adding to read queue\n");
+
+            readQueue[mem_pkt->qosValue()].push_back(mem_pkt);
+
+            // log packet
+            logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
+                       mem_pkt->addr, 1);
+
+            // Update stats
+            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
+        }
+
+        // Starting address of next memory pkt (aligned to burst boundary)
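+        // e.g. with a 64-byte burst, addr 0x47 advances to 0x80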
+        addr = (addr | (burst_size - 1)) + 1;
+    }
+
+    // If all packets are serviced by the write queue, send the response back
+    if (pktsServicedByWrQ == pkt_count) {
+        accessAndRespond(pkt, frontendLatency);
+        return;
+    }
+
+    // Update how many split packets are serviced by write queue
+    if (burst_helper != NULL)
+        burst_helper->burstsServiced = pktsServicedByWrQ;
+
+    // If we are not already scheduled to get a request out of the
+    // queue, do so now
+    if (!nextReqEvent.scheduled()) {
+        DPRINTF(MemCtrl, "Request scheduled immediately\n");
+        schedule(nextReqEvent, curTick());
+    }
+}
+
+void
+MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
+{
+    // only add to the write queue here. whenever the request is
+    // eventually done, set the readyTime, and call schedule()
+    assert(pkt->isWrite());
+
+    // if the request size is larger than burst size, the pkt is split into
+    // multiple packets
+    const Addr base_addr = pkt->getAddr();
+    Addr addr = base_addr;
+    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
+                                    nvm->bytesPerBurst();
+    for (int cnt = 0; cnt < pkt_count; ++cnt) {
+        unsigned size = std::min((addr | (burst_size - 1)) + 1,
+                        base_addr + pkt->getSize()) - addr;
+        stats.writePktSize[ceilLog2(size)]++;
+        stats.writeBursts++;
+        stats.masterWriteAccesses[pkt->masterId()]++;
+
+        // see if we can merge with an existing item in the write
+        // queue and keep track of whether we have merged or not
+        bool merged = isInWriteQueue.find(burstAlign(addr, is_dram)) !=
+            isInWriteQueue.end();
+
+        // if the item was not merged we need to create a new write
+        // and enqueue it
+        if (!merged) {
+            MemPacket* mem_pkt;
+            if (is_dram) {
+                mem_pkt = dram->decodePacket(pkt, addr, size, false, true);
+                dram->setupRank(mem_pkt->rank, false);
+            } else {
+                mem_pkt = nvm->decodePacket(pkt, addr, size, false, false);
+                nvm->setupRank(mem_pkt->rank, false);
+            }
+            assert(totalWriteQueueSize < writeBufferSize);
+            stats.wrQLenPdf[totalWriteQueueSize]++;
+
+            DPRINTF(MemCtrl, "Adding to write queue\n");
+
+            writeQueue[mem_pkt->qosValue()].push_back(mem_pkt);
+            isInWriteQueue.insert(burstAlign(addr, is_dram));
+
+            // log packet
+            logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
+                       mem_pkt->addr, 1);
+
+            assert(totalWriteQueueSize == isInWriteQueue.size());
+
+            // Update stats
+            stats.avgWrQLen = totalWriteQueueSize;
+
+        } else {
+            DPRINTF(MemCtrl,
+                    "Merging write burst with existing queue entry\n");
+
+            // keep track of the fact that this burst effectively
+            // disappeared as it was merged with an existing one
+            stats.mergedWrBursts++;
+        }
+
+        // Starting address of next memory pkt (aligned to burst_size boundary)
+        addr = (addr | (burst_size - 1)) + 1;
+    }
+
+    // we do not wait for the writes to be sent to the actual memory,
+    // but instead take responsibility for the consistency here and
+    // snoop the write queue for any upcoming reads
+    // @todo, if a pkt size is larger than burst size, we might need a
+    // different front end latency
+    accessAndRespond(pkt, frontendLatency);
+
+    // If we are not already scheduled to get a request out of the
+    // queue, do so now
+    if (!nextReqEvent.scheduled()) {
+        DPRINTF(MemCtrl, "Request scheduled immediately\n");
+        schedule(nextReqEvent, curTick());
+    }
+}
+
+void
+MemCtrl::printQs() const
+{
+#if TRACING_ON
+    DPRINTF(MemCtrl, "===READ QUEUE===\n\n");
+    for (const auto& queue : readQueue) {
+        for (const auto& packet : queue) {
+            DPRINTF(MemCtrl, "Read %lu\n", packet->addr);
+        }
+    }
+
+    DPRINTF(MemCtrl, "\n===RESP QUEUE===\n\n");
+    for (const auto& packet : respQueue) {
+        DPRINTF(MemCtrl, "Response %lu\n", packet->addr);
+    }
+
+    DPRINTF(MemCtrl, "\n===WRITE QUEUE===\n\n");
+    for (const auto& queue : writeQueue) {
+        for (const auto& packet : queue) {
+            DPRINTF(MemCtrl, "Write %lu\n", packet->addr);
+        }
+    }
+#endif // TRACING_ON
+}
+
+bool
+MemCtrl::recvTimingReq(PacketPtr pkt)
+{
+    // This is where we enter from the outside world
+    DPRINTF(MemCtrl, "recvTimingReq: request %s addr %lld size %d\n",
+            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
+
+    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
+             "is responding");
+
+    panic_if(!(pkt->isRead() || pkt->isWrite()),
+             "Should only see read and writes at memory controller\n");
+
+    // Calc avg gap between requests
+    if (prevArrival != 0) {
+        stats.totGap += curTick() - prevArrival;
+    }
+    prevArrival = curTick();
+
+    // What type of media does this packet access?
+    bool is_dram;
+    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
+        is_dram = true;
+    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
+        is_dram = false;
+    } else {
+        panic("Can't handle address range for packet %s\n",
+              pkt->print());
+    }
+
+
+    // Find out how many memory packets a pkt translates to
+    // If the burst size is equal to or larger than the pkt size, then a pkt
+    // translates to only one memory packet. Otherwise, a pkt translates to
+    // multiple memory packets
+    unsigned size = pkt->getSize();
+    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
+                                    nvm->bytesPerBurst();
+    unsigned offset = pkt->getAddr() & (burst_size - 1);
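+    // e.g. a 128-byte pkt at offset 48 into a 64-byte burst needs
+    // divCeil(48 + 128, 64) = 3 memory packets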
+    unsigned int pkt_count = divCeil(offset + size, burst_size);
+
+    // run the QoS scheduler and assign a QoS priority value to the packet
+    qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);
+
+    // check local buffers and do not accept if full
+    if (pkt->isWrite()) {
+        assert(size != 0);
+        if (writeQueueFull(pkt_count)) {
+            DPRINTF(MemCtrl, "Write queue full, not accepting\n");
+            // remember that we have to retry this port
+            retryWrReq = true;
+            stats.numWrRetry++;
+            return false;
+        } else {
+            addToWriteQueue(pkt, pkt_count, is_dram);
+            stats.writeReqs++;
+            stats.bytesWrittenSys += size;
+        }
+    } else {
+        assert(pkt->isRead());
+        assert(size != 0);
+        if (readQueueFull(pkt_count)) {
+            DPRINTF(MemCtrl, "Read queue full, not accepting\n");
+            // remember that we have to retry this port
+            retryRdReq = true;
+            stats.numRdRetry++;
+            return false;
+        } else {
+            addToReadQueue(pkt, pkt_count, is_dram);
+            stats.readReqs++;
+            stats.bytesReadSys += size;
+        }
+    }
+
+    return true;
+}
+
+void
+MemCtrl::processRespondEvent()
+{
+    DPRINTF(MemCtrl,
+            "processRespondEvent(): Some req has reached its readyTime\n");
+
+    MemPacket* mem_pkt = respQueue.front();
+
+    if (mem_pkt->isDram()) {
+        // media specific checks and functions when read response is complete
+        dram->respondEvent(mem_pkt->rank);
+    }
+
+    if (mem_pkt->burstHelper) {
+        // it is a split packet
+        mem_pkt->burstHelper->burstsServiced++;
+        if (mem_pkt->burstHelper->burstsServiced ==
+            mem_pkt->burstHelper->burstCount) {
+            // we have now serviced all children packets of a system packet
+            // so we can now respond to the requester
+            // @todo we probably want to have a different front end and back
+            // end latency for split packets
+            accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
+            delete mem_pkt->burstHelper;
+            mem_pkt->burstHelper = NULL;
+        }
+    } else {
+        // it is not a split packet
+        accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
+    }
+
+    delete respQueue.front();
+    respQueue.pop_front();
+
+    if (!respQueue.empty()) {
+        assert(respQueue.front()->readyTime >= curTick());
+        assert(!respondEvent.scheduled());
+        schedule(respondEvent, respQueue.front()->readyTime);
+    } else {
+        // if there is nothing left in any queue, signal a drain
+        if (drainState() == DrainState::Draining &&
+            !totalWriteQueueSize && !totalReadQueueSize &&
+            allIntfDrained()) {
+
+            DPRINTF(Drain, "Controller done draining\n");
+            signalDrainDone();
+        } else if (mem_pkt->isDram()) {
+            // check the refresh state and kick the refresh event loop
+            // into action again if banks already closed and just waiting
+            // for read to complete
+            dram->checkRefreshState(mem_pkt->rank);
+        }
+    }
+
+    // We have made a location in the queue available at this point,
+    // so if there is a read that was forced to wait, retry now
+    if (retryRdReq) {
+        retryRdReq = false;
+        port.sendRetryReq();
+    }
+}
+
+MemPacketQueue::iterator
+MemCtrl::chooseNext(MemPacketQueue& queue, Tick extra_col_delay)
+{
+    // This method does the arbitration between requests.
+
+    MemPacketQueue::iterator ret = queue.end();
+
+    if (!queue.empty()) {
+        if (queue.size() == 1) {
+            // an available rank corresponds to a refresh idle state
+            MemPacket* mem_pkt = *(queue.begin());
+            if (packetReady(mem_pkt)) {
+                ret = queue.begin();
+                DPRINTF(MemCtrl, "Single request, going to a free rank\n");
+            } else {
+                DPRINTF(MemCtrl, "Single request, going to a busy rank\n");
+            }
+        } else if (memSchedPolicy == Enums::fcfs) {
+            // check if there is a packet going to a free rank
+            for (auto i = queue.begin(); i != queue.end(); ++i) {
+                MemPacket* mem_pkt = *i;
+                if (packetReady(mem_pkt)) {
+                    ret = i;
+                    break;
+                }
+            }
+        } else if (memSchedPolicy == Enums::frfcfs) {
+            ret = chooseNextFRFCFS(queue, extra_col_delay);
+        } else {
+            panic("No scheduling policy chosen\n");
+        }
+    }
+    return ret;
+}
+
+MemPacketQueue::iterator
+MemCtrl::chooseNextFRFCFS(MemPacketQueue& queue, Tick extra_col_delay)
+{
+    auto selected_pkt_it = queue.end();
+    Tick col_allowed_at = MaxTick;
+
+    // time we need to issue a column command to be seamless
+    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());
+
+    // find optimal packet for each interface
+    if (dram && nvm) {
+        // create 2nd set of parameters for NVM
+        auto nvm_pkt_it = queue.end();
+        Tick nvm_col_at = MaxTick;
+
+        // Select packet by default to give priority if both
+        // can issue at the same time or seamlessly
+        std::tie(selected_pkt_it, col_allowed_at) =
+                 dram->chooseNextFRFCFS(queue, min_col_at);
+        std::tie(nvm_pkt_it, nvm_col_at) =
+                 nvm->chooseNextFRFCFS(queue, min_col_at);
+
+        // Compare DRAM and NVM and select NVM if it can issue
+        // earlier than the DRAM packet
+        if (col_allowed_at > nvm_col_at) {
+            selected_pkt_it = nvm_pkt_it;
+        }
+    } else if (dram) {
+        std::tie(selected_pkt_it, col_allowed_at) =
+                 dram->chooseNextFRFCFS(queue, min_col_at);
+    } else if (nvm) {
+        std::tie(selected_pkt_it, col_allowed_at) =
+                 nvm->chooseNextFRFCFS(queue, min_col_at);
+    }
+
+    if (selected_pkt_it == queue.end()) {
+        DPRINTF(MemCtrl, "%s no available packets found\n", __func__);
+    }
+
+    return selected_pkt_it;
+}
+
+void
+MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
+{
+    DPRINTF(MemCtrl, "Responding to Address %lld..\n", pkt->getAddr());
+
+    bool needsResponse = pkt->needsResponse();
+    // do the actual memory access which also turns the packet into a
+    // response
+    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
+        dram->access(pkt);
+    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
+        nvm->access(pkt);
+    } else {
+        panic("Can't handle address range for packet %s\n",
+              pkt->print());
+    }
+
+    // turn packet around to go back to requester if response expected
+    if (needsResponse) {
+        // access already turned the packet into a response
+        assert(pkt->isResponse());
+        // response_time consumes the static latency and is charged also
+        // with headerDelay that takes into account the delay provided by
+        // the xbar and also the payloadDelay that takes into account the
+        // number of data beats.
+        Tick response_time = curTick() + static_latency + pkt->headerDelay +
+                             pkt->payloadDelay;
+        // Here we reset the timing of the packet before sending it out.
+        pkt->headerDelay = pkt->payloadDelay = 0;
+
+        // queue the packet in the response queue to be sent out after
+        // the static latency has passed
+        port.schedTimingResp(pkt, response_time);
+    } else {
+        // @todo the packet is going to be deleted, and the MemPacket
+        // is still having a pointer to it
+        pendingDelete.reset(pkt);
+    }
+
+    DPRINTF(MemCtrl, "Done\n");
+
+    return;
+}
+
+void
+MemCtrl::pruneBurstTick()
+{
+    auto it = burstTicks.begin();
+    while (it != burstTicks.end()) {
+        auto current_it = it++;
+        if (curTick() > *current_it) {
+            DPRINTF(MemCtrl, "Removing burstTick for %d\n", *current_it);
+            burstTicks.erase(current_it);
+        }
+    }
+}
+
+Tick
+MemCtrl::getBurstWindow(Tick cmd_tick)
+{
+    // get tick aligned to burst window
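+    // e.g. with a commandWindow of 10 ticks, cmd_tick 37 maps to tick 30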
+    Tick burst_offset = cmd_tick % commandWindow;
+    return (cmd_tick - burst_offset);
+}
+
+Tick
+MemCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
+{
+    // start with assumption that there is no contention on command bus
+    Tick cmd_at = cmd_tick;
+
+    // get tick aligned to burst window
+    Tick burst_tick = getBurstWindow(cmd_tick);
+
+    // verify that we have command bandwidth to issue the command
+    // if not, iterate over next window(s) until slot found
+    while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
+        DPRINTF(MemCtrl, "Contention found on command bus at %d\n",
+                burst_tick);
+        burst_tick += commandWindow;
+        cmd_at = burst_tick;
+    }
+
+    // add command into burst window and return corresponding Tick
+    burstTicks.insert(burst_tick);
+    return cmd_at;
+}
+
+Tick
+MemCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
+                         Tick max_multi_cmd_split)
+{
+    // start with assumption that there is no contention on command bus
+    Tick cmd_at = cmd_tick;
+
+    // get tick aligned to burst window
+    Tick burst_tick = getBurstWindow(cmd_tick);
+
+    // Command timing requirements are from 2nd command
+    // Start with assumption that 2nd command will issue at cmd_at and
+    // find prior slot for 1st command to issue
+    // Given a maximum latency of max_multi_cmd_split between the commands,
+    // find the burst at the maximum latency prior to cmd_at
+    Tick burst_offset = 0;
+    Tick first_cmd_offset = cmd_tick % commandWindow;
+    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
+        burst_offset += commandWindow;
+    }
+    // get the earliest burst-aligned tick for the first command
+    // ensure that the time does not go negative
+    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);
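+    // e.g. with commandWindow 10 and max_multi_cmd_split 15, a second
+    // command at tick 37 places the first command's window at tick 20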
+
+    // Can required commands issue?
+    bool first_can_issue = false;
+    bool second_can_issue = false;
+    // verify that we have command bandwidth to issue the command(s)
+    while (!first_can_issue || !second_can_issue) {
+        bool same_burst = (burst_tick == first_cmd_tick);
+        auto first_cmd_count = burstTicks.count(first_cmd_tick);
+        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
+                                   burstTicks.count(burst_tick);
+
+        first_can_issue = first_cmd_count < max_cmds_per_burst;
+        second_can_issue = second_cmd_count < max_cmds_per_burst;
+
+        if (!second_can_issue) {
+            DPRINTF(MemCtrl, "Contention (cmd2) found on command bus at %d\n",
+                    burst_tick);
+            burst_tick += commandWindow;
+            cmd_at = burst_tick;
+        }
+
+        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
+        // If commands initially were issued in same burst, they are
+        // now in consecutive bursts and can still issue B2B
+        bool gap_violated = !same_burst &&
+             ((burst_tick - first_cmd_tick) > max_multi_cmd_split);
+
+        if (!first_can_issue || (!second_can_issue && gap_violated)) {
+            DPRINTF(MemCtrl, "Contention (cmd1) found on command bus at %d\n",
+                    first_cmd_tick);
+            first_cmd_tick += commandWindow;
+        }
+    }
+
+    // Add command to burstTicks
+    burstTicks.insert(burst_tick);
+    burstTicks.insert(first_cmd_tick);
+
+    return cmd_at;
+}
+
+bool
+MemCtrl::inReadBusState(bool next_state) const
+{
+    // check the bus state
+    if (next_state) {
+        // use busStateNext to get the state that will be used
+        // for the next burst
+        return (busStateNext == MemCtrl::READ);
+    } else {
+        return (busState == MemCtrl::READ);
+    }
+}
+
+bool
+MemCtrl::inWriteBusState(bool next_state) const
+{
+    // check the bus state
+    if (next_state) {
+        // use busStateNext to get the state that will be used
+        // for the next burst
+        return (busStateNext == MemCtrl::WRITE);
+    } else {
+        return (busState == MemCtrl::WRITE);
+    }
+}
+
+void
+MemCtrl::doBurstAccess(MemPacket* mem_pkt)
+{
+    // first clean up the burstTick set, removing old entries
+    // before adding new entries for next burst
+    pruneBurstTick();
+
+    // When was command issued?
+    Tick cmd_at;
+
+    // Issue the next burst and update bus state to reflect
+    // when previous command was issued
+    if (mem_pkt->isDram()) {
+        std::vector<MemPacketQueue>& queue = selQueue(mem_pkt->isRead());
+        std::tie(cmd_at, nextBurstAt) =
+                 dram->doBurstAccess(mem_pkt, nextBurstAt, queue);
+
+        // Update timing for NVM ranks if NVM is configured on this channel
+        if (nvm)
+            nvm->addRankToRankDelay(cmd_at);
+
+    } else {
+        std::tie(cmd_at, nextBurstAt) =
+                 nvm->doBurstAccess(mem_pkt, nextBurstAt);
+
+        // Update timing for DRAM ranks if DRAM is configured on this channel
+        if (dram)
+            dram->addRankToRankDelay(cmd_at);
+
+    }
+
+    DPRINTF(MemCtrl, "Access to %lld, ready at %lld next burst at %lld.\n",
+            mem_pkt->addr, mem_pkt->readyTime, nextBurstAt);
+
+    // Update the minimum timing between the requests; this is a
+    // conservative estimate of when we have to schedule the next
+    // request to not introduce any unnecessary bubbles. In most cases
+    // we will wake up sooner than we have to.
+    nextReqTime = nextBurstAt - (dram ? dram->commandOffset() :
+                                        nvm->commandOffset());
+
+
+    // Update the common bus stats
+    if (mem_pkt->isRead()) {
+        ++readsThisTime;
+        // Update latency stats
+        stats.masterReadTotalLat[mem_pkt->masterId()] +=
+            mem_pkt->readyTime - mem_pkt->entryTime;
+        stats.masterReadBytes[mem_pkt->masterId()] += mem_pkt->size;
+    } else {
+        ++writesThisTime;
+        stats.masterWriteBytes[mem_pkt->masterId()] += mem_pkt->size;
+        stats.masterWriteTotalLat[mem_pkt->masterId()] +=
+            mem_pkt->readyTime - mem_pkt->entryTime;
+    }
+}
+
+void
+MemCtrl::processNextReqEvent()
+{
+    // transition is handled by QoS algorithm if enabled
+    if (turnPolicy) {
+        // select bus state - only done if QoS algorithms are in use
+        busStateNext = selectNextBusState();
+    }
+
+    // detect bus state change
+    bool switched_cmd_type = (busState != busStateNext);
+    // record stats
+    recordTurnaroundStats();
+
+    DPRINTF(MemCtrl, "QoS Turnarounds selected state %s %s\n",
+            (busState==MemCtrl::READ)?"READ":"WRITE",
+            switched_cmd_type?"[turnaround triggered]":"");
+
+    if (switched_cmd_type) {
+        if (busState == MemCtrl::READ) {
+            DPRINTF(MemCtrl,
+                    "Switching to writes after %d reads with %d reads "
+                    "waiting\n", readsThisTime, totalReadQueueSize);
+            stats.rdPerTurnAround.sample(readsThisTime);
+            readsThisTime = 0;
+        } else {
+            DPRINTF(MemCtrl,
+                    "Switching to reads after %d writes with %d writes "
+                    "waiting\n", writesThisTime, totalWriteQueueSize);
+            stats.wrPerTurnAround.sample(writesThisTime);
+            writesThisTime = 0;
+        }
+    }
+
+    // updates current state
+    busState = busStateNext;
+
+    if (nvm) {
+        for (auto queue = readQueue.rbegin();
+             queue != readQueue.rend(); ++queue) {
+             // select non-deterministic NVM read to issue
+             // assume that we have the command bandwidth to issue this along
+             // with additional RD/WR burst with needed bank operations
+             if (nvm->readsWaitingToIssue()) {
+                 // select non-deterministic NVM read to issue
+                 nvm->chooseRead(*queue);
+             }
+        }
+    }
+
+    // check ranks for refresh/wakeup - uses busStateNext, so done after
+    // turnaround decisions
+    // Default to busy status and update based on interface specifics
+    bool dram_busy = dram ? dram->isBusy() : true;
+    bool nvm_busy = true;
+    bool all_writes_nvm = false;
+    if (nvm) {
+        all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
+        bool read_queue_empty = totalReadQueueSize == 0;
+        nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);
+    }
+    // Default state of unused interface is 'true'
+    // Simply AND the busy signals to determine if system is busy
+    if (dram_busy && nvm_busy) {
+        // if all ranks are refreshing wait for them to finish
+        // and stall this state machine without taking any further
+        // action, and do not schedule a new nextReqEvent
+        return;
+    }
+
+    // when we get here it is either a read or a write
+    if (busState == READ) {
+
+        // track if we should switch or not
+        bool switch_to_writes = false;
+
+        if (totalReadQueueSize == 0) {
+            // In the case there is no read request to go next,
+            // trigger writes if we have passed the low threshold (or
+            // if we are draining)
+            if (!(totalWriteQueueSize == 0) &&
+                (drainState() == DrainState::Draining ||
+                 totalWriteQueueSize > writeLowThreshold)) {
+
+                DPRINTF(MemCtrl,
+                        "Switching to writes due to read queue empty\n");
+                switch_to_writes = true;
+            } else {
+                // check if we are drained
+                // not done draining until in PWR_IDLE state
+                // ensuring all banks are closed and
+                // have exited low power states
+                if (drainState() == DrainState::Draining &&
+                    respQueue.empty() && allIntfDrained()) {
+
+                    DPRINTF(Drain, "Controller done draining\n");
+                    signalDrainDone();
+                }
+
+                // nothing to do, not even any point in scheduling an
+                // event for the next request
+                return;
+            }
+        } else {
+
+            bool read_found = false;
+            MemPacketQueue::iterator to_read;
+            uint8_t prio = numPriorities();
+
+            for (auto queue = readQueue.rbegin();
+                 queue != readQueue.rend(); ++queue) {
+
+                prio--;
+
+                DPRINTF(QOS,
+                        "Checking READ queue [%d] priority [%d elements]\n",
+                        prio, queue->size());
+
+                // Figure out which read request goes next
+                // If we are changing command type, incorporate the minimum
+                // bus turnaround delay which will be rank to rank delay
+                to_read = chooseNext((*queue), switched_cmd_type ?
+                                               minWriteToReadDataGap() : 0);
+
+                if (to_read != queue->end()) {
+                    // candidate read found
+                    read_found = true;
+                    break;
+                }
+            }
+
+            // if no read to an available rank is found then return
+            // at this point. There could be writes to the available ranks
+            // which are above the required threshold. However, to
+            // avoid adding more complexity to the code, return and wait
+            // for a refresh event to kick things into action again.
+            if (!read_found) {
+                DPRINTF(MemCtrl, "No Reads Found - exiting\n");
+                return;
+            }
+
+            auto mem_pkt = *to_read;
+
+            doBurstAccess(mem_pkt);
+
+            // sanity check
+            assert(mem_pkt->size <= (mem_pkt->isDram() ?
+                                      dram->bytesPerBurst() :
+                                      nvm->bytesPerBurst()) );
+            assert(mem_pkt->readyTime >= curTick());
+
+            // log the response
+            logResponse(MemCtrl::READ, (*to_read)->masterId(),
+                        mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
+                        mem_pkt->readyTime - mem_pkt->entryTime);
+
+
+            // Insert into response queue. It will be sent back to the
+            // requester at its readyTime
+            if (respQueue.empty()) {
+                assert(!respondEvent.scheduled());
+                schedule(respondEvent, mem_pkt->readyTime);
+            } else {
+                assert(respQueue.back()->readyTime <= mem_pkt->readyTime);
+                assert(respondEvent.scheduled());
+            }
+
+            respQueue.push_back(mem_pkt);
+
+            // we have so many writes that we have to transition
+            // don't transition if the writeRespQueue is full and
+            // there are no other writes that can issue
+            if ((totalWriteQueueSize > writeHighThreshold) &&
+               !(nvm && all_writes_nvm && nvm->writeRespQueueFull())) {
+                switch_to_writes = true;
+            }
+
+            // remove the request from the queue
+            // the iterator is no longer valid.
+            readQueue[mem_pkt->qosValue()].erase(to_read);
+        }
+
+        // switching to writes, either because the read queue is empty
+        // and the writes have passed the low threshold (or we are
+        // draining), or because the writes hit the high threshold
+        if (switch_to_writes) {
+            // transition to writing
+            busStateNext = WRITE;
+        }
+    } else {
+
+        bool write_found = false;
+        MemPacketQueue::iterator to_write;
+        uint8_t prio = numPriorities();
+
+        for (auto queue = writeQueue.rbegin();
+             queue != writeQueue.rend(); ++queue) {
+
+            prio--;
+
+            DPRINTF(QOS,
+                    "Checking WRITE queue [%d] priority [%d elements]\n",
+                    prio, queue->size());
+
+            // If we are changing command type, incorporate the minimum
+            // bus turnaround delay
+            to_write = chooseNext((*queue),
+                     switched_cmd_type ? minReadToWriteDataGap() : 0);
+
+            if (to_write != queue->end()) {
+                write_found = true;
+                break;
+            }
+        }
+
+        // if no writes to a rank that is available to service requests
+        // (i.e. a rank in refresh idle state) are found, then return.
+        // There could be reads to the available ranks. However, to
+        // avoid adding more complexity to the code, return at this point
+        // and wait for a refresh event to kick things into action again.
+        if (!write_found) {
+            DPRINTF(MemCtrl, "No Writes Found - exiting\n");
+            return;
+        }
+
+        auto mem_pkt = *to_write;
+
+        // sanity check
+        assert(mem_pkt->size <= (mem_pkt->isDram() ?
+                                  dram->bytesPerBurst() :
+                                  nvm->bytesPerBurst()) );
+
+        doBurstAccess(mem_pkt);
+
+        isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram()));
+
+        // log the response
+        logResponse(MemCtrl::WRITE, mem_pkt->masterId(),
+                    mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
+                    mem_pkt->readyTime - mem_pkt->entryTime);
+
+
+        // remove the request from the queue - the iterator is no longer valid
+        writeQueue[mem_pkt->qosValue()].erase(to_write);
+
+        delete mem_pkt;
+
+        // If we emptied the write queue, or got sufficiently below the
+        // threshold (using the minWritesPerSwitch as the hysteresis) and
+        // are not draining, or we have reads waiting and have done enough
+        // writes, then switch to reads.
+        // If we are interfacing to NVM and have filled the writeRespQueue
+        // with only NVM writes in the queue, then switch to reads.
+        bool below_threshold =
+            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;
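+        // e.g. with writeLowThreshold 16 and minWritesPerSwitch 8,
+        // below_threshold holds once fewer than 8 writes remain queued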
+
+        if (totalWriteQueueSize == 0 ||
+            (below_threshold && drainState() != DrainState::Draining) ||
+            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch) ||
+            (totalReadQueueSize && nvm && nvm->writeRespQueueFull() &&
+             all_writes_nvm)) {
+
+            // turn the bus back around for reads again
+            busStateNext = MemCtrl::READ;
+
+            // note that we switch back to reads also in the idle
+            // case, which eventually will check for any draining and
+            // also pause any further scheduling if there is really
+            // nothing to do
+        }
+    }
+    // It is possible that a refresh to another rank kicks things back into
+    // action before reaching this point.
+    if (!nextReqEvent.scheduled())
+        schedule(nextReqEvent, std::max(nextReqTime, curTick()));
+
+    // If there is space available and we have writes waiting then let
+    // them retry. This is done here to ensure that the retry does not
+    // cause a nextReqEvent to be scheduled before we do so as part of
+    // the next request processing
+    if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
+        retryWrReq = false;
+        port.sendRetryReq();
+    }
+}
+
+bool
+MemCtrl::packetReady(MemPacket* pkt)
+{
+    return (pkt->isDram() ?
+        dram->burstReady(pkt) : nvm->burstReady(pkt));
+}
+
+Tick
+MemCtrl::minReadToWriteDataGap()
+{
+    Tick dram_min = dram ?  dram->minReadToWriteDataGap() : MaxTick;
+    Tick nvm_min = nvm ?  nvm->minReadToWriteDataGap() : MaxTick;
+    return std::min(dram_min, nvm_min);
+}
+
+Tick
+MemCtrl::minWriteToReadDataGap()
+{
+    Tick dram_min = dram ? dram->minWriteToReadDataGap() : MaxTick;
+    Tick nvm_min = nvm ?  nvm->minWriteToReadDataGap() : MaxTick;
+    return std::min(dram_min, nvm_min);
+}
+
+Addr
+MemCtrl::burstAlign(Addr addr, bool is_dram) const
+{
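+    // mask off the low-order burst offset bits; e.g. with 64-byte bursts,
+    // addresses 0x40 through 0x7F all align to 0x40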
+    if (is_dram)
+        return (addr & ~(Addr(dram->bytesPerBurst() - 1)));
+    else
+        return (addr & ~(Addr(nvm->bytesPerBurst() - 1)));
+}
+
+MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl)
+    : Stats::Group(&_ctrl),
+    ctrl(_ctrl),
+
+    ADD_STAT(readReqs, "Number of read requests accepted"),
+    ADD_STAT(writeReqs, "Number of write requests accepted"),
+
+    ADD_STAT(readBursts,
+             "Number of controller read bursts, "
+             "including those serviced by the write queue"),
+    ADD_STAT(writeBursts,
+             "Number of controller write bursts, "
+             "including those merged in the write queue"),
+    ADD_STAT(servicedByWrQ,
+             "Number of controller read bursts serviced by the write queue"),
+    ADD_STAT(mergedWrBursts,
+             "Number of controller write bursts merged with an existing one"),
+
+    ADD_STAT(neitherReadNorWriteReqs,
+             "Number of requests that are neither read nor write"),
+
+    ADD_STAT(avgRdQLen, "Average read queue length when enqueuing"),
+    ADD_STAT(avgWrQLen, "Average write queue length when enqueuing"),
+
+    ADD_STAT(numRdRetry, "Number of times read queue was full causing retry"),
+    ADD_STAT(numWrRetry, "Number of times write queue was full causing retry"),
+
+    ADD_STAT(readPktSize, "Read request sizes (log2)"),
+    ADD_STAT(writePktSize, "Write request sizes (log2)"),
+
+    ADD_STAT(rdQLenPdf, "What read queue length does an incoming req see"),
+    ADD_STAT(wrQLenPdf, "What write queue length does an incoming req see"),
+
+    ADD_STAT(rdPerTurnAround,
+             "Reads before turning the bus around for writes"),
+    ADD_STAT(wrPerTurnAround,
+             "Writes before turning the bus around for reads"),
+
+    ADD_STAT(bytesReadWrQ, "Total number of bytes read from write queue"),
+    ADD_STAT(bytesReadSys, "Total read bytes from the system interface side"),
+    ADD_STAT(bytesWrittenSys,
+             "Total written bytes from the system interface side"),
+
+    ADD_STAT(avgRdBWSys, "Average system read bandwidth in MByte/s"),
+    ADD_STAT(avgWrBWSys, "Average system write bandwidth in MByte/s"),
+
+    ADD_STAT(totGap, "Total gap between requests"),
+    ADD_STAT(avgGap, "Average gap between requests"),
+
+    ADD_STAT(masterReadBytes, "Per-master bytes read from memory"),
+    ADD_STAT(masterWriteBytes, "Per-master bytes written to memory"),
+    ADD_STAT(masterReadRate,
+             "Per-master rate of bytes read from memory (Bytes/sec)"),
+    ADD_STAT(masterWriteRate,
+             "Per-master rate of bytes written to memory (Bytes/sec)"),
+    ADD_STAT(masterReadAccesses,
+             "Per-master read serviced memory accesses"),
+    ADD_STAT(masterWriteAccesses,
+             "Per-master write serviced memory accesses"),
+    ADD_STAT(masterReadTotalLat,
+             "Per-master read total memory access latency"),
+    ADD_STAT(masterWriteTotalLat,
+             "Per-master write total memory access latency"),
+    ADD_STAT(masterReadAvgLat,
+             "Per-master read average memory access latency"),
+    ADD_STAT(masterWriteAvgLat,
+             "Per-master write average memory access latency")
+
+{
+}
+
+void
+MemCtrl::CtrlStats::regStats()
+{
+    using namespace Stats;
+
+    assert(ctrl.system());
+    const auto max_masters = ctrl.system()->maxMasters();
+
+    avgRdQLen.precision(2);
+    avgWrQLen.precision(2);
+
+    readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
+    writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
+
+    rdQLenPdf.init(ctrl.readBufferSize);
+    wrQLenPdf.init(ctrl.writeBufferSize);
+
+    rdPerTurnAround
+        .init(ctrl.readBufferSize)
+        .flags(nozero);
+    wrPerTurnAround
+        .init(ctrl.writeBufferSize)
+        .flags(nozero);
+
+    avgRdBWSys.precision(2);
+    avgWrBWSys.precision(2);
+    avgGap.precision(2);
+
+    // per-master bytes read and written to memory
+    masterReadBytes
+        .init(max_masters)
+        .flags(nozero | nonan);
+
+    masterWriteBytes
+        .init(max_masters)
+        .flags(nozero | nonan);
+
+    // per-master bytes read and written to memory rate
+    masterReadRate
+        .flags(nozero | nonan)
+        .precision(12);
+
+    masterReadAccesses
+        .init(max_masters)
+        .flags(nozero);
+
+    masterWriteAccesses
+        .init(max_masters)
+        .flags(nozero);
+
+    masterReadTotalLat
+        .init(max_masters)
+        .flags(nozero | nonan);
+
+    masterReadAvgLat
+        .flags(nonan)
+        .precision(2);
+
+    masterWriteRate
+        .flags(nozero | nonan)
+        .precision(12);
+
+    masterWriteTotalLat
+        .init(max_masters)
+        .flags(nozero | nonan);
+
+    masterWriteAvgLat
+        .flags(nonan)
+        .precision(2);
+
+    for (int i = 0; i < max_masters; i++) {
+        const std::string master = ctrl.system()->getMasterName(i);
+        masterReadBytes.subname(i, master);
+        masterReadRate.subname(i, master);
+        masterWriteBytes.subname(i, master);
+        masterWriteRate.subname(i, master);
+        masterReadAccesses.subname(i, master);
+        masterWriteAccesses.subname(i, master);
+        masterReadTotalLat.subname(i, master);
+        masterReadAvgLat.subname(i, master);
+        masterWriteTotalLat.subname(i, master);
+        masterWriteAvgLat.subname(i, master);
+    }
+
+    // Formula stats
+    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
+    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
+
+    avgGap = totGap / (readReqs + writeReqs);
+
+    masterReadRate = masterReadBytes / simSeconds;
+    masterWriteRate = masterWriteBytes / simSeconds;
+    masterReadAvgLat = masterReadTotalLat / masterReadAccesses;
+    masterWriteAvgLat = masterWriteTotalLat / masterWriteAccesses;
+}
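Worked example for the formula stats above (illustrative numbers): with totGap = 2e9 ticks over readReqs + writeReqs = 1e6 accepted requests, avgGap evaluates to 2000 ticks; with bytesReadSys = 6.4e8 and simSeconds = 0.1, avgRdBWSys = (6.4e8 / 1e6) / 0.1 = 6400 MByte/s.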
+
+void
+MemCtrl::recvFunctional(PacketPtr pkt)
+{
+    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
+        // rely on the abstract memory
+        dram->functionalAccess(pkt);
+    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
+        // rely on the abstract memory
+        nvm->functionalAccess(pkt);
+    } else {
+        panic("Can't handle address range for packet %s\n",
+              pkt->print());
+    }
+}
+
+Port &
+MemCtrl::getPort(const string &if_name, PortID idx)
+{
+    if (if_name != "port") {
+        return QoS::MemCtrl::getPort(if_name, idx);
+    } else {
+        return port;
+    }
+}
+
+bool
+MemCtrl::allIntfDrained() const
+{
+    // ensure dram is in power down and refresh IDLE states
+    bool dram_drained = !dram || dram->allRanksDrained();
+    // No outstanding NVM writes
+    // All other queues verified as needed with calling logic
+    bool nvm_drained = !nvm || nvm->allRanksDrained();
+    return (dram_drained && nvm_drained);
+}
+
+DrainState
+MemCtrl::drain()
+{
+    // if there is anything in any of our internal queues, keep track
+    // of that as well
+    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
+          allIntfDrained())) {
+
+        DPRINTF(Drain, "Memory controller not drained, write: %d, read: %d,"
+                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
+                respQueue.size());
+
+        // the only queue that is not drained automatically over time
+        // is the write queue, thus kick things into action if needed
+        if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {
+            schedule(nextReqEvent, curTick());
+        }
+
+        if (dram)
+            dram->drainRanks();
+
+        return DrainState::Draining;
+    } else {
+        return DrainState::Drained;
+    }
+}
+
+void
+MemCtrl::drainResume()
+{
+    if (!isTimingMode && system()->isTimingMode()) {
+        // if we switched to timing mode, kick things into action,
+        // and behave as if we restored from a checkpoint
+        startup();
+        dram->startup();
+    } else if (isTimingMode && !system()->isTimingMode()) {
+        // if we switch from timing mode, stop the refresh events to
+        // not cause issues with KVM
+        if (dram)
+            dram->suspend();
+    }
+
+    // update the mode
+    isTimingMode = system()->isTimingMode();
+}
+
+MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
+    : QueuedSlavePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
+      ctrl(_ctrl)
+{ }
+
+AddrRangeList
+MemCtrl::MemoryPort::getAddrRanges() const
+{
+    AddrRangeList ranges;
+    if (ctrl.dram) {
+        DPRINTF(DRAM, "Pushing DRAM ranges to port\n");
+        ranges.push_back(ctrl.dram->getAddrRange());
+    }
+    if (ctrl.nvm) {
+        DPRINTF(NVM, "Pushing NVM ranges to port\n");
+        ranges.push_back(ctrl.nvm->getAddrRange());
+    }
+    return ranges;
+}
+
+void
+MemCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
+{
+    pkt->pushLabel(ctrl.name());
+
+    if (!queue.trySatisfyFunctional(pkt)) {
+        // Default implementation of SimpleTimingPort::recvFunctional()
+        // calls recvAtomic() and throws away the latency; we can save a
+        // little here by just not calculating the latency.
+        ctrl.recvFunctional(pkt);
+    }
+
+    pkt->popLabel();
+}
+
+Tick
+MemCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
+{
+    return ctrl.recvAtomic(pkt);
+}
+
+bool
+MemCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
+{
+    // pass it to the memory controller
+    return ctrl.recvTimingReq(pkt);
+}
+
+MemCtrl*
+MemCtrlParams::create()
+{
+    return new MemCtrl(this);
+}
diff --git a/src/mem/mem_ctrl.hh b/src/mem/mem_ctrl.hh
new file mode 100644
index 0000000..834cb5c
--- /dev/null
+++ b/src/mem/mem_ctrl.hh
@@ -0,0 +1,709 @@
+/*
+ * Copyright (c) 2012-2020 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder.  You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2013 Amin Farmahini-Farahani
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * MemCtrl declaration
+ */
+
+#ifndef __MEM_CTRL_HH__
+#define __MEM_CTRL_HH__
+
+#include <deque>
+#include <string>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "base/callback.hh"
+#include "base/statistics.hh"
+#include "enums/MemSched.hh"
+#include "mem/qos/mem_ctrl.hh"
+#include "mem/qport.hh"
+#include "params/MemCtrl.hh"
+#include "sim/eventq.hh"
+
+class DRAMInterface;
+class NVMInterface;
+
+/**
+ * A burst helper helps organize and manage a packet that is larger than
+ * the memory burst size. A system packet that is larger than the burst size
+ * is split into multiple packets and all those packets point to
+ * a single burst helper such that we know when the whole packet is served.
+ */
+class BurstHelper
+{
+  public:
+
+    /** Number of bursts required for a system packet **/
+    const unsigned int burstCount;
+
+    /** Number of bursts serviced so far for a system packet **/
+    unsigned int burstsServiced;
+
+    BurstHelper(unsigned int _burstCount)
+        : burstCount(_burstCount), burstsServiced(0)
+    { }
+};
+
+/**
+ * A memory packet stores packets along with the timestamp of when
+ * the packet entered the queue, and also the decoded address.
+ */
+class MemPacket
+{
+  public:
+
+    /** When did the request enter the controller */
+    const Tick entryTime;
+
+    /** When will the request leave the controller */
+    Tick readyTime;
+
+    /** This comes from the outside world */
+    const PacketPtr pkt;
+
+    /** MasterID associated with the packet */
+    const MasterID _masterId;
+
+    const bool read;
+
+    /** Does this packet access DRAM? */
+    const bool dram;
+
+    /** Will be populated by address decoder */
+    const uint8_t rank;
+    const uint8_t bank;
+    const uint32_t row;
+
+    /**
+     * Bank id is calculated considering banks in all the ranks
+     * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and
+     * bankId = 8 --> rank1, bank0
+     */
+    const uint16_t bankId;
+
+    /**
+     * The starting address of the packet.
+     * This address could be unaligned to burst size boundaries. The
+     * reason is to keep the address offset so we can accurately check
+     * incoming read packets with packets in the write queue.
+     */
+    Addr addr;
+
+    /**
+     * The size of this memory packet in bytes
+     * It is always equal to or smaller than the burst size
+     */
+    unsigned int size;
+
+    /**
+     * A pointer to the BurstHelper if this MemPacket is a split packet
+     * If not a split packet (common case), this is set to NULL
+     */
+    BurstHelper* burstHelper;
+
+    /**
+     * QoS value of the encapsulated packet read at queuing time
+     */
+    uint8_t _qosValue;
+
+    /**
+     * Set the packet QoS value
+     * (interface compatibility with Packet)
+     */
+    inline void qosValue(const uint8_t qv) { _qosValue = qv; }
+
+    /**
+     * Get the packet QoS value
+     * (interface compatibility with Packet)
+     */
+    inline uint8_t qosValue() const { return _qosValue; }
+
+    /**
+     * Get the packet MasterID
+     * (interface compatibility with Packet)
+     */
+    inline MasterID masterId() const { return _masterId; }
+
+    /**
+     * Get the packet size
+     * (interface compatibility with Packet)
+     */
+    inline unsigned int getSize() const { return size; }
+
+    /**
+     * Get the packet address
+     * (interface compatibility with Packet)
+     */
+    inline Addr getAddr() const { return addr; }
+
+    /**
+     * Return true if it's a read packet
+     * (interface compatibility with Packet)
+     */
+    inline bool isRead() const { return read; }
+
+    /**
+     * Return true if it's a write packet
+     * (interface compatibility with Packet)
+     */
+    inline bool isWrite() const { return !read; }
+
+    /**
+     * Return true if it's a DRAM access
+     */
+    inline bool isDram() const { return dram; }
+
+    MemPacket(PacketPtr _pkt, bool is_read, bool is_dram, uint8_t _rank,
+               uint8_t _bank, uint32_t _row, uint16_t bank_id, Addr _addr,
+               unsigned int _size)
+        : entryTime(curTick()), readyTime(curTick()), pkt(_pkt),
+          _masterId(pkt->masterId()),
+          read(is_read), dram(is_dram), rank(_rank), bank(_bank), row(_row),
+          bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
+          _qosValue(_pkt->qosValue())
+    { }
+
+};
+
+// The memory packets are stored in multiple deques,
+// based on their QoS priority
+typedef std::deque<MemPacket*> MemPacketQueue;
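A minimal usage sketch of this per-priority structure (illustrative only; num_qos_priorities and mem_pkt are hypothetical placeholders, mirroring how the controller's readQueue and writeQueue below are used):

    // One deque per QoS priority; a packet is enqueued by the QoS
    // value it was assigned when the request was accepted.
    std::vector<MemPacketQueue> queues(num_qos_priorities);
    queues[mem_pkt->qosValue()].push_back(mem_pkt);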
+
+
+/**
+ * The memory controller is a single-channel memory controller capturing
+ * the most important timing constraints associated with a
+ * contemporary controller. For multi-channel memory systems, the controller
+ * is combined with a crossbar model, with the channel address
+ * interleaving taking part in the crossbar.
+ *
+ * As a basic design principle, this controller
+ * model is not cycle callable, but instead uses events to: 1) decide
+ * when new decisions can be made, 2) when resources become available,
+ * 3) when things are to be considered done, and 4) when to send
+ * things back. The controller relies on media-specific interfaces
+ * to enable flexible topologies.
+ * Through these simple principles, the model delivers
+ * high performance, and lots of flexibility, allowing users to
+ * evaluate the system impact of a wide range of memory technologies.
+ *
+ * For more details, please see Hansson et al, "Simulating DRAM
+ * controllers for future system architecture exploration",
+ * Proc. ISPASS, 2014. If you use this model as part of your research
+ * please cite the paper.
+ *
+ */
+class MemCtrl : public QoS::MemCtrl
+{
+  private:
+
+    // For now, make use of a queued slave port to avoid dealing with
+    // flow control for the responses being sent back
+    class MemoryPort : public QueuedSlavePort
+    {
+
+        RespPacketQueue queue;
+        MemCtrl& ctrl;
+
+      public:
+
+        MemoryPort(const std::string& name, MemCtrl& _ctrl);
+
+      protected:
+
+        Tick recvAtomic(PacketPtr pkt);
+
+        void recvFunctional(PacketPtr pkt);
+
+        bool recvTimingReq(PacketPtr);
+
+        virtual AddrRangeList getAddrRanges() const;
+
+    };
+
+    /**
+     * Our incoming port, for a multi-ported controller add a crossbar
+     * in front of it
+     */
+    MemoryPort port;
+
+    /**
+     * Remember if the memory system is in timing mode
+     */
+    bool isTimingMode;
+
+    /**
+     * Remember if we have to retry a request when available.
+     */
+    bool retryRdReq;
+    bool retryWrReq;
+
+    /**
+     * A number of things are required to set up "events" in gem5.
+     * When the event "respondEvent" occurs, for example, the method
+     * processRespondEvent is called; no parameters are allowed
+     * in these methods
+     */
+    void processNextReqEvent();
+    EventFunctionWrapper nextReqEvent;
+
+    void processRespondEvent();
+    EventFunctionWrapper respondEvent;
+
+    /**
+     * Check if the read queue has room for more entries
+     *
+     * @param pkt_count The number of entries needed in the read queue
+     * @return true if read queue is full, false otherwise
+     */
+    bool readQueueFull(unsigned int pkt_count) const;
+
+    /**
+     * Check if the write queue has room for more entries
+     *
+     * @param pkt_count The number of entries needed in the write queue
+     * @return true if write queue is full, false otherwise
+     */
+    bool writeQueueFull(unsigned int pkt_count) const;
+
+    /**
+     * When a new read comes in, first check if the write q has a
+     * pending request to the same address.\ If not, decode the
+     * address to populate rank/bank/row, create one or multiple
+     * "mem_pkt", and push them to the back of the read queue.\
+     * If this is the only
+     * read request in the system, schedule an event to start
+     * servicing it.
+     *
+     * @param pkt The request packet from the outside world
+     * @param pkt_count The number of memory bursts the pkt
+     * translates to. If pkt size is larger than one full burst,
+     * then pkt_count is greater than one.
+     * @param is_dram Does this packet access DRAM?
+     */
+    void addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram);
+
+    /**
+     * Decode the incoming pkt, create a mem_pkt and push to the
+     * back of the write queue.\ If the write q length is more than
+     * the threshold specified by the user, i.e. the queue is beginning
+     * to get full, stop reads, and start draining writes.
+     *
+     * @param pkt The request packet from the outside world
+     * @param pkt_count The number of memory bursts the pkt
+     * translates to. If pkt size is larger than one full burst,
+     * then pkt_count is greater than one.
+     * @param is_dram Does this packet access DRAM?
+     */
+    void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram);
+
+    /**
+     * Actually do the burst based on media specific access function.
+     * Update bus statistics when complete.
+     *
+     * @param mem_pkt The memory packet created from the outside world pkt
+     */
+    void doBurstAccess(MemPacket* mem_pkt);
+
+    /**
+     * When a packet reaches its "readyTime" in the response Q,
+     * use the "access()" method in AbstractMemory to actually
+     * create the response packet, and send it back to the outside
+     * world requestor.
+     *
+     * @param pkt The packet from the outside world
+     * @param static_latency Static latency to add before sending the packet
+     */
+    void accessAndRespond(PacketPtr pkt, Tick static_latency);
+
+    /**
+     * Determine if there is a packet that can issue.
+     *
+     * @param pkt The packet to evaluate
+     */
+    bool packetReady(MemPacket* pkt);
+
+    /**
+     * Calculate the minimum delay used when scheduling a read-to-write
+     * transition.
+     * @return minimum delay
+     */
+    Tick minReadToWriteDataGap();
+
+    /**
+     * Calculate the minimum delay used when scheduling a write-to-read
+     * transition.
+     * @return minimum delay
+     */
+    Tick minWriteToReadDataGap();
+
+    /**
+     * The memory scheduler/arbiter - picks which request needs to
+     * go next, based on the specified policy such as FCFS or FR-FCFS
+     * and moves it to the head of the queue.
+     * Prioritizes accesses to the same rank as previous burst unless
+     * controller is switching command type.
+     *
+     * @param queue Queued requests to consider
+     * @param extra_col_delay Any extra delay due to a read/write switch
+     * @return an iterator to the selected packet, else queue.end()
+     */
+    MemPacketQueue::iterator chooseNext(MemPacketQueue& queue,
+        Tick extra_col_delay);
+
+    /**
+     * For FR-FCFS policy, reorder the read/write queue depending on row buffer
+     * hits and earliest bursts available in memory
+     *
+     * @param queue Queued requests to consider
+     * @param extra_col_delay Any extra delay due to a read/write switch
+     * @return an iterator to the selected packet, else queue.end()
+     */
+    MemPacketQueue::iterator chooseNextFRFCFS(MemPacketQueue& queue,
+            Tick extra_col_delay);
+
+    /**
+     * Calculate burst window aligned tick
+     *
+     * @param cmd_tick Initial tick of command
+     * @return burst window aligned tick
+     */
+    Tick getBurstWindow(Tick cmd_tick);
+
+    /**
+     * Used for debugging to observe the contents of the queues.
+     */
+    void printQs() const;
+
+    /**
+     * Burst-align an address.
+     *
+     * @param addr The potentially unaligned address
+     * @param is_dram Does this packet access DRAM?
+     *
+     * @return An address aligned to a memory burst
+     */
+    Addr burstAlign(Addr addr, bool is_dram) const;
+
+    /**
+     * The controller's main read and write queues,
+     * with support for QoS reordering
+     */
+    std::vector<MemPacketQueue> readQueue;
+    std::vector<MemPacketQueue> writeQueue;
+
+    /**
+     * To avoid iterating over the write queue to check for
+     * overlapping transactions, maintain a set of burst addresses
+     * that are currently queued. Since we merge writes to the same
+     * location we never have more than one address to the same burst
+     * address.
+     */
+    std::unordered_set<Addr> isInWriteQueue;
+
+    /**
+     * Response queue where read packets wait after we're done working
+     * with them, but it's not time to send the response yet. The
+     * responses are stored separately mostly to keep the code clean
+     * and help with events scheduling. For all logical purposes such
+     * as sizing the read queue, this and the main read queue need to
+     * be added together.
+     */
+    std::deque<MemPacket*> respQueue;
+
+    /**
+     * Holds count of commands issued in burst window starting at
+     * defined Tick. This is used to ensure that the command bandwidth
+     * does not exceed the allowable media constraints.
+     */
+    std::unordered_multiset<Tick> burstTicks;
+
+    /**
+     * Pointer to the interface of the actual DRAM media, when connected
+     */
+    DRAMInterface* const dram;
+
+    /**
+     * Pointer to the interface of the actual NVM media, when connected
+     */
+    NVMInterface* const nvm;
+
+    /**
+     * The following are basic design parameters of the memory
+     * controller, and are initialized based on parameter values.
+     * The rowsPerBank is determined based on the capacity, number of
+     * ranks and banks, the burst size, and the row buffer size.
+     */
+    const uint32_t readBufferSize;
+    const uint32_t writeBufferSize;
+    const uint32_t writeHighThreshold;
+    const uint32_t writeLowThreshold;
+    const uint32_t minWritesPerSwitch;
+    uint32_t writesThisTime;
+    uint32_t readsThisTime;
+
+    /**
+     * Memory controller configuration initialized based on parameter
+     * values.
+     */
+    Enums::MemSched memSchedPolicy;
+
+    /**
+     * Pipeline latency of the controller frontend. The frontend
+     * contribution is added to writes (that complete when they are in
+     * the write buffer) and reads that are serviced by the write buffer.
+     */
+    const Tick frontendLatency;
+
+    /**
+     * Pipeline latency of the backend and PHY. Along with the
+     * frontend contribution, this latency is added to reads serviced
+     * by the memory.
+     */
+    const Tick backendLatency;
+
+    /**
+     * Length of a command window, used to check
+     * command bandwidth
+     */
+    const Tick commandWindow;
+
+    /**
+     * Until when must we wait before issuing the next RD/WR burst?
+     */
+    Tick nextBurstAt;
+
+    Tick prevArrival;
+
+    /**
+     * The soonest you have to start thinking about the next request
+     * is the longest access time that can occur before
+     * nextBurstAt. Assuming you need to precharge, open a new row,
+     * and access, it is tRP + tRCD + tCL.
+     */
+    Tick nextReqTime;
+
+    struct CtrlStats : public Stats::Group
+    {
+        CtrlStats(MemCtrl &ctrl);
+
+        void regStats() override;
+
+        MemCtrl &ctrl;
+
+        // All statistics that the model needs to capture
+        Stats::Scalar readReqs;
+        Stats::Scalar writeReqs;
+        Stats::Scalar readBursts;
+        Stats::Scalar writeBursts;
+        Stats::Scalar servicedByWrQ;
+        Stats::Scalar mergedWrBursts;
+        Stats::Scalar neitherReadNorWriteReqs;
+        // Average queue lengths
+        Stats::Average avgRdQLen;
+        Stats::Average avgWrQLen;
+
+        Stats::Scalar numRdRetry;
+        Stats::Scalar numWrRetry;
+        Stats::Vector readPktSize;
+        Stats::Vector writePktSize;
+        Stats::Vector rdQLenPdf;
+        Stats::Vector wrQLenPdf;
+        Stats::Histogram rdPerTurnAround;
+        Stats::Histogram wrPerTurnAround;
+
+        Stats::Scalar bytesReadWrQ;
+        Stats::Scalar bytesReadSys;
+        Stats::Scalar bytesWrittenSys;
+        // Average bandwidth
+        Stats::Formula avgRdBWSys;
+        Stats::Formula avgWrBWSys;
+
+        Stats::Scalar totGap;
+        Stats::Formula avgGap;
+
+        // per-master bytes read and written to memory
+        Stats::Vector masterReadBytes;
+        Stats::Vector masterWriteBytes;
+
+        // per-master bytes read and written to memory rate
+        Stats::Formula masterReadRate;
+        Stats::Formula masterWriteRate;
+
+        // per-master read and write serviced memory accesses
+        Stats::Vector masterReadAccesses;
+        Stats::Vector masterWriteAccesses;
+
+        // per-master read and write total memory access latency
+        Stats::Vector masterReadTotalLat;
+        Stats::Vector masterWriteTotalLat;
+
+        // per-master read and write average memory access latency
+        Stats::Formula masterReadAvgLat;
+        Stats::Formula masterWriteAvgLat;
+    };
+
+    CtrlStats stats;
+
+    /**
+     * Upstream caches need this packet until true is returned, so
+     * hold it for deletion until a subsequent call
+     */
+    std::unique_ptr<Packet> pendingDelete;
+
+    /**
+     * Select either the read or write queue
+     *
+     * @param is_read The current burst is a read, select read queue
+     * @return a reference to the appropriate queue
+     */
+    std::vector<MemPacketQueue>& selQueue(bool is_read)
+    {
+        return (is_read ? readQueue : writeQueue);
+    }
+
+    /**
+     * Remove commands that have already issued from burstTicks
+     */
+    void pruneBurstTick();
+
+  public:
+
+    MemCtrl(const MemCtrlParams* p);
+
+    /**
+     * Ensure that all interfaces have drained commands
+     *
+     * @return bool flag, set once drain complete
+     */
+    bool allIntfDrained() const;
+
+    DrainState drain() override;
+
+    /**
+     * Check for command bus contention for single cycle command.
+     * If there is contention, shift command to next burst.
+     * Check verifies that the commands issued per burst is less
+     * than a defined max number, maxCommandsPerWindow.
+     * Therefore, contention per cycle is not verified and instead
+     * is done based on a burst window.
+     *
+     * @param cmd_tick Initial tick of command, to be verified
+     * @param max_cmds_per_burst Number of commands that can issue
+     *                           in a burst window
+     * @return tick for command issue without contention
+     */
+    Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst);
+
+    /**
+     * Check for command bus contention for multi-cycle (2 currently)
+     * command. If there is contention, shift command(s) to next burst.
+     * Check verifies that the commands issued per burst is less
+     * than a defined max number, maxCommandsPerWindow.
+     * Therefore, contention per cycle is not verified and instead
+     * is done based on a burst window.
+     *
+     * @param cmd_tick Initial tick of command, to be verified
+     * @param max_cmds_per_burst Number of commands that can issue
+     *                           in a burst window
+     * @param max_multi_cmd_split Maximum delay between commands
+     * @return tick for command issue without contention
+     */
+    Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
+                        Tick max_multi_cmd_split = 0);
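A minimal sketch of the burst-window alignment these checks build on (mirrors getBurstWindow above; the command_window value is hypothetical):

    // Align a command tick down to the start of its burst window.
    Tick getBurstWindow(Tick cmd_tick, Tick command_window)
    {
        return cmd_tick - (cmd_tick % command_window);
    }

With command_window = 5000 ticks, a command at tick 12345 falls in the window starting at 10000; if burstTicks already holds max_cmds_per_burst entries for that window, verifySingleCmd slides the command to the next window at 15000.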
+
+    /**
+     * Is there a respondEvent scheduled?
+     *
+     * @return true if event is scheduled
+     */
+    bool respondEventScheduled() const { return respondEvent.scheduled(); }
+
+    /**
+     * Is there a read/write burst Event scheduled?
+     *
+     * @return true if event is scheduled
+     */
+    bool requestEventScheduled() const { return nextReqEvent.scheduled(); }
+
+    /**
+     * Restart the controller.
+     * This can be used by interfaces to restart the
+     * scheduler after maintenance commands complete.
+     *
+     * @param tick Tick at which to schedule the next event
+     */
+    void restartScheduler(Tick tick) { schedule(nextReqEvent, tick); }
+
+    /**
+     * Check the current direction of the memory channel
+     *
+     * @param next_state Check either the current or next bus state
+     * @return True when bus is currently in a read state
+     */
+    bool inReadBusState(bool next_state) const;
+
+    /**
+     * Check the current direction of the memory channel
+     *
+     * @param next_state Check either the current or next bus state
+     * @return True when bus is currently in a write state
+     */
+    bool inWriteBusState(bool next_state) const;
+
+    Port &getPort(const std::string &if_name,
+                  PortID idx=InvalidPortID) override;
+
+    virtual void init() override;
+    virtual void startup() override;
+    virtual void drainResume() override;
+
+  protected:
+
+    Tick recvAtomic(PacketPtr pkt);
+    void recvFunctional(PacketPtr pkt);
+    bool recvTimingReq(PacketPtr pkt);
+
+};
+
+#endif //__MEM_CTRL_HH__
diff --git a/src/mem/dram_ctrl.cc b/src/mem/mem_interface.cc
similarity index 65%
rename from src/mem/dram_ctrl.cc
rename to src/mem/mem_interface.cc
index 55451a2..7817c4a 100644
--- a/src/mem/dram_ctrl.cc
+++ b/src/mem/mem_interface.cc
@@ -38,147 +38,48 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "mem/dram_ctrl.hh"
+#include "mem/mem_interface.hh"
 
 #include "base/bitfield.hh"
 #include "base/trace.hh"
 #include "debug/DRAM.hh"
 #include "debug/DRAMPower.hh"
 #include "debug/DRAMState.hh"
-#include "debug/Drain.hh"
 #include "debug/NVM.hh"
-#include "debug/QOS.hh"
-#include "params/DRAMInterface.hh"
-#include "params/NVMInterface.hh"
 #include "sim/system.hh"
 
 using namespace std;
 using namespace Data;
 
-DRAMCtrl::DRAMCtrl(const DRAMCtrlParams* p) :
-    QoS::MemCtrl(p),
-    port(name() + ".port", *this), isTimingMode(false),
-    retryRdReq(false), retryWrReq(false),
-    nextReqEvent([this]{ processNextReqEvent(); }, name()),
-    respondEvent([this]{ processRespondEvent(); }, name()),
-    dram(p->dram), nvm(p->nvm),
-    readBufferSize((dram ? dram->readBufferSize : 0) +
-                   (nvm ? nvm->readBufferSize : 0)),
-    writeBufferSize((dram ? dram->writeBufferSize : 0) +
-                    (nvm ? nvm->writeBufferSize : 0)),
-    writeHighThreshold(writeBufferSize * p->write_high_thresh_perc / 100.0),
-    writeLowThreshold(writeBufferSize * p->write_low_thresh_perc / 100.0),
-    minWritesPerSwitch(p->min_writes_per_switch),
-    writesThisTime(0), readsThisTime(0),
-    memSchedPolicy(p->mem_sched_policy),
-    frontendLatency(p->static_frontend_latency),
-    backendLatency(p->static_backend_latency),
-    commandWindow(p->command_window),
-    nextBurstAt(0), prevArrival(0),
-    nextReqTime(0),
-    stats(*this)
-{
-    DPRINTF(DRAM, "Setting up controller\n");
-    readQueue.resize(p->qos_priorities);
-    writeQueue.resize(p->qos_priorities);
-
-    // Hook up interfaces to the controller
-    if (dram)
-        dram->setCtrl(this, commandWindow);
-    if (nvm)
-        nvm->setCtrl(this, commandWindow);
-
-    fatal_if(!dram && !nvm, "Memory controller must have an interface");
-
-    // perform a basic check of the write thresholds
-    if (p->write_low_thresh_perc >= p->write_high_thresh_perc)
-        fatal("Write buffer low threshold %d must be smaller than the "
-              "high threshold %d\n", p->write_low_thresh_perc,
-              p->write_high_thresh_perc);
-}
+MemInterface::MemInterface(const MemInterfaceParams* _p)
+    : AbstractMemory(_p),
+      addrMapping(_p->addr_mapping),
+      burstSize((_p->devices_per_rank * _p->burst_length *
+                 _p->device_bus_width) / 8),
+      deviceSize(_p->device_size),
+      deviceRowBufferSize(_p->device_rowbuffer_size),
+      devicesPerRank(_p->devices_per_rank),
+      rowBufferSize(devicesPerRank * deviceRowBufferSize),
+      burstsPerRowBuffer(rowBufferSize / burstSize),
+      burstsPerStripe(range.interleaved() ?
+                      range.granularity() / burstSize : 1),
+      ranksPerChannel(_p->ranks_per_channel),
+      banksPerRank(_p->banks_per_rank), rowsPerBank(0),
+      tCK(_p->tCK), tCS(_p->tCS), tBURST(_p->tBURST),
+      tRTW(_p->tRTW),
+      tWTR(_p->tWTR),
+      readBufferSize(_p->read_buffer_size),
+      writeBufferSize(_p->write_buffer_size)
+{}
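For example (a common DDR-style organization, not a default from this patch): devices_per_rank = 8, burst_length = 8 and device_bus_width = 8 give burstSize = (8 * 8 * 8) / 8 = 64 bytes, i.e. one typical cache line per burst.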
 
 void
-DRAMCtrl::init()
+MemInterface::setCtrl(MemCtrl* _ctrl, unsigned int command_window)
 {
-   if (!port.isConnected()) {
-        fatal("DRAMCtrl %s is unconnected!\n", name());
-    } else {
-        port.sendRangeChange();
-    }
+    ctrl = _ctrl;
+    maxCommandsPerWindow = command_window / tCK;
 }
 
-void
-DRAMCtrl::startup()
-{
-    // remember the memory system mode of operation
-    isTimingMode = system()->isTimingMode();
-
-    if (isTimingMode) {
-        // shift the bus busy time sufficiently far ahead that we never
-        // have to worry about negative values when computing the time for
-        // the next request, this will add an insignificant bubble at the
-        // start of simulation
-        nextBurstAt = curTick() + (dram ? dram->commandOffset() :
-                                          nvm->commandOffset());
-    }
-}
-
-Tick
-DRAMCtrl::recvAtomic(PacketPtr pkt)
-{
-    DPRINTF(DRAM, "recvAtomic: %s 0x%x\n", pkt->cmdString(), pkt->getAddr());
-
-    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
-             "is responding");
-
-    Tick latency = 0;
-    // do the actual memory access and turn the packet into a response
-    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
-        dram->access(pkt);
-
-        if (pkt->hasData()) {
-            // this value is not supposed to be accurate, just enough to
-            // keep things going, mimic a closed page
-            latency = dram->accessLatency();
-        }
-    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
-        nvm->access(pkt);
-
-        if (pkt->hasData()) {
-            // this value is not supposed to be accurate, just enough to
-            // keep things going, mimic a closed page
-            latency = nvm->accessLatency();
-        }
-    } else {
-        panic("Can't handle address range for packet %s\n",
-              pkt->print());
-    }
-
-    return latency;
-}
-
-bool
-DRAMCtrl::readQueueFull(unsigned int neededEntries) const
-{
-    DPRINTF(DRAM, "Read queue limit %d, current size %d, entries needed %d\n",
-            readBufferSize, totalReadQueueSize + respQueue.size(),
-            neededEntries);
-
-    auto rdsize_new = totalReadQueueSize + respQueue.size() + neededEntries;
-    return rdsize_new > readBufferSize;
-}
-
-bool
-DRAMCtrl::writeQueueFull(unsigned int neededEntries) const
-{
-    DPRINTF(DRAM, "Write queue limit %d, current size %d, entries needed %d\n",
-            writeBufferSize, totalWriteQueueSize, neededEntries);
-
-    auto wrsize_new = (totalWriteQueueSize + neededEntries);
-    return  wrsize_new > writeBufferSize;
-}
-
-DRAMPacket*
+MemPacket*
 MemInterface::decodePacket(const PacketPtr pkt, Addr pkt_addr,
                        unsigned size, bool is_read, bool is_dram)
 {
@@ -260,465 +161,12 @@
     // later
     uint16_t bank_id = banksPerRank * rank + bank;
 
-    return new DRAMPacket(pkt, is_read, is_dram, rank, bank, row, bank_id,
+    return new MemPacket(pkt, is_read, is_dram, rank, bank, row, bank_id,
                    pkt_addr, size);
 }
 
-void
-DRAMCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
-{
-    // only add to the read queue here. whenever the request is
-    // eventually done, set the readyTime, and call schedule()
-    assert(!pkt->isWrite());
-
-    assert(pkt_count != 0);
-
-    // if the request size is larger than burst size, the pkt is split into
-    // multiple packets
-    // Note if the pkt starting address is not aligened to burst size, the
-    // address of first packet is kept unaliged. Subsequent packets
-    // are aligned to burst size boundaries. This is to ensure we accurately
-    // check read packets against packets in write queue.
-    const Addr base_addr = pkt->getAddr();
-    Addr addr = base_addr;
-    unsigned pktsServicedByWrQ = 0;
-    BurstHelper* burst_helper = NULL;
-
-    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
-                                   nvm->bytesPerBurst();
-    for (int cnt = 0; cnt < pkt_count; ++cnt) {
-        unsigned size = std::min((addr | (burst_size - 1)) + 1,
-                        base_addr + pkt->getSize()) - addr;
-        stats.readPktSize[ceilLog2(size)]++;
-        stats.readBursts++;
-        stats.masterReadAccesses[pkt->masterId()]++;
-
-        // First check write buffer to see if the data is already at
-        // the controller
-        bool foundInWrQ = false;
-        Addr burst_addr = burstAlign(addr, is_dram);
-        // if the burst address is not present then there is no need
-        // looking any further
-        if (isInWriteQueue.find(burst_addr) != isInWriteQueue.end()) {
-            for (const auto& vec : writeQueue) {
-                for (const auto& p : vec) {
-                    // check if the read is subsumed in the write queue
-                    // packet we are looking at
-                    if (p->addr <= addr &&
-                       ((addr + size) <= (p->addr + p->size))) {
-
-                        foundInWrQ = true;
-                        stats.servicedByWrQ++;
-                        pktsServicedByWrQ++;
-                        DPRINTF(DRAM,
-                                "Read to addr %lld with size %d serviced by "
-                                "write queue\n",
-                                addr, size);
-                        stats.bytesReadWrQ += burst_size;
-                        break;
-                    }
-                }
-            }
-        }
-
-        // If not found in the write q, make a DRAM packet and
-        // push it onto the read queue
-        if (!foundInWrQ) {
-
-            // Make the burst helper for split packets
-            if (pkt_count > 1 && burst_helper == NULL) {
-                DPRINTF(DRAM, "Read to addr %lld translates to %d "
-                        "memory requests\n", pkt->getAddr(), pkt_count);
-                burst_helper = new BurstHelper(pkt_count);
-            }
-
-            DRAMPacket* dram_pkt;
-            if (is_dram) {
-                dram_pkt = dram->decodePacket(pkt, addr, size, true, true);
-                // increment read entries of the rank
-                dram->setupRank(dram_pkt->rank, true);
-            } else {
-                dram_pkt = nvm->decodePacket(pkt, addr, size, true, false);
-                // Increment count to trigger issue of non-deterministic read
-                nvm->setupRank(dram_pkt->rank, true);
-                // Default readyTime to Max; will be reset once read is issued
-                dram_pkt->readyTime = MaxTick;
-            }
-            dram_pkt->burstHelper = burst_helper;
-
-            assert(!readQueueFull(1));
-            stats.rdQLenPdf[totalReadQueueSize + respQueue.size()]++;
-
-            DPRINTF(DRAM, "Adding to read queue\n");
-
-            readQueue[dram_pkt->qosValue()].push_back(dram_pkt);
-
-            // log packet
-            logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
-                       dram_pkt->addr, 1);
-
-            // Update stats
-            stats.avgRdQLen = totalReadQueueSize + respQueue.size();
-        }
-
-        // Starting address of next memory pkt (aligned to burst boundary)
-        addr = (addr | (burst_size - 1)) + 1;
-    }
-
-    // If all packets are serviced by write queue, we send the repsonse back
-    if (pktsServicedByWrQ == pkt_count) {
-        accessAndRespond(pkt, frontendLatency);
-        return;
-    }
-
-    // Update how many split packets are serviced by write queue
-    if (burst_helper != NULL)
-        burst_helper->burstsServiced = pktsServicedByWrQ;
-
-    // If we are not already scheduled to get a request out of the
-    // queue, do so now
-    if (!nextReqEvent.scheduled()) {
-        DPRINTF(DRAM, "Request scheduled immediately\n");
-        schedule(nextReqEvent, curTick());
-    }
-}
-
-void
-DRAMCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram)
-{
-    // only add to the write queue here. whenever the request is
-    // eventually done, set the readyTime, and call schedule()
-    assert(pkt->isWrite());
-
-    // if the request size is larger than burst size, the pkt is split into
-    // multiple packets
-    const Addr base_addr = pkt->getAddr();
-    Addr addr = base_addr;
-    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
-                                   nvm->bytesPerBurst();
-    for (int cnt = 0; cnt < pkt_count; ++cnt) {
-        unsigned size = std::min((addr | (burst_size - 1)) + 1,
-                        base_addr + pkt->getSize()) - addr;
-        stats.writePktSize[ceilLog2(size)]++;
-        stats.writeBursts++;
-        stats.masterWriteAccesses[pkt->masterId()]++;
-
-        // see if we can merge with an existing item in the write
-        // queue and keep track of whether we have merged or not
-        bool merged = isInWriteQueue.find(burstAlign(addr, is_dram)) !=
-            isInWriteQueue.end();
-
-        // if the item was not merged we need to create a new write
-        // and enqueue it
-        if (!merged) {
-            DRAMPacket* dram_pkt;
-            if (is_dram) {
-                dram_pkt = dram->decodePacket(pkt, addr, size, false, true);
-                dram->setupRank(dram_pkt->rank, false);
-            } else {
-                dram_pkt = nvm->decodePacket(pkt, addr, size, false, false);
-                nvm->setupRank(dram_pkt->rank, false);
-            }
-            assert(totalWriteQueueSize < writeBufferSize);
-            stats.wrQLenPdf[totalWriteQueueSize]++;
-
-            DPRINTF(DRAM, "Adding to write queue\n");
-
-            writeQueue[dram_pkt->qosValue()].push_back(dram_pkt);
-            isInWriteQueue.insert(burstAlign(addr, is_dram));
-
-            // log packet
-            logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
-                       dram_pkt->addr, 1);
-
-            assert(totalWriteQueueSize == isInWriteQueue.size());
-
-            // Update stats
-            stats.avgWrQLen = totalWriteQueueSize;
-
-        } else {
-            DPRINTF(DRAM, "Merging write burst with existing queue entry\n");
-
-            // keep track of the fact that this burst effectively
-            // disappeared as it was merged with an existing one
-            stats.mergedWrBursts++;
-        }
-
-        // Starting address of next memory pkt (aligned to burst_size boundary)
-        addr = (addr | (burst_size - 1)) + 1;
-    }
-
-    // we do not wait for the writes to be send to the actual memory,
-    // but instead take responsibility for the consistency here and
-    // snoop the write queue for any upcoming reads
-    // @todo, if a pkt size is larger than burst size, we might need a
-    // different front end latency
-    accessAndRespond(pkt, frontendLatency);
-
-    // If we are not already scheduled to get a request out of the
-    // queue, do so now
-    if (!nextReqEvent.scheduled()) {
-        DPRINTF(DRAM, "Request scheduled immediately\n");
-        schedule(nextReqEvent, curTick());
-    }
-}
-
-void
-DRAMCtrl::printQs() const
-{
-#if TRACING_ON
-    DPRINTF(DRAM, "===READ QUEUE===\n\n");
-    for (const auto& queue : readQueue) {
-        for (const auto& packet : queue) {
-            DPRINTF(DRAM, "Read %lu\n", packet->addr);
-        }
-    }
-
-    DPRINTF(DRAM, "\n===RESP QUEUE===\n\n");
-    for (const auto& packet : respQueue) {
-        DPRINTF(DRAM, "Response %lu\n", packet->addr);
-    }
-
-    DPRINTF(DRAM, "\n===WRITE QUEUE===\n\n");
-    for (const auto& queue : writeQueue) {
-        for (const auto& packet : queue) {
-            DPRINTF(DRAM, "Write %lu\n", packet->addr);
-        }
-    }
-#endif // TRACING_ON
-}
-
-bool
-DRAMCtrl::recvTimingReq(PacketPtr pkt)
-{
-    // This is where we enter from the outside world
-    DPRINTF(DRAM, "recvTimingReq: request %s addr %lld size %d\n",
-            pkt->cmdString(), pkt->getAddr(), pkt->getSize());
-
-    panic_if(pkt->cacheResponding(), "Should not see packets where cache "
-             "is responding");
-
-    panic_if(!(pkt->isRead() || pkt->isWrite()),
-             "Should only see read and writes at memory controller\n");
-
-    // Calc avg gap between requests
-    if (prevArrival != 0) {
-        stats.totGap += curTick() - prevArrival;
-    }
-    prevArrival = curTick();
-
-    // What type of media does this packet access?
-    bool is_dram;
-    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
-        is_dram = true;
-    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
-        is_dram = false;
-    } else {
-        panic("Can't handle address range for packet %s\n",
-              pkt->print());
-    }
-
-
-    // Find out how many memory packets a pkt translates to
-    // If the burst size is equal or larger than the pkt size, then a pkt
-    // translates to only one memory packet. Otherwise, a pkt translates to
-    // multiple memory packets
-    unsigned size = pkt->getSize();
-    uint32_t burst_size = is_dram ? dram->bytesPerBurst() :
-                                  nvm->bytesPerBurst();
-    unsigned offset = pkt->getAddr() & (burst_size - 1);
-    unsigned int pkt_count = divCeil(offset + size, burst_size);
-
-    // run the QoS scheduler and assign a QoS priority value to the packet
-    qosSchedule( { &readQueue, &writeQueue }, burst_size, pkt);
-
-    // check local buffers and do not accept if full
-    if (pkt->isWrite()) {
-        assert(size != 0);
-        if (writeQueueFull(pkt_count)) {
-            DPRINTF(DRAM, "Write queue full, not accepting\n");
-            // remember that we have to retry this port
-            retryWrReq = true;
-            stats.numWrRetry++;
-            return false;
-        } else {
-            addToWriteQueue(pkt, pkt_count, is_dram);
-            stats.writeReqs++;
-            stats.bytesWrittenSys += size;
-        }
-    } else {
-        assert(pkt->isRead());
-        assert(size != 0);
-        if (readQueueFull(pkt_count)) {
-            DPRINTF(DRAM, "Read queue full, not accepting\n");
-            // remember that we have to retry this port
-            retryRdReq = true;
-            stats.numRdRetry++;
-            return false;
-        } else {
-            addToReadQueue(pkt, pkt_count, is_dram);
-            stats.readReqs++;
-            stats.bytesReadSys += size;
-        }
-    }
-
-    return true;
-}
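For reference, the burst-count computation removed here (and carried over to mem_ctrl.cc per this patch) works as follows, with illustrative numbers: a 128-byte packet starting 32 bytes into a 64-byte burst spans pkt_count = divCeil(32 + 128, 64) = 3 bursts.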
-
-void
-DRAMCtrl::processRespondEvent()
-{
-    DPRINTF(DRAM,
-            "processRespondEvent(): Some req has reached its readyTime\n");
-
-    DRAMPacket* dram_pkt = respQueue.front();
-
-    if (dram_pkt->isDram()) {
-        // media specific checks and functions when read response is complete
-        dram->respondEvent(dram_pkt->rank);
-    }
-
-    if (dram_pkt->burstHelper) {
-        // it is a split packet
-        dram_pkt->burstHelper->burstsServiced++;
-        if (dram_pkt->burstHelper->burstsServiced ==
-            dram_pkt->burstHelper->burstCount) {
-            // we have now serviced all children packets of a system packet
-            // so we can now respond to the requester
-            // @todo we probably want to have a different front end and back
-            // end latency for split packets
-            accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
-            delete dram_pkt->burstHelper;
-            dram_pkt->burstHelper = NULL;
-        }
-    } else {
-        // it is not a split packet
-        accessAndRespond(dram_pkt->pkt, frontendLatency + backendLatency);
-    }
-
-    assert(respQueue.front() == dram_pkt);
-    respQueue.pop_front();
-
-    if (!respQueue.empty()) {
-        assert(respQueue.front()->readyTime >= curTick());
-        assert(!respondEvent.scheduled());
-        schedule(respondEvent, respQueue.front()->readyTime);
-    } else {
-        // if there is nothing left in any queue, signal a drain
-        if (drainState() == DrainState::Draining &&
-            !totalWriteQueueSize && !totalReadQueueSize &&
-            allIntfDrained()) {
-
-            DPRINTF(Drain, "DRAM controller done draining\n");
-            signalDrainDone();
-        } else if (dram_pkt->isDram()) {
-            // check the refresh state and kick the refresh event loop
-            // into action again if banks already closed and just waiting
-            // for read to complete
-            dram->checkRefreshState(dram_pkt->rank);
-        }
-    }
-
-    delete dram_pkt;
-
-    // We have made a location in the queue available at this point,
-    // so if there is a read that was forced to wait, retry now
-    if (retryRdReq) {
-        retryRdReq = false;
-        port.sendRetryReq();
-    }
-}
-
-DRAMPacketQueue::iterator
-DRAMCtrl::chooseNext(DRAMPacketQueue& queue, Tick extra_col_delay)
-{
-    // This method does the arbitration between requests.
-
-    DRAMPacketQueue::iterator ret = queue.end();
-
-    if (!queue.empty()) {
-        if (queue.size() == 1) {
-            // available rank corresponds to state refresh idle
-            DRAMPacket* dram_pkt = *(queue.begin());
-            if (packetReady(dram_pkt)) {
-                ret = queue.begin();
-                DPRINTF(DRAM, "Single request, going to a free rank\n");
-            } else {
-                DPRINTF(DRAM, "Single request, going to a busy rank\n");
-            }
-        } else if (memSchedPolicy == Enums::fcfs) {
-            // check if there is a packet going to a free rank
-            for (auto i = queue.begin(); i != queue.end(); ++i) {
-                DRAMPacket* dram_pkt = *i;
-                if (packetReady(dram_pkt)) {
-                    ret = i;
-                    break;
-                }
-            }
-        } else if (memSchedPolicy == Enums::frfcfs) {
-            ret = chooseNextFRFCFS(queue, extra_col_delay);
-        } else {
-            panic("No scheduling policy chosen\n");
-        }
-    }
-    return ret;
-}
-
-DRAMPacketQueue::iterator
-DRAMCtrl::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick extra_col_delay)
-{
-    auto selected_pkt_it = queue.end();
-    Tick col_allowed_at = MaxTick;
-
-    // time we need to issue a column command to be seamless
-    const Tick min_col_at = std::max(nextBurstAt + extra_col_delay, curTick());
-
-    // find optimal packet for each interface
-    if (dram && nvm) {
-        // create 2nd set of parameters for NVM
-        auto nvm_pkt_it = queue.end();
-        Tick nvm_col_at = MaxTick;
-
-        // Select DRAM packet by default to give priority if both
-        // can issue at the same time or seamlessly
-        std::tie(selected_pkt_it, col_allowed_at) =
-                 dram->chooseNextFRFCFS(queue, min_col_at);
-        if (selected_pkt_it == queue.end()) {
-            DPRINTF(DRAM, "%s no available DRAM ranks found\n", __func__);
-        }
-
-        std::tie(nvm_pkt_it, nvm_col_at) =
-                 nvm->chooseNextFRFCFS(queue, min_col_at);
-        if (nvm_pkt_it == queue.end()) {
-            DPRINTF(NVM, "%s no available NVM ranks found\n", __func__);
-        }
-
-        // Compare DRAM and NVM and select NVM if it can issue
-        // earlier than the DRAM packet
-        if (col_allowed_at > nvm_col_at) {
-            selected_pkt_it = nvm_pkt_it;
-        }
-    } else if (dram) {
-        std::tie(selected_pkt_it, col_allowed_at) =
-                 dram->chooseNextFRFCFS(queue, min_col_at);
-
-        if (selected_pkt_it == queue.end()) {
-            DPRINTF(DRAM, "%s no available DRAM ranks found\n", __func__);
-        }
-    } else if (nvm) {
-        std::tie(selected_pkt_it, col_allowed_at) =
-                 nvm->chooseNextFRFCFS(queue, min_col_at);
-
-        if (selected_pkt_it == queue.end()) {
-            DPRINTF(NVM, "%s no available NVM ranks found\n", __func__);
-        }
-    }
-
-    return selected_pkt_it;
-}
-
-pair<DRAMPacketQueue::iterator, Tick>
-DRAMInterface::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick min_col_at) const
+pair<MemPacketQueue::iterator, Tick>
+DRAMInterface::chooseNextFRFCFS(MemPacketQueue& queue, Tick min_col_at) const
 {
     vector<uint32_t> earliest_banks(ranksPerChannel, 0);
 
@@ -746,7 +194,7 @@
     auto selected_pkt_it = queue.end();
 
     for (auto i = queue.begin(); i != queue.end() ; ++i) {
-        DRAMPacket* pkt = *i;
+        MemPacket* pkt = *i;
 
         // select optimal DRAM packet in Q
         if (pkt->isDram()) {
@@ -833,157 +281,6 @@
 }
 
 void
-DRAMCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency)
-{
-    DPRINTF(DRAM, "Responding to Address %lld.. \n",pkt->getAddr());
-
-    bool needsResponse = pkt->needsResponse();
-    // do the actual memory access which also turns the packet into a
-    // response
-    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
-        dram->access(pkt);
-    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
-        nvm->access(pkt);
-    } else {
-        panic("Can't handle address range for packet %s\n",
-              pkt->print());
-    }
-
-    // turn packet around to go back to requester if response expected
-    if (needsResponse) {
-        // access already turned the packet into a response
-        assert(pkt->isResponse());
-        // response_time consumes the static latency and is charged also
-        // with headerDelay that takes into account the delay provided by
-        // the xbar and also the payloadDelay that takes into account the
-        // number of data beats.
-        Tick response_time = curTick() + static_latency + pkt->headerDelay +
-                             pkt->payloadDelay;
-        // Here we reset the timing of the packet before sending it out.
-        pkt->headerDelay = pkt->payloadDelay = 0;
-
-        // queue the packet in the response queue to be sent out after
-        // the static latency has passed
-        port.schedTimingResp(pkt, response_time);
-    } else {
-        // @todo the packet is going to be deleted, and the DRAMPacket
-        // still has a pointer to it
-        pendingDelete.reset(pkt);
-    }
-
-    DPRINTF(DRAM, "Done\n");
-
-    return;
-}
-
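The response-time arithmetic in accessAndRespond is worth a standalone check: the static latency is charged on top of the header and payload delays carried by the packet, which are then consumed (zeroed) before the response is queued. A sketch with assumed tick values (responseTime and the Pkt struct are hypothetical):

    #include <cassert>
    #include <cstdint>

    using Tick = uint64_t;

    struct Pkt { Tick headerDelay; Tick payloadDelay; };

    // Charge static latency on top of the xbar delays, then zero the
    // packet-carried timing, mirroring the reset above.
    Tick responseTime(Tick now, Tick static_latency, Pkt& pkt)
    {
        Tick t = now + static_latency + pkt.headerDelay + pkt.payloadDelay;
        pkt.headerDelay = pkt.payloadDelay = 0;
        return t;
    }

    int main()
    {
        Pkt p{2, 8};
        assert(responseTime(100, 20, p) == 130);
        assert(p.headerDelay == 0 && p.payloadDelay == 0);
    }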
-void
-DRAMCtrl::pruneBurstTick()
-{
-    auto it = burstTicks.begin();
-    while (it != burstTicks.end()) {
-        auto current_it = it++;
-        if (curTick() > *current_it) {
-            DPRINTF(DRAM, "Removing burstTick for %d\n", *current_it);
-            burstTicks.erase(current_it);
-        }
-    }
-}
-
-Tick
-DRAMCtrl::getBurstWindow(Tick cmd_tick)
-{
-    // get tick aligned to burst window
-    Tick burst_offset = cmd_tick % commandWindow;
-    return (cmd_tick - burst_offset);
-}
-
-Tick
-DRAMCtrl::verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst)
-{
-    // start with assumption that there is no contention on command bus
-    Tick cmd_at = cmd_tick;
-
-    // get tick aligned to burst window
-    Tick burst_tick = getBurstWindow(cmd_tick);
-
-    // verify that we have command bandwidth to issue the command
-    // if not, iterate over next window(s) until slot found
-    while (burstTicks.count(burst_tick) >= max_cmds_per_burst) {
-        DPRINTF(DRAM, "Contention found on command bus at %d\n", burst_tick);
-        burst_tick += commandWindow;
-        cmd_at = burst_tick;
-    }
-
-    // add command into burst window and return corresponding Tick
-    burstTicks.insert(burst_tick);
-    return cmd_at;
-}
-
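verifySingleCmd above is a slot search over fixed command windows: align the requested tick down to its window, then slide forward window by window until one has a free command slot. A compact standalone model of that bookkeeping (CmdBus and its members are hypothetical; the real code keeps the multiset in the controller):

    #include <cassert>
    #include <cstdint>
    #include <set>

    using Tick = uint64_t;

    struct CmdBus {
        Tick window;                // commandWindow equivalent
        std::multiset<Tick> ticks;  // burstTicks: one entry per issued command

        Tick align(Tick t) const { return t - (t % window); }

        // verifySingleCmd equivalent: find the first window with a free slot.
        Tick reserve(Tick cmd_tick, unsigned max_cmds_per_burst)
        {
            Tick burst = align(cmd_tick);
            Tick cmd_at = cmd_tick;
            while (ticks.count(burst) >= max_cmds_per_burst) {
                burst += window;    // contention: slide to the next window
                cmd_at = burst;
            }
            ticks.insert(burst);
            return cmd_at;
        }
    };

    int main()
    {
        CmdBus bus{10, {}};
        assert(bus.reserve(23, 1) == 23);  // window [20,30) empty: issue as-is
        assert(bus.reserve(25, 1) == 30);  // window [20,30) full: next window
    }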
-Tick
-DRAMCtrl::verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
-                         Tick max_multi_cmd_split)
-{
-    // start with assumption that there is no contention on command bus
-    Tick cmd_at = cmd_tick;
-
-    // get tick aligned to burst window
-    Tick burst_tick = getBurstWindow(cmd_tick);
-
-    // Command timing requirements are from 2nd command
-    // Start with assumption that 2nd command will issue at cmd_at and
-    // find prior slot for 1st command to issue
-    // Given a maximum latency of max_multi_cmd_split between the commands,
-    // find the burst at the maximum latency prior to cmd_at
-    Tick burst_offset = 0;
-    Tick first_cmd_offset = cmd_tick % commandWindow;
-    while (max_multi_cmd_split > (first_cmd_offset + burst_offset)) {
-        burst_offset += commandWindow;
-    }
-    // get the earliest burst aligned address for first command
-    // ensure that the time does not go negative
-    Tick first_cmd_tick = burst_tick - std::min(burst_offset, burst_tick);
-
-    // Can required commands issue?
-    bool first_can_issue = false;
-    bool second_can_issue = false;
-    // verify that we have command bandwidth to issue the command(s)
-    while (!first_can_issue || !second_can_issue) {
-        bool same_burst = (burst_tick == first_cmd_tick);
-        auto first_cmd_count = burstTicks.count(first_cmd_tick);
-        auto second_cmd_count = same_burst ? first_cmd_count + 1 :
-                                   burstTicks.count(burst_tick);
-
-        first_can_issue = first_cmd_count < max_cmds_per_burst;
-        second_can_issue = second_cmd_count < max_cmds_per_burst;
-
-        if (!second_can_issue) {
-            DPRINTF(DRAM, "Contention (cmd2) found on command bus at %d\n",
-                    burst_tick);
-            burst_tick += commandWindow;
-            cmd_at = burst_tick;
-        }
-
-        // Verify max_multi_cmd_split isn't violated when command 2 is shifted
-        // If commands initially were issued in same burst, they are
-        // now in consecutive bursts and can still issue B2B
-        bool gap_violated = !same_burst &&
-             ((burst_tick - first_cmd_tick) > max_multi_cmd_split);
-
-        if (!first_can_issue || (!second_can_issue && gap_violated)) {
-            DPRINTF(DRAM, "Contention (cmd1) found on command bus at %d\n",
-                    first_cmd_tick);
-            first_cmd_tick += commandWindow;
-        }
-    }
-
-    // Add command to burstTicks
-    burstTicks.insert(burst_tick);
-    burstTicks.insert(first_cmd_tick);
-
-    return cmd_at;
-}
-
-void
 DRAMInterface::activateBank(Rank& rank_ref, Bank& bank_ref,
                        Tick act_tick, uint32_t row)
 {
@@ -1156,14 +453,14 @@
 }
 
 pair<Tick, Tick>
-DRAMInterface::doBurstAccess(DRAMPacket* dram_pkt, Tick next_burst_at,
-                             const std::vector<DRAMPacketQueue>& queue)
+DRAMInterface::doBurstAccess(MemPacket* mem_pkt, Tick next_burst_at,
+                             const std::vector<MemPacketQueue>& queue)
 {
     DPRINTF(DRAM, "Timing access to addr %lld, rank/bank/row %d %d %d\n",
-            dram_pkt->addr, dram_pkt->rank, dram_pkt->bank, dram_pkt->row);
+            mem_pkt->addr, mem_pkt->rank, mem_pkt->bank, mem_pkt->row);
 
     // get the rank
-    Rank& rank_ref = *ranks[dram_pkt->rank];
+    Rank& rank_ref = *ranks[mem_pkt->rank];
 
     assert(rank_ref.inRefIdleState());
 
@@ -1176,13 +473,13 @@
     }
 
     // get the bank
-    Bank& bank_ref = rank_ref.banks[dram_pkt->bank];
+    Bank& bank_ref = rank_ref.banks[mem_pkt->bank];
 
     // for the state we need to track if it is a row hit or not
     bool row_hit = true;
 
     // Determine the access latency and update the bank state
-    if (bank_ref.openRow == dram_pkt->row) {
+    if (bank_ref.openRow == mem_pkt->row) {
         // nothing to do
     } else {
         row_hit = false;
@@ -1198,11 +495,11 @@
 
         // Record the activation and deal with all the global timing
         // constraints caused be a new activation (tRRD and tXAW)
-        activateBank(rank_ref, bank_ref, act_tick, dram_pkt->row);
+        activateBank(rank_ref, bank_ref, act_tick, mem_pkt->row);
     }
 
     // respect any constraints on the command (e.g. tRCD or tCCD)
-    const Tick col_allowed_at = dram_pkt->isRead() ?
+    const Tick col_allowed_at = mem_pkt->isRead() ?
                                 bank_ref.rdAllowedAt : bank_ref.wrAllowedAt;
 
     // we need to wait until the bus is available before we can issue
@@ -1235,7 +532,7 @@
     DPRINTF(DRAM, "Schedule RD/WR burst at tick %d\n", cmd_at);
 
     // update the packet ready time
-    dram_pkt->readyTime = cmd_at + tCL + tBURST;
+    mem_pkt->readyTime = cmd_at + tCL + tBURST;
 
     rank_ref.lastBurstTick = cmd_at;
 
@@ -1245,7 +542,7 @@
     Tick dly_to_wr_cmd;
     for (int j = 0; j < ranksPerChannel; j++) {
         for (int i = 0; i < banksPerRank; i++) {
-            if (dram_pkt->rank == j) {
+            if (mem_pkt->rank == j) {
                 if (bankGroupArch &&
                    (bank_ref.bankgr == ranks[j]->banks[i].bankgr)) {
                     // bank group architecture requires longer delays between
@@ -1253,17 +550,17 @@
                     // tCCD_L is default requirement for same BG timing
                     // tCCD_L_WR is required for write-to-write
                     // Need to also take bus turnaround delays into account
-                    dly_to_rd_cmd = dram_pkt->isRead() ?
+                    dly_to_rd_cmd = mem_pkt->isRead() ?
                                     tCCD_L : std::max(tCCD_L, wrToRdDlySameBG);
-                    dly_to_wr_cmd = dram_pkt->isRead() ?
+                    dly_to_wr_cmd = mem_pkt->isRead() ?
                                     std::max(tCCD_L, rdToWrDlySameBG) :
                                     tCCD_L_WR;
                 } else {
                     // tBURST is default requirement for diff BG timing
                     // Need to also take bus turnaround delays into account
-                    dly_to_rd_cmd = dram_pkt->isRead() ? burst_gap :
+                    dly_to_rd_cmd = mem_pkt->isRead() ? burst_gap :
                                                        writeToReadDelay();
-                    dly_to_wr_cmd = dram_pkt->isRead() ? readToWriteDelay() :
+                    dly_to_wr_cmd = mem_pkt->isRead() ? readToWriteDelay() :
                                                        burst_gap;
                 }
             } else {
@@ -1281,14 +578,14 @@
     }
 
     // Save rank of current access
-    activeRank = dram_pkt->rank;
+    activeRank = mem_pkt->rank;
 
     // If this is a write, we also need to respect the write recovery
     // time before a precharge, in the case of a read, respect the
     // read to precharge constraint
     bank_ref.preAllowedAt = std::max(bank_ref.preAllowedAt,
-                                 dram_pkt->isRead() ? cmd_at + tRTP :
-                                 dram_pkt->readyTime + tWR);
+                                 mem_pkt->isRead() ? cmd_at + tRTP :
+                                 mem_pkt->readyTime + tWR);
 
     // increment the bytes accessed and the accesses per row
     bank_ref.bytesAccessed += burstSize;
@@ -1325,11 +622,11 @@
             // 3) make sure we are not considering the packet that we are
             //    currently dealing with
             while (!got_more_hits && p != queue[i].end()) {
-                if (dram_pkt != (*p)) {
-                    bool same_rank_bank = (dram_pkt->rank == (*p)->rank) &&
-                                          (dram_pkt->bank == (*p)->bank);
+                if (mem_pkt != (*p)) {
+                    bool same_rank_bank = (mem_pkt->rank == (*p)->rank) &&
+                                          (mem_pkt->bank == (*p)->bank);
 
-                    bool same_row = dram_pkt->row == (*p)->row;
+                    bool same_row = mem_pkt->row == (*p)->row;
                     got_more_hits |= same_rank_bank && same_row;
                     got_bank_conflict |= same_rank_bank && !same_row;
                 }
@@ -1349,16 +646,16 @@
     }
 
     // DRAMPower trace command to be written
-    std::string mem_cmd = dram_pkt->isRead() ? "RD" : "WR";
+    std::string mem_cmd = mem_pkt->isRead() ? "RD" : "WR";
 
     // MemCommand required for DRAMPower library
     MemCommand::cmds command = (mem_cmd == "RD") ? MemCommand::RD :
                                                    MemCommand::WR;
 
-    rank_ref.cmdList.push_back(Command(command, dram_pkt->bank, cmd_at));
+    rank_ref.cmdList.push_back(Command(command, mem_pkt->bank, cmd_at));
 
     DPRINTF(DRAMPower, "%llu,%s,%d,%d\n", divCeil(cmd_at, tCK) -
-            timeStampOffset, mem_cmd, dram_pkt->bank, dram_pkt->rank);
+            timeStampOffset, mem_cmd, mem_pkt->bank, mem_pkt->rank);
 
     // if this access should use auto-precharge, then we are
     // closing the row after the read/write burst
@@ -1368,11 +665,11 @@
         prechargeBank(rank_ref, bank_ref, std::max(curTick(),
                       bank_ref.preAllowedAt), true);
 
-        DPRINTF(DRAM, "Auto-precharged bank: %d\n", dram_pkt->bankId);
+        DPRINTF(DRAM, "Auto-precharged bank: %d\n", mem_pkt->bankId);
     }
 
     // Update the stats and schedule the next request
-    if (dram_pkt->isRead()) {
+    if (mem_pkt->isRead()) {
         // Every respQueue which will generate an event, increment count
         ++rank_ref.outstandingEvents;
 
@@ -1380,11 +677,11 @@
         if (row_hit)
             stats.readRowHits++;
         stats.bytesRead += burstSize;
-        stats.perBankRdBursts[dram_pkt->bankId]++;
+        stats.perBankRdBursts[mem_pkt->bankId]++;
 
         // Update latency stats
-        stats.totMemAccLat += dram_pkt->readyTime - dram_pkt->entryTime;
-        stats.totQLat += cmd_at - dram_pkt->entryTime;
+        stats.totMemAccLat += mem_pkt->readyTime - mem_pkt->entryTime;
+        stats.totQLat += cmd_at - mem_pkt->entryTime;
         stats.totBusLat += tBURST;
     } else {
         // Schedule write done event to decrement event count
@@ -1394,12 +691,12 @@
         // the time that writes are outstanding and bus is active
         // to holdoff power-down entry events
         if (!rank_ref.writeDoneEvent.scheduled()) {
-            schedule(rank_ref.writeDoneEvent, dram_pkt->readyTime);
+            schedule(rank_ref.writeDoneEvent, mem_pkt->readyTime);
             // New event, increment count
             ++rank_ref.outstandingEvents;
 
-        } else if (rank_ref.writeDoneEvent.when() < dram_pkt->readyTime) {
-            reschedule(rank_ref.writeDoneEvent, dram_pkt->readyTime);
+        } else if (rank_ref.writeDoneEvent.when() < mem_pkt->readyTime) {
+            reschedule(rank_ref.writeDoneEvent, mem_pkt->readyTime);
         }
         // will remove write from queue when returned to parent function
         // decrement count for DRAM rank
@@ -1409,7 +706,7 @@
         if (row_hit)
             stats.writeRowHits++;
         stats.bytesWritten += burstSize;
-        stats.perBankWrBursts[dram_pkt->bankId]++;
+        stats.perBankWrBursts[mem_pkt->bankId]++;
 
     }
     // Update bus state to reflect when previous command was issued
@@ -1433,399 +730,6 @@
     }
 }
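One detail of doBurstAccess worth isolating is the bank-group rule: back-to-back column commands to the same bank group must be spaced by the longer tCCD_L, while other banks only need the plain burst gap. A sketch with assumed DDR4-flavoured timings (dlyToRdAfterRd is hypothetical and covers only the read-after-read case):

    #include <cassert>
    #include <cstdint>

    using Tick = uint64_t;

    // Read-to-read spacing after a burst, per the branch above: same bank
    // group needs the long tCCD_L, any other bank only the plain burst gap.
    Tick dlyToRdAfterRd(bool same_bank_group, Tick tCCD_L, Tick burst_gap)
    {
        return same_bank_group ? tCCD_L : burst_gap;
    }

    int main()
    {
        // Assumed timings: tCCD_L = 6 cycles vs. a 4-cycle burst gap.
        assert(dlyToRdAfterRd(true, 6, 4) == 6);
        assert(dlyToRdAfterRd(false, 6, 4) == 4);
    }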
 
-bool
-DRAMCtrl::inReadBusState(bool next_state) const
-{
-    // check the bus state
-    if (next_state) {
-        // use busStateNext to get the state that will be used
-        // for the next burst
-        return (busStateNext == DRAMCtrl::READ);
-    } else {
-        return (busState == DRAMCtrl::READ);
-    }
-}
-
-bool
-DRAMCtrl::inWriteBusState(bool next_state) const
-{
-    // check the bus state
-    if (next_state) {
-        // use busStateNext to get the state that will be used
-        // for the next burst
-        return (busStateNext == DRAMCtrl::WRITE);
-    } else {
-        return (busState == DRAMCtrl::WRITE);
-    }
-}
-
-void
-DRAMCtrl::doBurstAccess(DRAMPacket* dram_pkt)
-{
-    // first clean up the burstTick set, removing old entries
-    // before adding new entries for next burst
-    pruneBurstTick();
-
-    // When was command issued?
-    Tick cmd_at;
-
-    // Issue the next burst and update bus state to reflect
-    // when previous command was issued
-    if (dram_pkt->isDram()) {
-        std::vector<DRAMPacketQueue>& queue = selQueue(dram_pkt->isRead());
-        std::tie(cmd_at, nextBurstAt) =
-                 dram->doBurstAccess(dram_pkt, nextBurstAt, queue);
-
-        // Update timing for NVM ranks if NVM is configured on this channel
-        if (nvm)
-            nvm->addRankToRankDelay(cmd_at);
-
-    } else {
-        std::tie(cmd_at, nextBurstAt) =
-                 nvm->doBurstAccess(dram_pkt, nextBurstAt);
-
-        // Update timing for DRAM ranks if DRAM is configured on this channel
-        if (dram)
-            dram->addRankToRankDelay(cmd_at);
-
-    }
-
-    DPRINTF(DRAM, "Access to %lld, ready at %lld next burst at %lld.\n",
-            dram_pkt->addr, dram_pkt->readyTime, nextBurstAt);
-
-    // Update the minimum timing between the requests, this is a
-    // conservative estimate of when we have to schedule the next
-    // request to not introduce any unnecessary bubbles. In most cases
-    // we will wake up sooner than we have to.
-    nextReqTime = nextBurstAt - (dram ? dram->commandOffset() :
-                                        nvm->commandOffset());
-
-
-    // Update the common bus stats
-    if (dram_pkt->isRead()) {
-        ++readsThisTime;
-        // Update latency stats
-        stats.masterReadTotalLat[dram_pkt->masterId()] +=
-            dram_pkt->readyTime - dram_pkt->entryTime;
-        stats.masterReadBytes[dram_pkt->masterId()] += dram_pkt->size;
-    } else {
-        ++writesThisTime;
-        stats.masterWriteBytes[dram_pkt->masterId()] += dram_pkt->size;
-        stats.masterWriteTotalLat[dram_pkt->masterId()] +=
-            dram_pkt->readyTime - dram_pkt->entryTime;
-    }
-}
-
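The nextReqTime update above subtracts the interface's command lead time from the next burst slot so the controller wakes early enough to avoid a bus bubble; the event is later scheduled no earlier than curTick(). A sketch with assumed values (nextWakeup is hypothetical):

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    using Tick = uint64_t;

    Tick nextWakeup(Tick next_burst_at, Tick command_offset, Tick now)
    {
        Tick next_req_time = next_burst_at - command_offset;
        return std::max(next_req_time, now);  // clamp, as done when scheduling
    }

    int main()
    {
        assert(nextWakeup(1000, 150, 500) == 850);  // wake early, no bubble
        assert(nextWakeup(1000, 150, 900) == 900);  // already past: run now
    }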
-void
-DRAMCtrl::processNextReqEvent()
-{
-    // transition is handled by QoS algorithm if enabled
-    if (turnPolicy) {
-        // select bus state - only done if QoS algorithms are in use
-        busStateNext = selectNextBusState();
-    }
-
-    // detect bus state change
-    bool switched_cmd_type = (busState != busStateNext);
-    // record stats
-    recordTurnaroundStats();
-
-    DPRINTF(DRAM, "QoS Turnarounds selected state %s %s\n",
-            (busState==MemCtrl::READ)?"READ":"WRITE",
-            switched_cmd_type?"[turnaround triggered]":"");
-
-    if (switched_cmd_type) {
-        if (busState == MemCtrl::READ) {
-            DPRINTF(DRAM,
-                    "Switching to writes after %d reads with %d reads "
-                    "waiting\n", readsThisTime, totalReadQueueSize);
-            stats.rdPerTurnAround.sample(readsThisTime);
-            readsThisTime = 0;
-        } else {
-            DPRINTF(DRAM,
-                    "Switching to reads after %d writes with %d writes "
-                    "waiting\n", writesThisTime, totalWriteQueueSize);
-            stats.wrPerTurnAround.sample(writesThisTime);
-            writesThisTime = 0;
-        }
-    }
-
-    // updates current state
-    busState = busStateNext;
-
-    if (nvm) {
-        for (auto queue = readQueue.rbegin();
-             queue != readQueue.rend(); ++queue) {
-             // check if a non-deterministic NVM read is waiting to issue;
-             // assume that we have the command bandwidth to issue this along
-             // with an additional RD/WR burst and the needed bank operations
-             if (nvm->readsWaitingToIssue()) {
-                 // select non-deterministic NVM read to issue
-                 nvm->chooseRead(*queue);
-             }
-        }
-    }
-
-    // check ranks for refresh/wakeup - uses busStateNext, so done after
-    // turnaround decisions
-    // Default to busy status and update based on interface specifics
-    bool dram_busy = dram ? dram->isBusy() : true;
-    bool nvm_busy = true;
-    bool all_writes_nvm = false;
-    if (nvm) {
-        all_writes_nvm = nvm->numWritesQueued == totalWriteQueueSize;
-        bool read_queue_empty = totalReadQueueSize == 0;
-        nvm_busy = nvm->isBusy(read_queue_empty, all_writes_nvm);
-    }
-    // Default state of unused interface is 'true'
-    // Simply AND the busy signals to determine if system is busy
-    if (dram_busy && nvm_busy) {
-        // if all ranks are refreshing wait for them to finish
-        // and stall this state machine without taking any further
-        // action, and do not schedule a new nextReqEvent
-        return;
-    }
-
-    // when we get here it is either a read or a write
-    if (busState == READ) {
-
-        // track if we should switch or not
-        bool switch_to_writes = false;
-
-        if (totalReadQueueSize == 0) {
-            // In the case there is no read request to go next,
-            // trigger writes if we have passed the low threshold (or
-            // if we are draining)
-            if (!(totalWriteQueueSize == 0) &&
-                (drainState() == DrainState::Draining ||
-                 totalWriteQueueSize > writeLowThreshold)) {
-
-                DPRINTF(DRAM, "Switching to writes due to read queue empty\n");
-                switch_to_writes = true;
-            } else {
-                // check if we are drained
-                // not done draining until in PWR_IDLE state
-                // ensuring all banks are closed and
-                // have exited low power states
-                if (drainState() == DrainState::Draining &&
-                    respQueue.empty() && allIntfDrained()) {
-
-                    DPRINTF(Drain, "DRAM controller done draining\n");
-                    signalDrainDone();
-                }
-
-                // nothing to do, not even any point in scheduling an
-                // event for the next request
-                return;
-            }
-        } else {
-
-            bool read_found = false;
-            DRAMPacketQueue::iterator to_read;
-            uint8_t prio = numPriorities();
-
-            for (auto queue = readQueue.rbegin();
-                 queue != readQueue.rend(); ++queue) {
-
-                prio--;
-
-                DPRINTF(QOS,
-                        "DRAM controller checking READ queue [%d] priority [%d elements]\n",
-                        prio, queue->size());
-
-                // Figure out which read request goes next
-                // If we are changing command type, incorporate the minimum
-                // bus turnaround delay which will be rank to rank delay
-                to_read = chooseNext((*queue), switched_cmd_type ?
-                                               minWriteToReadDataGap() : 0);
-
-                if (to_read != queue->end()) {
-                    // candidate read found
-                    read_found = true;
-                    break;
-                }
-            }
-
-            // if no read to an available rank is found then return
-            // at this point. There could be writes to the available ranks
-            // which are above the required threshold. However, to
-            // avoid adding more complexity to the code, return and wait
-            // for a refresh event to kick things into action again.
-            if (!read_found) {
-                DPRINTF(DRAM, "No Reads Found - exiting\n");
-                return;
-            }
-
-            auto dram_pkt = *to_read;
-
-            doBurstAccess(dram_pkt);
-
-            // sanity check
-            assert(dram_pkt->size <= (dram_pkt->isDram() ?
-                                      dram->bytesPerBurst() :
-                                      nvm->bytesPerBurst()) );
-            assert(dram_pkt->readyTime >= curTick());
-
-            // log the response
-            logResponse(MemCtrl::READ, (*to_read)->masterId(),
-                        dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
-                        dram_pkt->readyTime - dram_pkt->entryTime);
-
-
-            // Insert into response queue. It will be sent back to the
-            // requester at its readyTime
-            if (respQueue.empty()) {
-                assert(!respondEvent.scheduled());
-                schedule(respondEvent, dram_pkt->readyTime);
-            } else {
-                assert(respQueue.back()->readyTime <= dram_pkt->readyTime);
-                assert(respondEvent.scheduled());
-            }
-
-            respQueue.push_back(dram_pkt);
-
-            // transition to writes if the write queue has passed the high
-            // threshold; don't transition if the writeRespQueue is full and
-            // there are no other writes that can issue
-            if ((totalWriteQueueSize > writeHighThreshold) &&
-               !(nvm && all_writes_nvm && nvm->writeRespQueueFull())) {
-                switch_to_writes = true;
-            }
-
-            // remove the request from the queue - the iterator is no longer valid.
-            readQueue[dram_pkt->qosValue()].erase(to_read);
-        }
-
-        // switching to writes, either because the read queue is empty
-        // and the writes have passed the low threshold (or we are
-        // draining), or because the writes hit the high threshold
-        if (switch_to_writes) {
-            // transition to writing
-            busStateNext = WRITE;
-        }
-    } else {
-
-        bool write_found = false;
-        DRAMPacketQueue::iterator to_write;
-        uint8_t prio = numPriorities();
-
-        for (auto queue = writeQueue.rbegin();
-             queue != writeQueue.rend(); ++queue) {
-
-            prio--;
-
-            DPRINTF(QOS,
-                    "DRAM controller checking WRITE queue [%d] priority [%d elements]\n",
-                    prio, queue->size());
-
-            // If we are changing command type, incorporate the minimum
-            // bus turnaround delay
-            to_write = chooseNext((*queue),
-                     switched_cmd_type ? minReadToWriteDataGap() : 0);
-
-            if (to_write != queue->end()) {
-                write_found = true;
-                break;
-            }
-        }
-
-        // if no writes to a rank that is available to service requests
-        // (i.e. a rank in refresh idle state) are found, then return.
-        // There could be reads to the available ranks. However, to
-        // avoid adding more complexity to the code, return at this point and
-        // wait for a refresh event to kick things into action again.
-        if (!write_found) {
-            DPRINTF(DRAM, "No Writes Found - exiting\n");
-            return;
-        }
-
-        auto dram_pkt = *to_write;
-
-        // sanity check
-        assert(dram_pkt->size <= (dram_pkt->isDram() ?
-                                  dram->bytesPerBurst() :
-                                  nvm->bytesPerBurst()) );
-
-        doBurstAccess(dram_pkt);
-
-        isInWriteQueue.erase(burstAlign(dram_pkt->addr, dram_pkt->isDram()));
-
-        // log the response
-        logResponse(MemCtrl::WRITE, dram_pkt->masterId(),
-                    dram_pkt->qosValue(), dram_pkt->getAddr(), 1,
-                    dram_pkt->readyTime - dram_pkt->entryTime);
-
-
-        // remove the request from the queue - the iterator is no longer valid
-        writeQueue[dram_pkt->qosValue()].erase(to_write);
-
-        delete dram_pkt;
-
-        // If we emptied the write queue, or got sufficiently below the
-        // threshold (using the minWritesPerSwitch as the hysteresis) and
-        // are not draining, or we have reads waiting and have done enough
-        // writes, then switch to reads.
-        // If we are interfacing to NVM and have filled the writeRespQueue,
-        // with only NVM writes in Q, then switch to reads
-        bool below_threshold =
-            totalWriteQueueSize + minWritesPerSwitch < writeLowThreshold;
-
-        if (totalWriteQueueSize == 0 ||
-            (below_threshold && drainState() != DrainState::Draining) ||
-            (totalReadQueueSize && writesThisTime >= minWritesPerSwitch) ||
-            (totalReadQueueSize && nvm && nvm->writeRespQueueFull() &&
-             all_writes_nvm)) {
-
-            // turn the bus back around for reads again
-            busStateNext = MemCtrl::READ;
-
-            // note that we switch back to reads also in the idle
-            // case, which eventually will check for any draining and
-            // also pause any further scheduling if there is really
-            // nothing to do
-        }
-    }
-    // It is possible that a refresh to another rank kicks things back into
-    // action before reaching this point.
-    if (!nextReqEvent.scheduled())
-        schedule(nextReqEvent, std::max(nextReqTime, curTick()));
-
-    // If there is space available and we have writes waiting then let
-    // them retry. This is done here to ensure that the retry does not
-    // cause a nextReqEvent to be scheduled before we do so as part of
-    // the next request processing
-    if (retryWrReq && totalWriteQueueSize < writeBufferSize) {
-        retryWrReq = false;
-        port.sendRetryReq();
-    }
-}
-
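The write-drain decision at the end of processNextReqEvent uses minWritesPerSwitch as hysteresis so the bus is not turned around for every single write. A simplified standalone sketch (switchBackToReads is hypothetical; the drain-state and NVM writeRespQueue conditions from the real check are omitted):

    #include <cassert>
    #include <cstdint>

    bool switchBackToReads(uint32_t write_q, uint32_t read_q,
                           uint32_t writes_this_time,
                           uint32_t write_low, uint32_t min_per_switch)
    {
        bool below_threshold = write_q + min_per_switch < write_low;
        return write_q == 0 || below_threshold ||
               (read_q > 0 && writes_this_time >= min_per_switch);
    }

    int main()
    {
        // Low threshold 16 with hysteresis 4: 13 queued writes keep draining,
        // but with reads waiting and 4 writes already issued, turn around.
        assert(!switchBackToReads(13, 0, 2, 16, 4));
        assert(switchBackToReads(13, 32, 4, 16, 4));
    }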
-MemInterface::MemInterface(const MemInterfaceParams* _p)
-    : AbstractMemory(_p),
-      addrMapping(_p->addr_mapping),
-      burstSize((_p->devices_per_rank * _p->burst_length *
-                 _p->device_bus_width) / 8),
-      deviceSize(_p->device_size),
-      deviceRowBufferSize(_p->device_rowbuffer_size),
-      devicesPerRank(_p->devices_per_rank),
-      rowBufferSize(devicesPerRank * deviceRowBufferSize),
-      burstsPerRowBuffer(rowBufferSize / burstSize),
-      burstsPerStripe(range.interleaved() ?
-                      range.granularity() / burstSize : 1),
-      ranksPerChannel(_p->ranks_per_channel),
-      banksPerRank(_p->banks_per_rank), rowsPerBank(0),
-      tCK(_p->tCK), tCS(_p->tCS), tBURST(_p->tBURST),
-      tRTW(_p->tRTW),
-      tWTR(_p->tWTR)
-{}
-
-void
-MemInterface::setCtrl(DRAMCtrl* _ctrl, unsigned int command_window)
-{
-    ctrl = _ctrl;
-    maxCommandsPerWindow = command_window / tCK;
-}
-
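setCtrl derives maxCommandsPerWindow by integer division of the command window by the clock period. A quick standalone check with assumed values (10 ns window, 1.25 ns tCK; neither value comes from this change):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // A 10 ns command window with a 1.25 ns clock yields eight
        // command slots per window (integer division, as above).
        uint64_t command_window_ps = 10000;
        uint64_t tCK_ps = 1250;
        assert(command_window_ps / tCK_ps == 8);
    }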
 DRAMInterface::DRAMInterface(const DRAMInterfaceParams* _p)
     : MemInterface(_p),
       bankGroupsPerRank(_p->bank_groups_per_rank),
@@ -1849,9 +753,7 @@
       timeStampOffset(0), activeRank(0),
       enableDRAMPowerdown(_p->enable_dram_powerdown),
       lastStatsResetTick(0),
-      stats(*this),
-      readBufferSize(_p->read_buffer_size),
-      writeBufferSize(_p->write_buffer_size)
+      stats(*this)
 {
     DPRINTF(DRAM, "Setting up DRAM Interface\n");
 
@@ -2132,7 +1034,7 @@
 }
 
 pair<vector<uint32_t>, bool>
-DRAMInterface::minBankPrep(const DRAMPacketQueue& queue,
+DRAMInterface::minBankPrep(const MemPacketQueue& queue,
                       Tick min_col_at) const
 {
     Tick min_act_at = MaxTick;
@@ -2943,173 +1845,6 @@
            (dram.ctrl->inWriteBusState(true) && (writeEntries != 0));
 }
 
-DRAMCtrl::CtrlStats::CtrlStats(DRAMCtrl &_ctrl)
-    : Stats::Group(&_ctrl),
-    ctrl(_ctrl),
-
-    ADD_STAT(readReqs, "Number of read requests accepted"),
-    ADD_STAT(writeReqs, "Number of write requests accepted"),
-
-    ADD_STAT(readBursts,
-             "Number of controller read bursts, "
-             "including those serviced by the write queue"),
-    ADD_STAT(writeBursts,
-             "Number of controller write bursts, "
-             "including those merged in the write queue"),
-    ADD_STAT(servicedByWrQ,
-             "Number of controller read bursts serviced by the write queue"),
-    ADD_STAT(mergedWrBursts,
-             "Number of controller write bursts merged with an existing one"),
-
-    ADD_STAT(neitherReadNorWriteReqs,
-             "Number of requests that are neither read nor write"),
-
-    ADD_STAT(avgRdQLen, "Average read queue length when enqueuing"),
-    ADD_STAT(avgWrQLen, "Average write queue length when enqueuing"),
-
-    ADD_STAT(numRdRetry, "Number of times read queue was full causing retry"),
-    ADD_STAT(numWrRetry, "Number of times write queue was full causing retry"),
-
-    ADD_STAT(readPktSize, "Read request sizes (log2)"),
-    ADD_STAT(writePktSize, "Write request sizes (log2)"),
-
-    ADD_STAT(rdQLenPdf, "What read queue length does an incoming req see"),
-    ADD_STAT(wrQLenPdf, "What write queue length does an incoming req see"),
-
-    ADD_STAT(rdPerTurnAround,
-             "Reads before turning the bus around for writes"),
-    ADD_STAT(wrPerTurnAround,
-             "Writes before turning the bus around for reads"),
-
-    ADD_STAT(bytesReadWrQ, "Total number of bytes read from write queue"),
-    ADD_STAT(bytesReadSys, "Total read bytes from the system interface side"),
-    ADD_STAT(bytesWrittenSys,
-             "Total written bytes from the system interface side"),
-
-    ADD_STAT(avgRdBWSys, "Average system read bandwidth in MiByte/s"),
-    ADD_STAT(avgWrBWSys, "Average system write bandwidth in MiByte/s"),
-
-    ADD_STAT(totGap, "Total gap between requests"),
-    ADD_STAT(avgGap, "Average gap between requests"),
-
-    ADD_STAT(masterReadBytes, "Per-master bytes read from memory"),
-    ADD_STAT(masterWriteBytes, "Per-master bytes write to memory"),
-    ADD_STAT(masterReadRate,
-             "Per-master bytes read from memory rate (Bytes/sec)"),
-    ADD_STAT(masterWriteRate,
-             "Per-master bytes write to memory rate (Bytes/sec)"),
-    ADD_STAT(masterReadAccesses,
-             "Per-master read serviced memory accesses"),
-    ADD_STAT(masterWriteAccesses,
-             "Per-master write serviced memory accesses"),
-    ADD_STAT(masterReadTotalLat,
-             "Per-master read total memory access latency"),
-    ADD_STAT(masterWriteTotalLat,
-             "Per-master write total memory access latency"),
-    ADD_STAT(masterReadAvgLat,
-             "Per-master read average memory access latency"),
-    ADD_STAT(masterWriteAvgLat,
-             "Per-master write average memory access latency")
-
-{
-}
-
-void
-DRAMCtrl::CtrlStats::regStats()
-{
-    using namespace Stats;
-
-    assert(ctrl.system());
-    const auto max_masters = ctrl.system()->maxMasters();
-
-    avgRdQLen.precision(2);
-    avgWrQLen.precision(2);
-
-    readPktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
-    writePktSize.init(ceilLog2(ctrl.system()->cacheLineSize()) + 1);
-
-    rdQLenPdf.init(ctrl.readBufferSize);
-    wrQLenPdf.init(ctrl.writeBufferSize);
-
-    rdPerTurnAround
-        .init(ctrl.readBufferSize)
-        .flags(nozero);
-    wrPerTurnAround
-        .init(ctrl.writeBufferSize)
-        .flags(nozero);
-
-    avgRdBWSys.precision(2);
-    avgWrBWSys.precision(2);
-    avgGap.precision(2);
-
-    // per-master bytes read and written to memory
-    masterReadBytes
-        .init(max_masters)
-        .flags(nozero | nonan);
-
-    masterWriteBytes
-        .init(max_masters)
-        .flags(nozero | nonan);
-
-    // per-master bytes read and written to memory rate
-    masterReadRate
-        .flags(nozero | nonan)
-        .precision(12);
-
-    masterReadAccesses
-        .init(max_masters)
-        .flags(nozero);
-
-    masterWriteAccesses
-        .init(max_masters)
-        .flags(nozero);
-
-    masterReadTotalLat
-        .init(max_masters)
-        .flags(nozero | nonan);
-
-    masterReadAvgLat
-        .flags(nonan)
-        .precision(2);
-
-    masterWriteRate
-        .flags(nozero | nonan)
-        .precision(12);
-
-    masterWriteTotalLat
-        .init(max_masters)
-        .flags(nozero | nonan);
-
-    masterWriteAvgLat
-        .flags(nonan)
-        .precision(2);
-
-    for (int i = 0; i < max_masters; i++) {
-        const std::string master = ctrl.system()->getMasterName(i);
-        masterReadBytes.subname(i, master);
-        masterReadRate.subname(i, master);
-        masterWriteBytes.subname(i, master);
-        masterWriteRate.subname(i, master);
-        masterReadAccesses.subname(i, master);
-        masterWriteAccesses.subname(i, master);
-        masterReadTotalLat.subname(i, master);
-        masterReadAvgLat.subname(i, master);
-        masterWriteTotalLat.subname(i, master);
-        masterWriteAvgLat.subname(i, master);
-    }
-
-    // Formula stats
-    avgRdBWSys = (bytesReadSys / 1000000) / simSeconds;
-    avgWrBWSys = (bytesWrittenSys / 1000000) / simSeconds;
-
-    avgGap = totGap / (readReqs + writeReqs);
-
-    masterReadRate = masterReadBytes / simSeconds;
-    masterWriteRate = masterWriteBytes / simSeconds;
-    masterReadAvgLat = masterReadTotalLat / masterReadAccesses;
-    masterWriteAvgLat = masterWriteTotalLat / masterWriteAccesses;
-}
-
 void
 DRAMInterface::DRAMStats::resetStats()
 {
@@ -3261,142 +1996,6 @@
     rank.computeStats();
 }
 
-void
-DRAMCtrl::recvFunctional(PacketPtr pkt)
-{
-    if (dram && dram->getAddrRange().contains(pkt->getAddr())) {
-        // rely on the abstract memory
-        dram->functionalAccess(pkt);
-    } else if (nvm && nvm->getAddrRange().contains(pkt->getAddr())) {
-        // rely on the abstract memory
-        nvm->functionalAccess(pkt);
-    } else {
-        panic("Can't handle address range for packet %s\n",
-              pkt->print());
-    }
-}
-
-Port &
-DRAMCtrl::getPort(const string &if_name, PortID idx)
-{
-    if (if_name != "port") {
-        return QoS::MemCtrl::getPort(if_name, idx);
-    } else {
-        return port;
-    }
-}
-
-bool
-DRAMCtrl::allIntfDrained() const
-{
-   // ensure dram is in power down and refresh IDLE states
-   bool dram_drained = !dram || dram->allRanksDrained();
-   // No outstanding NVM writes
-   // All other queues verified as needed with calling logic
-   bool nvm_drained = !nvm || nvm->allRanksDrained();
-   return (dram_drained && nvm_drained);
-}
-
-DrainState
-DRAMCtrl::drain()
-{
-    // if there is anything in any of our internal queues, keep track
-    // of that as well
-    if (!(!totalWriteQueueSize && !totalReadQueueSize && respQueue.empty() &&
-          allIntfDrained())) {
-
-        DPRINTF(Drain, "DRAM controller not drained, write: %d, read: %d,"
-                " resp: %d\n", totalWriteQueueSize, totalReadQueueSize,
-                respQueue.size());
-
-        // the only queue that is not drained automatically over time
-        // is the write queue, thus kick things into action if needed
-        if (!totalWriteQueueSize && !nextReqEvent.scheduled()) {
-            schedule(nextReqEvent, curTick());
-        }
-
-        if (dram)
-            dram->drainRanks();
-
-        return DrainState::Draining;
-    } else {
-        return DrainState::Drained;
-    }
-}
-
-void
-DRAMCtrl::drainResume()
-{
-    if (!isTimingMode && system()->isTimingMode()) {
-        // if we switched to timing mode, kick things into action,
-        // and behave as if we restored from a checkpoint
-        startup();
-        dram->startup();
-    } else if (isTimingMode && !system()->isTimingMode()) {
-        // if we switch from timing mode, stop the refresh events to
-        // not cause issues with KVM
-        if (dram)
-            dram->suspend();
-    }
-
-    // update the mode
-    isTimingMode = system()->isTimingMode();
-}
-
-DRAMCtrl::MemoryPort::MemoryPort(const std::string& name, DRAMCtrl& _ctrl)
-    : QueuedSlavePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
-      ctrl(_ctrl)
-{ }
-
-AddrRangeList
-DRAMCtrl::MemoryPort::getAddrRanges() const
-{
-    AddrRangeList ranges;
-    if (ctrl.dram) {
-        DPRINTF(DRAM, "Pushing DRAM ranges to port\n");
-        ranges.push_back(ctrl.dram->getAddrRange());
-    }
-    if (ctrl.nvm) {
-        DPRINTF(DRAM, "Pushing NVM ranges to port\n");
-        ranges.push_back(ctrl.nvm->getAddrRange());
-    }
-    return ranges;
-}
-
-void
-DRAMCtrl::MemoryPort::recvFunctional(PacketPtr pkt)
-{
-    pkt->pushLabel(ctrl.name());
-
-    if (!queue.trySatisfyFunctional(pkt)) {
-        // Default implementation of SimpleTimingPort::recvFunctional()
-        // calls recvAtomic() and throws away the latency; we can save a
-        // little here by just not calculating the latency.
-        ctrl.recvFunctional(pkt);
-    }
-
-    pkt->popLabel();
-}
-
-Tick
-DRAMCtrl::MemoryPort::recvAtomic(PacketPtr pkt)
-{
-    return ctrl.recvAtomic(pkt);
-}
-
-bool
-DRAMCtrl::MemoryPort::recvTimingReq(PacketPtr pkt)
-{
-    // pass it to the memory controller
-    return ctrl.recvTimingReq(pkt);
-}
-
-DRAMCtrl*
-DRAMCtrlParams::create()
-{
-    return new DRAMCtrl(this);
-}
-
 NVMInterface::NVMInterface(const NVMInterfaceParams* _p)
     : MemInterface(_p),
       maxPendingWrites(_p->max_pending_writes),
@@ -3407,9 +2006,7 @@
       writeRespondEvent([this]{ processWriteRespondEvent(); }, name()),
       readReadyEvent([this]{ processReadReadyEvent(); }, name()),
       nextReadAt(0), numPendingReads(0), numReadDataReady(0),
-      numReadsToIssue(0), numWritesQueued(0),
-      readBufferSize(_p->read_buffer_size),
-      writeBufferSize(_p->write_buffer_size)
+      numReadsToIssue(0), numWritesQueued(0)
 {
     DPRINTF(NVM, "Setting up NVM Interface\n");
 
@@ -3473,8 +2070,8 @@
     }
 }
 
-pair<DRAMPacketQueue::iterator, Tick>
-NVMInterface::chooseNextFRFCFS(DRAMPacketQueue& queue, Tick min_col_at) const
+pair<MemPacketQueue::iterator, Tick>
+NVMInterface::chooseNextFRFCFS(MemPacketQueue& queue, Tick min_col_at) const
 {
     // remember if we found a hit, but one that cannot issue seamlessly
     bool found_prepped_pkt = false;
@@ -3483,7 +2080,7 @@
     Tick selected_col_at = MaxTick;
 
     for (auto i = queue.begin(); i != queue.end() ; ++i) {
-        DRAMPacket* pkt = *i;
+        MemPacket* pkt = *i;
 
         // select optimal NVM packet in Q
         if (!pkt->isDram()) {
@@ -3530,7 +2127,7 @@
 }
 
 void
-NVMInterface::chooseRead(DRAMPacketQueue& queue)
+NVMInterface::chooseRead(MemPacketQueue& queue)
 {
     Tick cmd_at = std::max(curTick(), nextReadAt);
 
@@ -3545,7 +2142,7 @@
     numReadsToIssue--;
     // For simplicity, issue non-deterministic reads in order (fcfs)
     for (auto i = queue.begin(); i != queue.end() ; ++i) {
-        DRAMPacket* pkt = *i;
+        MemPacket* pkt = *i;
 
         // Find 1st NVM read packet that hasn't issued read command
         if (pkt->readyTime == MaxTick && !pkt->isDram() && pkt->isRead()) {
@@ -3671,7 +2268,7 @@
 
 
 bool
-NVMInterface::burstReady(DRAMPacket* pkt) const {
+NVMInterface::burstReady(MemPacket* pkt) const {
     bool read_rdy =  pkt->isRead() && (ctrl->inReadBusState(true)) &&
                (pkt->readyTime <= curTick()) && (numReadDataReady > 0);
     bool write_rdy =  !pkt->isRead() && !ctrl->inReadBusState(true) &&
@@ -3680,7 +2277,7 @@
 }
 
 pair<Tick, Tick>
-NVMInterface::doBurstAccess(DRAMPacket* pkt, Tick next_burst_at)
+NVMInterface::doBurstAccess(MemPacket* pkt, Tick next_burst_at)
 {
     DPRINTF(NVM, "NVM Timing access to addr %lld, rank/bank/row %d %d %d\n",
             pkt->addr, pkt->rank, pkt->bank, pkt->row);
diff --git a/src/mem/dram_ctrl.hh b/src/mem/mem_interface.hh
similarity index 61%
rename from src/mem/dram_ctrl.hh
rename to src/mem/mem_interface.hh
index 7fc499f..9844002 100644
--- a/src/mem/dram_ctrl.hh
+++ b/src/mem/mem_interface.hh
@@ -40,11 +40,11 @@
 
 /**
  * @file
- * DRAMCtrl declaration
+ * MemInterface declaration
  */
 
-#ifndef __MEM_DRAM_CTRL_HH__
-#define __MEM_DRAM_CTRL_HH__
+#ifndef __MEM_INTERFACE_HH__
+#define __MEM_INTERFACE_HH__
 
 #include <deque>
 #include <string>
@@ -54,168 +54,15 @@
 
 #include "base/statistics.hh"
 #include "enums/AddrMap.hh"
-#include "enums/MemSched.hh"
 #include "enums/PageManage.hh"
 #include "mem/abstract_mem.hh"
 #include "mem/drampower.hh"
-#include "mem/qos/mem_ctrl.hh"
-#include "mem/qport.hh"
-#include "params/DRAMCtrl.hh"
+#include "mem/mem_ctrl.hh"
 #include "params/DRAMInterface.hh"
 #include "params/MemInterface.hh"
 #include "params/NVMInterface.hh"
 #include "sim/eventq.hh"
 
-class DRAMInterfaceParams;
-class NVMInterfaceParams;
-
-/**
- * A burst helper helps organize and manage a packet that is larger than
- * the DRAM burst size. A system packet that is larger than the burst size
- * is split into multiple DRAM packets and all those DRAM packets point to
- * a single burst helper such that we know when the whole packet is served.
- */
-class BurstHelper
-{
-  public:
-
-    /** Number of DRAM bursts required for a system packet **/
-    const unsigned int burstCount;
-
-    /** Number of DRAM bursts serviced so far for a system packet **/
-    unsigned int burstsServiced;
-
-    BurstHelper(unsigned int _burstCount)
-        : burstCount(_burstCount), burstsServiced(0)
-    { }
-};
-
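The burstCount a BurstHelper tracks follows from splitting the system packet at burst-size boundaries, so an unaligned start can cost an extra burst. A standalone sketch of that arithmetic (burstCount is hypothetical; the real split happens in the controller's queueing code):

    #include <cassert>
    #include <cstdint>

    // Number of bursts a packet of `size` bytes starting at `addr` occupies
    // when split at burst_size boundaries (an unaligned start can add one).
    unsigned burstCount(uint64_t addr, unsigned size, unsigned burst_size)
    {
        uint64_t first = addr / burst_size;
        uint64_t last = (addr + size - 1) / burst_size;
        return static_cast<unsigned>(last - first + 1);
    }

    int main()
    {
        assert(burstCount(0, 128, 64) == 2);   // aligned 128 B: two bursts
        assert(burstCount(32, 128, 64) == 3);  // unaligned: straddles three
    }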
-/**
- * A DRAM packet stores packets along with the timestamp of when
- * the packet entered the queue, and also the decoded address.
- */
-class DRAMPacket
-{
-  public:
-
-    /** When did request enter the controller */
-    const Tick entryTime;
-
-    /** When will request leave the controller */
-    Tick readyTime;
-
-    /** This comes from the outside world */
-    const PacketPtr pkt;
-
-    /** MasterID associated with the packet */
-    const MasterID _masterId;
-
-    const bool read;
-
-    /** Does this packet access DRAM? */
-    const bool dram;
-
-    /** Will be populated by address decoder */
-    const uint8_t rank;
-    const uint8_t bank;
-    const uint32_t row;
-
-    /**
-     * Bank id is calculated considering banks in all the ranks
-     * eg: 2 ranks each with 8 banks, then bankId = 0 --> rank0, bank0 and
-     * bankId = 8 --> rank1, bank0
-     */
-    const uint16_t bankId;
-
-    /**
-     * The starting address of the DRAM packet.
-     * This address could be unaligned to burst size boundaries. The
-     * reason is to keep the address offset so we can accurately check
-     * incoming read packets with packets in the write queue.
-     */
-    Addr addr;
-
-    /**
-     * The size of this dram packet in bytes
-     * It is always equal or smaller than DRAM burst size
-     */
-    unsigned int size;
-
-    /**
-     * A pointer to the BurstHelper if this DRAMPacket is a split packet
-     * If not a split packet (common case), this is set to NULL
-     */
-    BurstHelper* burstHelper;
-
-    /**
-     * QoS value of the encapsulated packet read at queuing time
-     */
-    uint8_t _qosValue;
-
-    /**
-     * Set the packet QoS value
-     * (interface compatibility with Packet)
-     */
-    inline void qosValue(const uint8_t qv) { _qosValue = qv; }
-
-    /**
-     * Get the packet QoS value
-     * (interface compatibility with Packet)
-     */
-    inline uint8_t qosValue() const { return _qosValue; }
-
-    /**
-     * Get the packet MasterID
-     * (interface compatibility with Packet)
-     */
-    inline MasterID masterId() const { return _masterId; }
-
-    /**
-     * Get the packet size
-     * (interface compatibility with Packet)
-     */
-    inline unsigned int getSize() const { return size; }
-
-    /**
-     * Get the packet address
-     * (interface compatibility with Packet)
-     */
-    inline Addr getAddr() const { return addr; }
-
-    /**
-     * Return true if its a read packet
-     * (interface compatibility with Packet)
-     */
-    inline bool isRead() const { return read; }
-
-    /**
-     * Return true if its a write packet
-     * (interface compatibility with Packet)
-     */
-    inline bool isWrite() const { return !read; }
-
-    /**
-     * Return true if its a DRAM access
-     */
-    inline bool isDram() const { return dram; }
-
-    DRAMPacket(PacketPtr _pkt, bool is_read, bool is_dram, uint8_t _rank,
-               uint8_t _bank, uint32_t _row, uint16_t bank_id, Addr _addr,
-               unsigned int _size)
-        : entryTime(curTick()), readyTime(curTick()), pkt(_pkt),
-          _masterId(pkt->masterId()),
-          read(is_read), dram(is_dram), rank(_rank), bank(_bank), row(_row),
-          bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
-          _qosValue(_pkt->qosValue())
-    { }
-
-};
-
-// The DRAM packets are stored in multiple deque structures,
-// based on their QoS priority
-typedef std::deque<DRAMPacket*> DRAMPacketQueue;
-
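The typedef above is used as one deque per QoS priority, and the controller scans the vector of queues from highest priority to lowest (the rbegin()/rend() loops in processNextReqEvent). A standalone sketch of that arrangement (Pkt, PktQueue, and pickHighestPriority are hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <deque>
    #include <vector>

    struct Pkt { uint8_t qos; uint64_t addr; };
    using PktQueue = std::deque<Pkt*>;

    // Scan priorities high-to-low, mirroring the rbegin()/rend() walks in
    // the controller's request-selection loops.
    Pkt* pickHighestPriority(std::vector<PktQueue>& queues)
    {
        for (auto q = queues.rbegin(); q != queues.rend(); ++q)
            if (!q->empty())
                return q->front();
        return nullptr;
    }

    int main()
    {
        std::vector<PktQueue> queues(2);  // two QoS levels: 0 (low), 1 (high)
        Pkt lo{0, 0x100}, hi{1, 0x200};
        queues[0].push_back(&lo);
        queues[1].push_back(&hi);
        assert(pickHighestPriority(queues) == &hi);
    }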
-
 /**
  * General interface to memory device
  * Includes functions and parameters shared across media types
@@ -259,9 +106,9 @@
     };
 
     /**
-     * A pointer to the parent DRAMCtrl instance
+     * A pointer to the parent MemCtrl instance
      */
-    DRAMCtrl* ctrl;
+    MemCtrl* ctrl;
 
     /**
      * Number of commands that can issue in the defined controller
@@ -317,13 +164,23 @@
 
 
   public:
+
+    /**
+     * Buffer sizes for read and write queues in the controller.
+     * These are passed to the controller on instantiation.
+     * Defining them here allows the buffers to be resized based
+     * on memory type / configuration.
+     */
+    const uint32_t readBufferSize;
+    const uint32_t writeBufferSize;
+
     /** Set a pointer to the controller and initialize
      * interface based on controller parameters
      * @param _ctrl pointer to the parent controller
      * @param command_window size of command window used to
      *                       check command bandwidth
      */
-    void setCtrl(DRAMCtrl* _ctrl, unsigned int command_window);
+    void setCtrl(MemCtrl* _ctrl, unsigned int command_window);
 
     /**
      * Get an address in a dense range which starts from 0. The input
@@ -363,8 +220,8 @@
      * @return an iterator to the selected packet, else queue.end()
      * @return the tick when the packet selected will issue
      */
-    virtual std::pair<DRAMPacketQueue::iterator, Tick>
-    chooseNextFRFCFS(DRAMPacketQueue& queue, Tick min_col_at) const = 0;
+    virtual std::pair<MemPacketQueue::iterator, Tick>
+    chooseNextFRFCFS(MemPacketQueue& queue, Tick min_col_at) const = 0;
 
     /*
      * Function to calculate unloaded latency
@@ -386,7 +243,7 @@
      *
      * @param Return true if RD/WR can issue
      */
-    virtual bool burstReady(DRAMPacket* pkt) const = 0;
+    virtual bool burstReady(MemPacket* pkt) const = 0;
 
     /**
      * Determine the required delay for an access to a different rank
@@ -414,13 +271,13 @@
      * pkt_addr is used for the offset within the packet.
      *
      * @param pkt The packet from the outside world
-     * @param pkt_addr The starting address of the DRAM packet
-     * @param size The size of the DRAM packet in bytes
+     * @param pkt_addr The starting address of the packet
+     * @param size The size of the packet in bytes
      * @param is_read Is the request for a read or a write to memory
      * @param is_dram Is the request to a DRAM interface
-     * @return A DRAMPacket pointer with the decoded information
+     * @return A MemPacket pointer with the decoded information
      */
-    DRAMPacket* decodePacket(const PacketPtr pkt, Addr pkt_addr,
+    MemPacket* decodePacket(const PacketPtr pkt, Addr pkt_addr,
                            unsigned int size, bool is_read, bool is_dram);
 
     /**
@@ -997,17 +854,6 @@
       */
     std::vector<Rank*> ranks;
 
-  public:
-
-    /**
-      * Buffer sizes for read and write queues in the controller
-      * These are passed to the controller on instantiation
-      * Defining them here allows for buffers to be resized based
-      * on memory type / configuration.
-      */
-    const uint32_t readBufferSize;
-    const uint32_t writeBufferSize;
-
     /*
      * @return delay between write and read commands
      */
@@ -1024,7 +870,7 @@
      * @return boolean indicating burst can issue seamlessly, with no gaps
      */
     std::pair<std::vector<uint32_t>, bool>
-    minBankPrep(const DRAMPacketQueue& queue, Tick min_col_at) const;
+    minBankPrep(const MemPacketQueue& queue, Tick min_col_at) const;
 
     /*
      * @return time to send a burst of data without gaps
@@ -1093,8 +939,8 @@
      * @return an iterator to the selected packet, else queue.end()
      * @return the tick when the packet selected will issue
      */
-    std::pair<DRAMPacketQueue::iterator, Tick>
-    chooseNextFRFCFS(DRAMPacketQueue& queue, Tick min_col_at) const override;
+    std::pair<MemPacketQueue::iterator, Tick>
+    chooseNextFRFCFS(MemPacketQueue& queue, Tick min_col_at) const override;
 
     /**
      * Actually do the burst - figure out the latency it
@@ -1104,15 +950,15 @@
      * response q from where it will eventually go back to the outside
      * world.
      *
-     * @param dram_pkt The DRAM packet created from the outside world pkt
+     * @param mem_pkt The packet created from the outside world pkt
      * @param next_burst_at Minimum bus timing requirement from controller
      * @param queue Reference to the read or write queue with the packet
      * @return pair, tick when current burst is issued and
      *               tick when next burst can issue
      */
     std::pair<Tick, Tick>
-    doBurstAccess(DRAMPacket* dram_pkt, Tick next_burst_at,
-                  const std::vector<DRAMPacketQueue>& queue);
+    doBurstAccess(MemPacket* mem_pkt, Tick next_burst_at,
+                  const std::vector<MemPacketQueue>& queue);
 
     /**
      * Check if a burst operation can be issued to the DRAM
@@ -1122,7 +968,7 @@
      *                    REF IDLE state
      */
     bool
-    burstReady(DRAMPacket* pkt) const override
+    burstReady(MemPacket* pkt) const override
     {
         return ranks[pkt->rank]->inRefIdleState();
     }
@@ -1310,15 +1156,6 @@
     uint32_t numWritesQueued;
 
     /**
-      * Buffer sizes for read and write queues in the controller
-      * These are passed to the controller on instantiation
-      * Defining them here allows for buffers to be resized based
-      * on memory type / configuration.
-      */
-    const uint32_t readBufferSize;
-    const uint32_t writeBufferSize;
-
-    /**
      * Initialize the NVM interface and verify parameters
      */
     void init() override;
@@ -1352,7 +1189,7 @@
      *                    has been updated to a non-zero value to
      *                    account for race conditions between events
      */
-    bool burstReady(DRAMPacket* pkt) const override;
+    bool burstReady(MemPacket* pkt) const override;
 
     /**
      * This function checks if ranks are busy.
@@ -1375,8 +1212,8 @@
      * @return an iterator to the selected packet, else queue.end()
      * @return the tick when the packet selected will issue
      */
-    std::pair<DRAMPacketQueue::iterator, Tick>
-    chooseNextFRFCFS(DRAMPacketQueue& queue, Tick min_col_at) const override;
+    std::pair<MemPacketQueue::iterator, Tick>
+    chooseNextFRFCFS(MemPacketQueue& queue, Tick min_col_at) const override;
 
     /**
      *  Add rank to rank delay to bus timing to all NVM banks in all ranks
@@ -1391,7 +1228,7 @@
     /**
      * Select read command to issue asynchronously
      */
-    void chooseRead(DRAMPacketQueue& queue);
+    void chooseRead(MemPacketQueue& queue);
 
     /*
      * Function to calculate unloaded access latency
@@ -1425,531 +1262,9 @@
      *               tick when next burst can issue
      */
     std::pair<Tick, Tick>
-    doBurstAccess(DRAMPacket* pkt, Tick next_burst_at);
+    doBurstAccess(MemPacket* pkt, Tick next_burst_at);
 
     NVMInterface(const NVMInterfaceParams* _p);
 };
 
-/**
- * The DRAM controller is a single-channel memory controller capturing
- * the most important timing constraints associated with a
- * contemporary DRAM. For multi-channel memory systems, the controller
- * is combined with a crossbar model, with the channel address
- * interleaving taking part in the crossbar.
- *
- * As a basic design principle, this controller
- * model is not cycle callable, but instead uses events to: 1) decide
- * when new decisions can be made, 2) when resources become available,
- * 3) when things are to be considered done, and 4) when to send
- * things back. Through these simple principles, the model delivers
- * high performance, and lots of flexibility, allowing users to
- * evaluate the system impact of a wide range of memory technologies,
- * such as DDR3/4, LPDDR2/3/4, WideIO1/2, HBM and HMC.
- *
- * For more details, please see Hansson et al, "Simulating DRAM
- * controllers for future system architecture exploration",
- * Proc. ISPASS, 2014. If you use this model as part of your research
- * please cite the paper.
- *
- * The low-power functionality implements a staggered powerdown
- * similar to that described in "Optimized Active and Power-Down Mode
- * Refresh Control in 3D-DRAMs" by Jung et al, VLSI-SoC, 2014.
- */
-class DRAMCtrl : public QoS::MemCtrl
-{
-  private:
-
-    // For now, make use of a queued slave port to avoid dealing with
-    // flow control for the responses being sent back
-    class MemoryPort : public QueuedSlavePort
-    {
-
-        RespPacketQueue queue;
-        DRAMCtrl& ctrl;
-
-      public:
-
-        MemoryPort(const std::string& name, DRAMCtrl& _ctrl);
-
-      protected:
-
-        Tick recvAtomic(PacketPtr pkt);
-
-        void recvFunctional(PacketPtr pkt);
-
-        bool recvTimingReq(PacketPtr);
-
-        virtual AddrRangeList getAddrRanges() const;
-
-    };
-
-    /**
-     * Our incoming port, for a multi-ported controller add a crossbar
-     * in front of it
-     */
-    MemoryPort port;
-
-    /**
-     * Remember if the memory system is in timing mode
-     */
-    bool isTimingMode;
-
-    /**
-     * Remember if we have to retry a request when available.
-     */
-    bool retryRdReq;
-    bool retryWrReq;
-
-    /**
-     * Bunch of things requires to setup "events" in gem5
-     * When event "respondEvent" occurs for example, the method
-     * processRespondEvent is called; no parameters are allowed
-     * in these methods
-     */
-    void processNextReqEvent();
-    EventFunctionWrapper nextReqEvent;
-
-    void processRespondEvent();
-    EventFunctionWrapper respondEvent;
-
-    /**
-     * Check if the read queue has room for more entries
-     *
-     * @param pkt_count The number of entries needed in the read queue
-     * @return true if read queue is full, false otherwise
-     */
-    bool readQueueFull(unsigned int pkt_count) const;
-
-    /**
-     * Check if the write queue has room for more entries
-     *
-     * @param pkt_count The number of entries needed in the write queue
-     * @return true if write queue is full, false otherwise
-     */
-    bool writeQueueFull(unsigned int pkt_count) const;
-
-    /**
-     * When a new read comes in, first check if the write queue has a
-     * pending request to the same address. If not, decode the
-     * address to populate rank/bank/row, create one or multiple
-     * "dram_pkt"s, and push them to the back of the read queue. If
-     * this is the only read request in the system, schedule an event
-     * to start servicing it.
-     *
-     * @param pkt The request packet from the outside world
-     * @param pkt_count The number of DRAM bursts the pkt translates
-     * to. If the pkt size is larger than one full burst, then
-     * pkt_count is greater than one.
-     * @param is_dram Does this packet access DRAM?
-     */
-    void addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram);
-
-    /**
-     * Decode the incoming pkt, create a dram_pkt and push it to the
-     * back of the write queue. If the write queue length exceeds the
-     * threshold specified by the user, i.e. the queue is beginning
-     * to get full, stop reads and start draining writes.
-     *
-     * @param pkt The request packet from the outside world
-     * @param pkt_count The number of DRAM bursts the pkt translates
-     * to. If the pkt size is larger than one full burst, then
-     * pkt_count is greater than one.
-     * @param is_dram Does this packet access DRAM?
-     */
-    void addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram);
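
Both enqueue paths split a request into one queue entry per memory burst it
touches, which is where pkt_count comes from. A minimal sketch of that
split, assuming a power-of-two burst size (splitIntoBursts is a
hypothetical helper, not part of the controller):

    #include <cstdint>
    #include <vector>

    using Addr = std::uint64_t;  // stand-in for gem5's Addr

    // A request of 'size' bytes starting at 'addr' becomes one queue
    // entry per burst it touches, each burst-aligned. Assumes
    // burst_size is a power of two.
    std::vector<Addr>
    splitIntoBursts(Addr addr, unsigned size, unsigned burst_size)
    {
        std::vector<Addr> burst_addrs;
        Addr end = addr + size;
        // Align down to the first burst boundary, then step one burst
        // at a time until the request is covered.
        for (Addr a = addr & ~Addr(burst_size - 1); a < end;
             a += burst_size)
            burst_addrs.push_back(a);
        return burst_addrs;  // pkt_count == burst_addrs.size()
    }
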
-
-    /**
-     * Actually do the burst using the media-specific access function,
-     * and update the bus statistics when complete.
-     *
-     * @param pkt The DRAM packet created from the outside world pkt
-     */
-    void doBurstAccess(DRAMPacket* dram_pkt);
-
-    /**
-     * When a packet reaches its "readyTime" in the response Q,
-     * use the "access()" method in AbstractMemory to actually
-     * create the response packet, and send it back to the outside
-     * world requestor.
-     *
-     * @param pkt The packet from the outside world
-     * @param static_latency Static latency to add before sending the packet
-     */
-    void accessAndRespond(PacketPtr pkt, Tick static_latency);
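
A simplified sketch of the respond path described above, using stand-in
types: the access itself happens immediately, while the response is queued
with a ready time that adds the static latency:

    #include <cstdint>
    #include <queue>

    using Tick = std::uint64_t;  // stand-in for gem5's Tick

    struct Resp { Tick ready_time; /* payload elided */ };

    // The memory access would happen here (AbstractMemory::access()
    // in gem5 turns the request into a response); the response is
    // then held until the static latency has elapsed.
    void accessAndRespondSketch(std::queue<Resp> &resp_q, Tick now,
                                Tick static_latency)
    {
        resp_q.push({now + static_latency});  // respond no earlier
    }
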
-
-    /**
-     * Determine if there is a packet that can issue.
-     *
-     * @param pkt The packet to evaluate
-     */
-    bool
-    packetReady(DRAMPacket* pkt)
-    {
-        return (pkt->isDram() ?
-            dram->burstReady(pkt) : nvm->burstReady(pkt));
-    }
-
-    /**
-     * Calculate the minimum delay used when scheduling a read-to-write
-     * transition.
-     *
-     * @return minimum delay
-     */
-    Tick
-    minReadToWriteDataGap()
-    {
-        Tick dram_min = dram ?  dram->minReadToWriteDataGap() : MaxTick;
-        Tick nvm_min = nvm ?  nvm->minReadToWriteDataGap() : MaxTick;
-        return std::min(dram_min, nvm_min);
-    }
-
-    /**
-     * Calculate the minimum delay used when scheduling a write-to-read
-     * transition.
-     *
-     * @return minimum delay
-     */
-    Tick
-    minWriteToReadDataGap()
-    {
-        Tick dram_min = dram ? dram->minWriteToReadDataGap() : MaxTick;
-        Tick nvm_min = nvm ?  nvm->minWriteToReadDataGap() : MaxTick;
-        return std::min(dram_min, nvm_min);
-    }
-
-    /**
-     * The memory scheduler/arbiter picks which request goes next,
-     * based on the specified policy such as FCFS or FR-FCFS, and
-     * moves it to the head of the queue.
-     * Accesses to the same rank as the previous burst are prioritized
-     * unless the controller is switching command type.
-     *
-     * @param queue Queued requests to consider
-     * @param extra_col_delay Any extra delay due to a read/write switch
-     * @return an iterator to the selected packet, else queue.end()
-     */
-    DRAMPacketQueue::iterator chooseNext(DRAMPacketQueue& queue,
-        Tick extra_col_delay);
-
-    /**
-     * For FR-FCFS policy reorder the read/write queue depending on row buffer
-     * hits and earliest bursts available in DRAM
-     *
-     * @param queue Queued requests to consider
-     * @param extra_col_delay Any extra delay due to a read/write switch
-     * @return an iterator to the selected packet, else queue.end()
-     */
-    DRAMPacketQueue::iterator chooseNextFRFCFS(DRAMPacketQueue& queue,
-            Tick extra_col_delay);
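
A condensed sketch of the FR-FCFS idea, assuming simplified stand-in types
(Pkt and PktQueue are not the gem5 classes): the oldest row hit wins
outright; otherwise the packet whose banks become ready soonest is chosen:

    #include <cstdint>
    #include <list>

    using Tick = std::uint64_t;  // stand-in for gem5's Tick

    struct Pkt { bool row_hit; Tick earliest_banks_at; };
    using PktQueue = std::list<Pkt*>;

    // Prefer the oldest packet that hits an open row; if none hits,
    // fall back to the packet whose bank is ready soonest.
    PktQueue::iterator
    chooseFRFCFS(PktQueue &queue)
    {
        auto chosen = queue.end();
        Tick best = ~Tick(0);  // MaxTick stand-in
        for (auto it = queue.begin(); it != queue.end(); ++it) {
            if ((*it)->row_hit)
                return it;  // oldest row hit wins immediately
            if ((*it)->earliest_banks_at < best) {
                best = (*it)->earliest_banks_at;
                chosen = it;
            }
        }
        return chosen;  // queue.end() if the queue was empty
    }
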
-
-    /**
-     * Calculate burst window aligned tick
-     *
-     * @param cmd_tick Initial tick of command
-     * @return burst window aligned tick
-     */
-    Tick getBurstWindow(Tick cmd_tick);
-
-    /**
-     * Used for debugging to observe the contents of the queues.
-     */
-    void printQs() const;
-
-    /**
-     * Burst-align an address.
-     *
-     * @param addr The potentially unaligned address
-     * @param is_dram Does this packet access DRAM?
-     *
-     * @return An address aligned to a memory burst
-     */
-    Addr
-    burstAlign(Addr addr, bool is_dram) const
-    {
-        if (is_dram)
-            return (addr & ~(Addr(dram->bytesPerBurst() - 1)));
-        else
-            return (addr & ~(Addr(nvm->bytesPerBurst() - 1)));
-    }
-
-    /**
-     * The controller's main read and write queues, with support for QoS reordering
-     */
-    std::vector<DRAMPacketQueue> readQueue;
-    std::vector<DRAMPacketQueue> writeQueue;
-
-    /**
-     * To avoid iterating over the write queue to check for
-     * overlapping transactions, maintain a set of burst addresses
-     * that are currently queued. Since we merge writes to the same
-     * location, we never have more than one entry for the same burst
-     * address.
-     */
-    std::unordered_set<Addr> isInWriteQueue;
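
A minimal sketch of that merge check, with Addr as a plain uint64_t
stand-in and needsNewEntry as a hypothetical helper: insertion into the set
doubles as the membership test, so a failed insert signals that the new
write can be merged:

    #include <cstdint>
    #include <unordered_set>

    using Addr = std::uint64_t;  // stand-in for gem5's Addr

    // Returns true if a new queue entry is needed; false means a
    // write to the same burst address is already queued, so the new
    // one merges with it.
    bool
    needsNewEntry(std::unordered_set<Addr> &in_write_q, Addr burst_addr)
    {
        // insert().second is false when the address was present
        return in_write_q.insert(burst_addr).second;
    }
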
-
-    /**
-     * Response queue where read packets wait after we're done working
-     * with them, but it's not time to send the response yet. The
-     * responses are stored separately mostly to keep the code clean
-     * and help with event scheduling. For all logical purposes such
-     * as sizing the read queue, this and the main read queue need to
-     * be added together.
-     */
-    std::deque<DRAMPacket*> respQueue;
-
-    /**
-     * Holds count of commands issued in burst window starting at
-     * defined Tick. This is used to ensure that the command bandwidth
-     * does not exceed the allowable media constraints.
-     */
-    std::unordered_multiset<Tick> burstTicks;
-
-    /**
-     * Pointer to the interface of the actual DRAM media, when connected
-     */
-    DRAMInterface* const dram;
-
-    /**
-     * Pointer to the interface of the actual NVM media, when connected
-     */
-    NVMInterface* const nvm;
-
-    /**
-     * The following are basic design parameters of the memory
-     * controller, and are initialized based on parameter values.
-     * The rowsPerBank is determined based on the capacity, number of
-     * ranks and banks, the burst size, and the row buffer size.
-     */
-    const uint32_t readBufferSize;
-    const uint32_t writeBufferSize;
-    const uint32_t writeHighThreshold;
-    const uint32_t writeLowThreshold;
-    const uint32_t minWritesPerSwitch;
-    uint32_t writesThisTime;
-    uint32_t readsThisTime;
-
-    /**
-     * Memory controller configuration initialized based on parameter
-     * values.
-     */
-    Enums::MemSched memSchedPolicy;
-
-    /**
-     * Pipeline latency of the controller frontend. The frontend
-     * contribution is added to writes (that complete when they are in
-     * the write buffer) and reads that are serviced by the write buffer.
-     */
-    const Tick frontendLatency;
-
-    /**
-     * Pipeline latency of the backend and PHY. Along with the
-     * frontend contribution, this latency is added to reads serviced
-     * by the DRAM.
-     */
-    const Tick backendLatency;
-
-    /**
-     * Length of a command window, used to check
-     * command bandwidth
-     */
-    const Tick commandWindow;
-
-    /**
-     * Until when must we wait before issuing the next RD/WR burst?
-     */
-    Tick nextBurstAt;
-
-    Tick prevArrival;
-
-    /**
-     * The soonest you have to start thinking about the next request
-     * is the longest access time that can occur before
-     * nextBurstAt. Assuming you need to precharge, open a new row,
-     * and access, it is tRP + tRCD + tCL.
-     */
-    Tick nextReqTime;
-
-    struct CtrlStats : public Stats::Group
-    {
-        CtrlStats(DRAMCtrl &ctrl);
-
-        void regStats() override;
-
-        DRAMCtrl &ctrl;
-
-        // All statistics that the model needs to capture
-        Stats::Scalar readReqs;
-        Stats::Scalar writeReqs;
-        Stats::Scalar readBursts;
-        Stats::Scalar writeBursts;
-        Stats::Scalar servicedByWrQ;
-        Stats::Scalar mergedWrBursts;
-        Stats::Scalar neitherReadNorWriteReqs;
-        // Average queue lengths
-        Stats::Average avgRdQLen;
-        Stats::Average avgWrQLen;
-
-        Stats::Scalar numRdRetry;
-        Stats::Scalar numWrRetry;
-        Stats::Vector readPktSize;
-        Stats::Vector writePktSize;
-        Stats::Vector rdQLenPdf;
-        Stats::Vector wrQLenPdf;
-        Stats::Histogram rdPerTurnAround;
-        Stats::Histogram wrPerTurnAround;
-
-        Stats::Scalar bytesReadWrQ;
-        Stats::Scalar bytesReadSys;
-        Stats::Scalar bytesWrittenSys;
-        // Average bandwidth
-        Stats::Formula avgRdBWSys;
-        Stats::Formula avgWrBWSys;
-
-        Stats::Scalar totGap;
-        Stats::Formula avgGap;
-
-        // per-master bytes read and written to memory
-        Stats::Vector masterReadBytes;
-        Stats::Vector masterWriteBytes;
-
-        // per-master bytes read and written to memory rate
-        Stats::Formula masterReadRate;
-        Stats::Formula masterWriteRate;
-
-        // per-master read and write serviced memory accesses
-        Stats::Vector masterReadAccesses;
-        Stats::Vector masterWriteAccesses;
-
-        // per-master read and write total memory access latency
-        Stats::Vector masterReadTotalLat;
-        Stats::Vector masterWriteTotalLat;
-
-        // per-master read and write average memory access latency
-        Stats::Formula masterReadAvgLat;
-        Stats::Formula masterWriteAvgLat;
-    };
-
-    CtrlStats stats;
-
-    /**
-     * Upstream caches need this packet until true is returned, so
-     * hold it for deletion until a subsequent call
-     */
-    std::unique_ptr<Packet> pendingDelete;
-
-    /**
-     * Select either the read or write queue
-     *
-     * @param is_read The current burst is a read, select read queue
-     * @return a reference to the appropriate queue
-     */
-    std::vector<DRAMPacketQueue>&
-    selQueue(bool is_read)
-    {
-        return (is_read ? readQueue : writeQueue);
-    };
-
-    /**
-     * Remove commands that have already issued from burstTicks
-     */
-    void pruneBurstTick();
-
-  public:
-
-    DRAMCtrl(const DRAMCtrlParams* p);
-
-    /**
-     * Ensure that all interfaces have drained their commands
-     *
-     * @return bool flag, set once drain complete
-     */
-    bool allIntfDrained() const;
-
-    DrainState drain() override;
-
-    /**
-     * Check for command bus contention for a single-cycle command.
-     * If there is contention, shift the command to the next burst
-     * window. The check verifies that the number of commands issued
-     * per burst window is less than a defined maximum,
-     * maxCommandsPerWindow. Contention is therefore not verified per
-     * cycle, but per burst window.
-     *
-     * @param cmd_tick Initial tick of command, to be verified
-     * @param max_cmds_per_burst Number of commands that can issue
-     *                           in a burst window
-     * @return tick for command issue without contention
-     */
-    Tick verifySingleCmd(Tick cmd_tick, Tick max_cmds_per_burst);
-
-    /**
-     * Check for command bus contention for a multi-cycle (currently
-     * two-cycle) command. If there is contention, shift the
-     * command(s) to the next burst window. The check verifies that
-     * the number of commands issued per burst window is less than a
-     * defined maximum, maxCommandsPerWindow. Contention is therefore
-     * not verified per cycle, but per burst window.
-     *
-     * @param cmd_tick Initial tick of command, to be verified
-     * @param max_cmds_per_burst Number of commands that can issue
-     *                           in a burst window
-     * @param max_multi_cmd_split Maximum delay between commands
-     * @return tick for command issue without contention
-     */
-    Tick verifyMultiCmd(Tick cmd_tick, Tick max_cmds_per_burst,
-                        Tick max_multi_cmd_split = 0);
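
A standalone sketch of the burst-window bookkeeping behind both checks,
assuming stand-in types and a hypothetical verifyCmdSketch helper: commands
are binned into windows via a multiset, and a full window pushes the
command into the next one:

    #include <cstdint>
    #include <unordered_set>

    using Tick = std::uint64_t;  // stand-in for gem5's Tick

    // Count commands already booked in the command's burst window;
    // if the window is full, slide the command to the next window.
    Tick
    verifyCmdSketch(std::unordered_multiset<Tick> &burst_ticks,
                    Tick cmd_tick, Tick window,
                    unsigned max_cmds_per_window)
    {
        Tick win = cmd_tick - (cmd_tick % window);  // align to window
        while (burst_ticks.count(win) >= max_cmds_per_window)
            win += window;                          // contention: shift
        burst_ticks.insert(win);
        return win;  // tick at which the command issues contention-free
    }
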
-
-    /**
-     * Is there a respondEvent scheduled?
-     *
-     * @return true if event is scheduled
-     */
-    bool respondEventScheduled() const { return respondEvent.scheduled(); }
-
-    /**
-     * Is there a read/write burst Event scheduled?
-     *
-     * @return true if event is scheduled
-     */
-    bool requestEventScheduled() const { return nextReqEvent.scheduled(); }
-
-    /**
-     * Restart the controller.
-     * This can be used by interfaces to restart the
-     * scheduler after maintenance commands complete.
-     *
-     * @param tick Tick at which to schedule the next request event
-     */
-    void restartScheduler(Tick tick) { schedule(nextReqEvent, tick); }
-
-    /**
-     * Check the current direction of the memory channel
-     *
-     * @param next_state Check either the current or next bus state
-     * @return True when bus is currently in a read state
-     */
-    bool inReadBusState(bool next_state) const;
-
-    /**
-     * Check the current direction of the memory channel
-     *
-     * @param next_state Check either the current or next bus state
-     * @return True when bus is currently in a write state
-     */
-    bool inWriteBusState(bool next_state) const;
-
-    Port &getPort(const std::string &if_name,
-                  PortID idx=InvalidPortID) override;
-
-    virtual void init() override;
-    virtual void startup() override;
-    virtual void drainResume() override;
-
-  protected:
-
-    Tick recvAtomic(PacketPtr pkt);
-    void recvFunctional(PacketPtr pkt);
-    bool recvTimingReq(PacketPtr pkt);
-
-};
-
-#endif //__MEM_DRAM_CTRL_HH__
+#endif //__MEM_INTERFACE_HH__
diff --git a/tests/gem5/configs/base_config.py b/tests/gem5/configs/base_config.py
index cbea768..fbedbaf 100644
--- a/tests/gem5/configs/base_config.py
+++ b/tests/gem5/configs/base_config.py
@@ -221,7 +221,7 @@
 
     def create_system(self):
         if issubclass(self.mem_class, m5.objects.DRAMInterface):
-            mem_ctrl = DRAMCtrl()
+            mem_ctrl = MemCtrl()
             mem_ctrl.dram = self.mem_class()
         else:
             mem_ctrl = self.mem_class()
@@ -280,7 +280,7 @@
             if issubclass(self.mem_class, m5.objects.DRAMInterface):
                 mem_ctrls = []
                 for r in system.mem_ranges:
-                    mem_ctrl = DRAMCtrl()
+                    mem_ctrl = MemCtrl()
                     mem_ctrl.dram = self.mem_class(range = r)
                     mem_ctrls.append(mem_ctrl)
                 system.physmem = mem_ctrls