misc: Replaced master/slave terminology
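
This change replaces master/slave terminology throughout the code base
with requestor/responder (and related) naming:

  * MasterID becomes RequestorID, together with its accessors, e.g.
    System::getMasterId() -> getRequestorId(), dataMasterId() ->
    dataRequestorId(), instMasterId() -> instRequestorId(), and
    Request::funcMasterId -> Request::funcRequestorId.
  * Master/slave port classes, roles, and members become
    request/response ports, e.g. ScMasterPort/ScSlavePort ->
    ScRequestPort/ScResponsePort, VectorMasterPort ->
    VectorRequestPort, and intMasterPort/intSlavePort ->
    intRequestPort/intResponsePort.
  * Renamed Python port parameters keep their old names as
    DeprecatedParam aliases, so existing configuration scripts keep
    working while printing a deprecation warning, e.g. BaseTLB's
    slave/master -> cpu_side_ports/mem_side_port and X86LocalApic's
    int_slave/int_master -> int_responder/int_requestor.
  * The IDE driveID value 'master' becomes 'device0'.

For illustration, a hypothetical configuration fragment (the 'cpu' and
'bus' names below are placeholders, not part of this patch) would now
read:

    # New naming:
    cpu.interrupts[0].int_requestor = bus.cpu_side_ports
    # The DeprecatedParam alias still works, but emits a warning:
    cpu.interrupts[0].int_master = bus.cpu_side_ports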

Change-Id: I4df2557c71e38cc4e3a485b0e590e85eb45de8b6
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/33553
Maintainer: Jason Lowe-Power <power.jg@gmail.com>
Reviewed-by: Daniel Carvalho <odanrc@yahoo.com.br>
Reviewed-by: Bobby R. Bruce <bbruce@ucdavis.edu>
Tested-by: kokoro <noreply+kokoro@google.com>
diff --git a/configs/common/FSConfig.py b/configs/common/FSConfig.py
index e154593..5814a03 100644
--- a/configs/common/FSConfig.py
+++ b/configs/common/FSConfig.py
@@ -98,7 +98,7 @@
 def makeCowDisks(disk_paths):
     disks = []
     for disk_path in disk_paths:
-        disk = CowIdeDisk(driveID='master')
+        disk = CowIdeDisk(driveID='device0')
         disk.childImage(disk_path);
         disks.append(disk)
     return disks
diff --git a/src/arch/arm/fastmodel/FastModel.py b/src/arch/arm/fastmodel/FastModel.py
index 5be451a..d35264a 100644
--- a/src/arch/arm/fastmodel/FastModel.py
+++ b/src/arch/arm/fastmodel/FastModel.py
@@ -35,11 +35,11 @@
 def AMBA_INITIATOR_ROLE(width):
     return 'AMBA INITIATOR %d' % width
 
-def SC_MASTER_PORT_ROLE(port_type):
-    return 'SC MASTER PORT for %s' % port_type
+def SC_REQUEST_PORT_ROLE(port_type):
+    return 'SC REQUEST PORT for %s' % port_type
 
-def SC_SLAVE_PORT_ROLE(port_type):
-    return 'SC SLAVE PORT for %s' % port_type
+def SC_RESPONSE_PORT_ROLE(port_type):
+    return 'SC RESPONSE PORT for %s' % port_type
 
 class AmbaTargetSocket(Port):
     def __init__(self, width, desc):
@@ -75,21 +75,21 @@
         super(VectorAmbaInitiatorSocket, self).__init__(
                 my_role, desc, is_source=True)
 
-class ScMasterPort(Port):
+class ScRequestPort(Port):
     def __init__(self, desc, port_type):
-        my_role = SC_MASTER_PORT_ROLE(port_type)
-        peer_role = SC_SLAVE_PORT_ROLE(port_type)
+        my_role = SC_REQUEST_PORT_ROLE(port_type)
+        peer_role = SC_RESPONSE_PORT_ROLE(port_type)
         Port.compat(my_role, peer_role)
 
-        super(ScMasterPort, self).__init__(my_role, desc)
+        super(ScRequestPort, self).__init__(my_role, desc)
 
-class ScSlavePort(Port):
+class ScResponsePort(Port):
     def __init__(self, desc, port_type):
-        my_role = SC_SLAVE_PORT_ROLE(port_type)
-        peer_role = SC_MASTER_PORT_ROLE(port_type)
+        my_role = SC_RESPONSE_PORT_ROLE(port_type)
+        peer_role = SC_REQUEST_PORT_ROLE(port_type)
         Port.compat(my_role, peer_role)
 
-        super(ScSlavePort, self).__init__(my_role, desc)
+        super(ScResponsePort, self).__init__(my_role, desc)
 
 class AmbaToTlmBridge64(SystemC_ScModule):
     type = 'AmbaToTlmBridge64'
diff --git a/src/arch/arm/fastmodel/GIC/FastModelGIC.py b/src/arch/arm/fastmodel/GIC/FastModelGIC.py
index 0980cc4..ddcf728 100644
--- a/src/arch/arm/fastmodel/GIC/FastModelGIC.py
+++ b/src/arch/arm/fastmodel/GIC/FastModelGIC.py
@@ -443,7 +443,7 @@
     output_attributes = Param.String("ExtendedID[62:55]=MPAM_PMG, "
             "ExtendedID[54:39]=MPAM_PARTID, ExtendedID[38]=MPAM_NS",
             "User-defined transform to be applied to bus attributes like "
-            "MasterID, ExtendedID or UserFlags. Currently, only works for "
+            "RequestorID, ExtendedID or UserFlags. Currently, only works for "
             "MPAM Attributes encoding into bus attributes.")
     has_DirtyVLPIOnLoad = Param.Bool(False, "GICR_VPENDBASER.Dirty reflects "
             "transient loading state when valid=1")
diff --git a/src/arch/arm/isa.cc b/src/arch/arm/isa.cc
index 3c6a3a2..9ace236 100644
--- a/src/arch/arm/isa.cc
+++ b/src/arch/arm/isa.cc
@@ -2345,7 +2345,7 @@
     warn_once("Doing AT (address translation) in functional mode! Fix Me!\n");
 
     auto req = std::make_shared<Request>(
-        val, 0, flags,  Request::funcMasterId,
+        val, 0, flags,  Request::funcRequestorId,
         tc->pcState().pc(), tc->contextId());
 
     Fault fault = getDTBPtr(tc)->translateFunctional(
@@ -2396,7 +2396,7 @@
     warn_once("Doing AT (address translation) in functional mode! Fix Me!\n");
 
     auto req = std::make_shared<Request>(
-        val, 0, flags,  Request::funcMasterId,
+        val, 0, flags,  Request::funcRequestorId,
         tc->pcState().pc(), tc->contextId());
 
     Fault fault = getDTBPtr(tc)->translateFunctional(
diff --git a/src/arch/arm/stage2_lookup.hh b/src/arch/arm/stage2_lookup.hh
index a5a984f..66b1359 100644
--- a/src/arch/arm/stage2_lookup.hh
+++ b/src/arch/arm/stage2_lookup.hh
@@ -82,7 +82,7 @@
     {
         req = std::make_shared<Request>();
         req->setVirt(s1Te.pAddr(s1Req->getVaddr()), s1Req->getSize(),
-                     s1Req->getFlags(), s1Req->masterId(), 0);
+                     s1Req->getFlags(), s1Req->requestorId(), 0);
     }
 
     Fault getTe(ThreadContext *tc, TlbEntry *destTe);
diff --git a/src/arch/arm/stage2_mmu.cc b/src/arch/arm/stage2_mmu.cc
index 745bb0f..090c8c9 100644
--- a/src/arch/arm/stage2_mmu.cc
+++ b/src/arch/arm/stage2_mmu.cc
@@ -49,13 +49,13 @@
 Stage2MMU::Stage2MMU(const Params *p)
     : SimObject(p), _stage1Tlb(p->tlb), _stage2Tlb(p->stage2_tlb),
       port(_stage1Tlb->getTableWalker(), p->sys),
-      masterId(p->sys->getMasterId(_stage1Tlb->getTableWalker()))
+      requestorId(p->sys->getRequestorId(_stage1Tlb->getTableWalker()))
 {
     // we use the stage-one table walker as the parent of the port,
-    // and to get our master id, this is done to keep things
+    // and to get our requestor id, this is done to keep things
     // symmetrical with other ISAs in terms of naming and stats
-    stage1Tlb()->setMMU(this, masterId);
-    stage2Tlb()->setMMU(this, masterId);
+    stage1Tlb()->setMMU(this, requestorId);
+    stage2Tlb()->setMMU(this, requestorId);
 }
 
 Fault
@@ -66,7 +66,8 @@
 
     // translate to physical address using the second stage MMU
     auto req = std::make_shared<Request>();
-    req->setVirt(descAddr, numBytes, flags | Request::PT_WALK, masterId, 0);
+    req->setVirt(descAddr, numBytes, flags | Request::PT_WALK,
+                requestorId, 0);
     if (isFunctional) {
         fault = stage2Tlb()->translateFunctional(req, tc, BaseTLB::Read);
     } else {
@@ -102,7 +103,7 @@
 {
     // translate to physical address using the second stage MMU
     translation->setVirt(
-            descAddr, numBytes, flags | Request::PT_WALK, masterId);
+            descAddr, numBytes, flags | Request::PT_WALK, requestorId);
     translation->translateTiming(tc);
 }
 
diff --git a/src/arch/arm/stage2_mmu.hh b/src/arch/arm/stage2_mmu.hh
index 0ac7abe..ed9f59e 100644
--- a/src/arch/arm/stage2_mmu.hh
+++ b/src/arch/arm/stage2_mmu.hh
@@ -60,7 +60,7 @@
     DmaPort port;
 
     /** Request id for requests generated by this MMU */
-    MasterID masterId;
+    RequestorID requestorId;
 
   public:
     /** This translation class is used to trigger the data fetch once a timing
@@ -88,10 +88,11 @@
         finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
                BaseTLB::Mode mode);
 
-        void setVirt(Addr vaddr, int size, Request::Flags flags, int masterId)
+        void setVirt(Addr vaddr, int size, Request::Flags flags,
+                    int requestorId)
         {
             numBytes = size;
-            req->setVirt(vaddr, size, flags, masterId, 0);
+            req->setVirt(vaddr, size, flags, requestorId, 0);
         }
 
         void translateTiming(ThreadContext *tc)
diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc
index 1c89c22..9462e27 100644
--- a/src/arch/arm/table_walker.cc
+++ b/src/arch/arm/table_walker.cc
@@ -55,7 +55,7 @@
 
 TableWalker::TableWalker(const Params *p)
     : ClockedObject(p),
-      stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
+      stage2Mmu(NULL), port(NULL), requestorId(Request::invldRequestorId),
       isStage2(p->is_stage2), tlb(NULL),
       currState(NULL), pending(false),
       numSquashable(p->num_squash_per_cycle),
@@ -97,11 +97,11 @@
 }
 
 void
-TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
+TableWalker::setMMU(Stage2MMU *m, RequestorID requestor_id)
 {
     stage2Mmu = m;
     port = &m->getDMAPort();
-    masterId = master_id;
+    requestorId = requestor_id;
 }
 
 void
@@ -2122,7 +2122,7 @@
             (this->*doDescriptor)();
         } else {
             RequestPtr req = std::make_shared<Request>(
-                descAddr, numBytes, flags, masterId);
+                descAddr, numBytes, flags, requestorId);
 
             req->taskId(ContextSwitchTaskId::DMA);
             PacketPtr  pkt = new Packet(req, MemCmd::ReadReq);
diff --git a/src/arch/arm/table_walker.hh b/src/arch/arm/table_walker.hh
index 6f04149..8f4aaef 100644
--- a/src/arch/arm/table_walker.hh
+++ b/src/arch/arm/table_walker.hh
@@ -829,8 +829,8 @@
     /** Port shared by the two table walkers. */
     DmaPort* port;
 
-    /** Master id assigned by the MMU. */
-    MasterID masterId;
+    /** Requestor id assigned by the MMU. */
+    RequestorID requestorId;
 
     /** Indicates whether this table walker is part of the stage 2 mmu */
     const bool isStage2;
@@ -912,7 +912,7 @@
 
     void setTlb(TLB *_tlb) { tlb = _tlb; }
     TLB* getTlb() { return tlb; }
-    void setMMU(Stage2MMU *m, MasterID master_id);
+    void setMMU(Stage2MMU *m, RequestorID requestor_id);
     void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
                   uint8_t texcb, bool s);
     void memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
diff --git a/src/arch/arm/tlb.cc b/src/arch/arm/tlb.cc
index 1d43a0d..413a13e 100644
--- a/src/arch/arm/tlb.cc
+++ b/src/arch/arm/tlb.cc
@@ -107,10 +107,10 @@
 }
 
 void
-TLB::setMMU(Stage2MMU *m, MasterID master_id)
+TLB::setMMU(Stage2MMU *m, RequestorID requestor_id)
 {
     stage2Mmu = m;
-    tableWalker->setMMU(m, master_id);
+    tableWalker->setMMU(m, requestor_id);
 }
 
 bool
diff --git a/src/arch/arm/tlb.hh b/src/arch/arm/tlb.hh
index 004ce0b..63928cb 100644
--- a/src/arch/arm/tlb.hh
+++ b/src/arch/arm/tlb.hh
@@ -224,7 +224,7 @@
 
     TableWalker *getTableWalker() { return tableWalker; }
 
-    void setMMU(Stage2MMU *m, MasterID master_id);
+    void setMMU(Stage2MMU *m, RequestorID requestor_id);
 
     int getsize() const { return size; }
 
@@ -399,7 +399,7 @@
      * reference. For ARM this method will always return a valid port
      * pointer.
      *
-     * @return A pointer to the walker master port
+     * @return A pointer to the walker request port
      */
     Port *getTableWalkerPort() override;
 
diff --git a/src/arch/arm/tracers/tarmac_parser.cc b/src/arch/arm/tracers/tarmac_parser.cc
index 9ed5bf4..c7bf977 100644
--- a/src/arch/arm/tracers/tarmac_parser.cc
+++ b/src/arch/arm/tracers/tarmac_parser.cc
@@ -1287,7 +1287,7 @@
     ArmISA::TLB* dtb = static_cast<TLB*>(thread->getDTBPtr());
 
     req->setVirt(addr, size, flags, thread->pcState().instAddr(),
-                 Request::funcMasterId);
+                 Request::funcRequestorId);
 
     // Translate to physical address
     Fault fault = dtb->translateAtomic(req, thread, BaseTLB::Read);
diff --git a/src/arch/gcn3/gpu_mem_helpers.hh b/src/arch/gcn3/gpu_mem_helpers.hh
index 562158d..9846f41 100644
--- a/src/arch/gcn3/gpu_mem_helpers.hh
+++ b/src/arch/gcn3/gpu_mem_helpers.hh
@@ -87,14 +87,14 @@
                 assert(!misaligned_acc);
 
                 req = std::make_shared<Request>(vaddr, sizeof(T), 0,
-                    gpuDynInst->computeUnit()->masterId(), 0,
+                    gpuDynInst->computeUnit()->requestorId(), 0,
                     gpuDynInst->wfDynId,
                     gpuDynInst->makeAtomicOpFunctor<T>(
                         &(reinterpret_cast<T*>(gpuDynInst->a_data))[lane],
                         &(reinterpret_cast<T*>(gpuDynInst->x_data))[lane]));
             } else {
                 req = std::make_shared<Request>(vaddr, req_size, 0,
-                                  gpuDynInst->computeUnit()->masterId(), 0,
+                                  gpuDynInst->computeUnit()->requestorId(), 0,
                                   gpuDynInst->wfDynId);
             }
 
@@ -158,7 +158,7 @@
     bool misaligned_acc = split_addr > vaddr;
 
     RequestPtr req = std::make_shared<Request>(vaddr, req_size, 0,
-                                 gpuDynInst->computeUnit()->masterId(), 0,
+                                 gpuDynInst->computeUnit()->requestorId(), 0,
                                  gpuDynInst->wfDynId);
 
     if (misaligned_acc) {
diff --git a/src/arch/gcn3/insts/op_encodings.hh b/src/arch/gcn3/insts/op_encodings.hh
index b35fb3d..e9dcac7 100644
--- a/src/arch/gcn3/insts/op_encodings.hh
+++ b/src/arch/gcn3/insts/op_encodings.hh
@@ -584,7 +584,7 @@
             gpuDynInst->setStatusVector(0, 1);
             RequestPtr req = std::make_shared<Request>(0, 0, 0,
                                        gpuDynInst->computeUnit()->
-                                       masterId(), 0,
+                                       requestorId(), 0,
                                        gpuDynInst->wfDynId);
             gpuDynInst->setRequestFlags(req);
             gpuDynInst->computeUnit()->
diff --git a/src/arch/generic/BaseTLB.py b/src/arch/generic/BaseTLB.py
index cca7da6..03fb68b 100644
--- a/src/arch/generic/BaseTLB.py
+++ b/src/arch/generic/BaseTLB.py
@@ -33,5 +33,9 @@
     abstract = True
     cxx_header = "arch/generic/tlb.hh"
     # Ports to connect with other TLB levels
-    slave  = VectorSlavePort("Port closer to the CPU side")
-    master = RequestPort("Port closer to memory side")
+    cpu_side_ports  = VectorResponsePort("Ports closer to the CPU side")
+    slave     = DeprecatedParam(cpu_side_ports,
+                    '`slave` is now called `cpu_side_ports`')
+    mem_side_port = RequestPort("Port closer to memory side")
+    master   = DeprecatedParam(mem_side_port,
+                    '`master` is now called `mem_side_port`')
diff --git a/src/arch/isa_parser.py b/src/arch/isa_parser.py
index 8eb90ef..7d8bffd 100755
--- a/src/arch/isa_parser.py
+++ b/src/arch/isa_parser.py
@@ -1294,7 +1294,7 @@
 class SubOperandList(OperandList):
     '''Find all the operands in the given code block.  Returns an operand
     descriptor list (instance of class OperandList).'''
-    def __init__(self, parser, code, master_list):
+    def __init__(self, parser, code, requestor_list):
         self.items = []
         self.bases = {}
         # delete strings and comments so we don't match on operands inside
@@ -1315,17 +1315,17 @@
             if op_base in parser.elemToVector:
                 elem_op = op_base
                 op_base = parser.elemToVector[elem_op]
-            # find this op in the master list
-            op_desc = master_list.find_base(op_base)
+            # find this op in the requestor list
+            op_desc = requestor_list.find_base(op_base)
             if not op_desc:
-                error('Found operand %s which is not in the master list!'
+                error('Found operand %s which is not in the requestor list!'
                       % op_base)
             else:
                 # See if we've already found this operand
                 op_desc = self.find_base(op_base)
                 if not op_desc:
                     # if not, add a reference to it to this sub list
-                    self.append(master_list.bases[op_base])
+                    self.append(requestor_list.bases[op_base])
 
             # start next search after end of current match
             next_pos = match.end()
diff --git a/src/arch/riscv/pagetable_walker.cc b/src/arch/riscv/pagetable_walker.cc
index 786cb81..3832ece 100644
--- a/src/arch/riscv/pagetable_walker.cc
+++ b/src/arch/riscv/pagetable_walker.cc
@@ -396,7 +396,7 @@
     else {
         //If we didn't return, we're setting up another read.
         RequestPtr request = std::make_shared<Request>(
-            nextRead, oldRead->getSize(), flags, walker->masterId);
+            nextRead, oldRead->getSize(), flags, walker->requestorId);
         read = new Packet(request, MemCmd::ReadReq);
         read->allocate();
 
@@ -435,7 +435,7 @@
 
     Request::Flags flags = Request::PHYSICAL;
     RequestPtr request = std::make_shared<Request>(
-        topAddr, sizeof(PTESv39), flags, walker->masterId);
+        topAddr, sizeof(PTESv39), flags, walker->requestorId);
 
     read = new Packet(request, MemCmd::ReadReq);
     read->allocate();
diff --git a/src/arch/riscv/pagetable_walker.hh b/src/arch/riscv/pagetable_walker.hh
index d9ab569..de4d635 100644
--- a/src/arch/riscv/pagetable_walker.hh
+++ b/src/arch/riscv/pagetable_walker.hh
@@ -166,7 +166,7 @@
         // The TLB we're supposed to load.
         TLB * tlb;
         System * sys;
-        MasterID masterId;
+        RequestorID requestorId;
 
         // The number of outstanding walks that can be squashed per cycle.
         unsigned numSquashable;
@@ -202,7 +202,7 @@
         Walker(const Params *params) :
             ClockedObject(params), port(name() + ".port", this),
             funcState(this, NULL, NULL, true), tlb(NULL), sys(params->system),
-            masterId(sys->getMasterId(this)),
+            requestorId(sys->getRequestorId(this)),
             numSquashable(params->num_squash_per_cycle),
             startWalkWrapperEvent([this]{ startWalkWrapper(); }, name())
         {
diff --git a/src/arch/x86/X86LocalApic.py b/src/arch/x86/X86LocalApic.py
index 442a5e1..39004d2 100644
--- a/src/arch/x86/X86LocalApic.py
+++ b/src/arch/x86/X86LocalApic.py
@@ -48,8 +48,15 @@
     type = 'X86LocalApic'
     cxx_class = 'X86ISA::Interrupts'
     cxx_header = 'arch/x86/interrupts.hh'
-    int_master = RequestPort("Port for sending interrupt messages")
-    int_slave = ResponsePort("Port for receiving interrupt messages")
+
+    int_requestor = RequestPort("Port for sending interrupt messages")
+    int_master    = DeprecatedParam(int_requestor,
+                        '`int_master` is now called `int_requestor`')
+
+    int_responder = ResponsePort("Port for receiving interrupt messages")
+    int_slave     = DeprecatedParam(int_responder,
+                        '`int_slave` is now called `int_responder`')
+
     int_latency = Param.Latency('1ns', \
             "Latency for an interrupt to propagate through this device.")
     pio = ResponsePort("Programmed I/O port")
diff --git a/src/arch/x86/interrupts.cc b/src/arch/x86/interrupts.cc
index 2be0746..7767c80 100644
--- a/src/arch/x86/interrupts.cc
+++ b/src/arch/x86/interrupts.cc
@@ -288,12 +288,12 @@
 void
 X86ISA::Interrupts::init()
 {
-    panic_if(!intMasterPort.isConnected(),
+    panic_if(!intRequestPort.isConnected(),
             "Int port not connected to anything!");
     panic_if(!pioPort.isConnected(),
             "Pio port of %s not connected to anything!", name());
 
-    intSlavePort.sendRangeChange();
+    intResponsePort.sendRangeChange();
     pioPort.sendRangeChange();
 }
 
@@ -541,7 +541,7 @@
             regs[APIC_INTERRUPT_COMMAND_LOW] = low;
             for (auto id: apics) {
                 PacketPtr pkt = buildIntTriggerPacket(id, message);
-                intMasterPort.sendMessage(pkt, sys->isTimingMode(),
+                intRequestPort.sendMessage(pkt, sys->isTimingMode(),
                         [this](PacketPtr pkt) { completeIPI(pkt); });
             }
             newVal = regs[APIC_INTERRUPT_COMMAND_LOW];
@@ -603,8 +603,8 @@
       pendingStartup(false), startupVector(0),
       startedUp(false), pendingUnmaskableInt(false),
       pendingIPIs(0),
-      intSlavePort(name() + ".int_slave", this, this),
-      intMasterPort(name() + ".int_master", this, this, p->int_latency),
+      intResponsePort(name() + ".int_responder", this, this),
+      intRequestPort(name() + ".int_requestor", this, this, p->int_latency),
       pioPort(this), pioDelay(p->pio_latency)
 {
     memset(regs, 0, sizeof(regs));
diff --git a/src/arch/x86/interrupts.hh b/src/arch/x86/interrupts.hh
index c1b2565..f078d42 100644
--- a/src/arch/x86/interrupts.hh
+++ b/src/arch/x86/interrupts.hh
@@ -174,8 +174,8 @@
     int initialApicId;
 
     // Ports for interrupts.
-    IntSlavePort<Interrupts> intSlavePort;
-    IntMasterPort<Interrupts> intMasterPort;
+    IntResponsePort<Interrupts> intResponsePort;
+    IntRequestPort<Interrupts> intRequestPort;
 
     // Port for memory mapped register accesses.
     PioPort<Interrupts> pioPort;
@@ -228,10 +228,10 @@
     Port &getPort(const std::string &if_name,
                   PortID idx=InvalidPortID) override
     {
-        if (if_name == "int_master") {
-            return intMasterPort;
-        } else if (if_name == "int_slave") {
-            return intSlavePort;
+        if (if_name == "int_requestor") {
+            return intRequestPort;
+        } else if (if_name == "int_responder") {
+            return intResponsePort;
         } else if (if_name == "pio") {
             return pioPort;
         }
diff --git a/src/arch/x86/pagetable_walker.cc b/src/arch/x86/pagetable_walker.cc
index b540be3..f5b5521 100644
--- a/src/arch/x86/pagetable_walker.cc
+++ b/src/arch/x86/pagetable_walker.cc
@@ -519,7 +519,7 @@
         Request::Flags flags = oldRead->req->getFlags();
         flags.set(Request::UNCACHEABLE, uncacheable);
         RequestPtr request = std::make_shared<Request>(
-            nextRead, oldRead->getSize(), flags, walker->masterId);
+            nextRead, oldRead->getSize(), flags, walker->requestorId);
         read = new Packet(request, MemCmd::ReadReq);
         read->allocate();
         // If we need to write, adjust the read packet to write the modified
@@ -588,7 +588,7 @@
         flags.set(Request::UNCACHEABLE);
 
     RequestPtr request = std::make_shared<Request>(
-        topAddr, dataSize, flags, walker->masterId);
+        topAddr, dataSize, flags, walker->requestorId);
 
     read = new Packet(request, MemCmd::ReadReq);
     read->allocate();
diff --git a/src/arch/x86/pagetable_walker.hh b/src/arch/x86/pagetable_walker.hh
index 55bb098..dba76c1 100644
--- a/src/arch/x86/pagetable_walker.hh
+++ b/src/arch/x86/pagetable_walker.hh
@@ -168,7 +168,7 @@
         // The TLB we're supposed to load.
         TLB * tlb;
         System * sys;
-        MasterID masterId;
+        RequestorID requestorId;
 
         // The number of outstanding walks that can be squashed per cycle.
         unsigned numSquashable;
@@ -204,7 +204,7 @@
         Walker(const Params *params) :
             ClockedObject(params), port(name() + ".port", this),
             funcState(this, NULL, NULL, true), tlb(NULL), sys(params->system),
-            masterId(sys->getMasterId(this)),
+            requestorId(sys->getRequestorId(this)),
             numSquashable(params->num_squash_per_cycle),
             startWalkWrapperEvent([this]{ startWalkWrapper(); }, name())
         {
diff --git a/src/cpu/BaseCPU.py b/src/cpu/BaseCPU.py
index ee6c646..c9e8ae6 100644
--- a/src/cpu/BaseCPU.py
+++ b/src/cpu/BaseCPU.py
@@ -182,25 +182,25 @@
     if buildEnv['TARGET_ISA'] in ['x86', 'arm', 'riscv']:
         _cached_ports += ["itb.walker.port", "dtb.walker.port"]
 
-    _uncached_slave_ports = []
-    _uncached_master_ports = []
+    _uncached_interrupt_response_ports = []
+    _uncached_interrupt_request_ports = []
     if buildEnv['TARGET_ISA'] == 'x86':
-        _uncached_slave_ports += ["interrupts[0].pio",
-                                  "interrupts[0].int_slave"]
-        _uncached_master_ports += ["interrupts[0].int_master"]
+        _uncached_interrupt_response_ports += ["interrupts[0].pio",
+                                  "interrupts[0].int_responder"]
+        _uncached_interrupt_request_ports += ["interrupts[0].int_requestor"]
 
     def createInterruptController(self):
         self.interrupts = [ArchInterrupts() for i in range(self.numThreads)]
 
     def connectCachedPorts(self, bus):
         for p in self._cached_ports:
-            exec('self.%s = bus.slave' % p)
+            exec('self.%s = bus.cpu_side_ports' % p)
 
     def connectUncachedPorts(self, bus):
-        for p in self._uncached_slave_ports:
-            exec('self.%s = bus.master' % p)
-        for p in self._uncached_master_ports:
-            exec('self.%s = bus.slave' % p)
+        for p in self._uncached_interrupt_response_ports:
+            exec('self.%s = bus.mem_side_ports' % p)
+        for p in self._uncached_interrupt_request_ports:
+            exec('self.%s = bus.cpu_side_ports' % p)
 
     def connectAllPorts(self, cached_bus, uncached_bus = None):
         self.connectCachedPorts(cached_bus)
@@ -237,7 +237,7 @@
         self.toL2Bus = xbar if xbar else L2XBar()
         self.connectCachedPorts(self.toL2Bus)
         self.l2cache = l2c
-        self.toL2Bus.master = self.l2cache.cpu_side
+        self.toL2Bus.mem_side_ports = self.l2cache.cpu_side
         self._cached_ports = ['l2cache.mem_side']
 
     def createThreads(self):
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index fb99712..9ba1b31 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -122,8 +122,8 @@
 
 BaseCPU::BaseCPU(Params *p, bool is_checker)
     : ClockedObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
-      _instMasterId(p->system->getMasterId(this, "inst")),
-      _dataMasterId(p->system->getMasterId(this, "data")),
+      _instRequestorId(p->system->getRequestorId(this, "inst")),
+      _dataRequestorId(p->system->getRequestorId(this, "data")),
       _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
       _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
       interrupts(p->interrupts), numThreads(p->numThreads), system(p->system),
@@ -250,7 +250,7 @@
     if (secondAddr > addr)
         size = secondAddr - addr;
 
-    req->setVirt(addr, size, 0x0, dataMasterId(), tc->instAddr());
+    req->setVirt(addr, size, 0x0, dataRequestorId(), tc->instAddr());
 
     // translate to physical address
     Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
diff --git a/src/cpu/base.hh b/src/cpu/base.hh
index 3ef5519..5320492 100644
--- a/src/cpu/base.hh
+++ b/src/cpu/base.hh
@@ -123,10 +123,10 @@
     const uint32_t _socketId;
 
     /** instruction side request id that must be placed in all requests */
-    MasterID _instMasterId;
+    RequestorID _instRequestorId;
 
     /** data side request id that must be placed in all requests */
-    MasterID _dataMasterId;
+    RequestorID _dataRequestorId;
 
     /** An internal representation of a task identifier within gem5. This is
      * used so the CPU can add which taskId (which is an internal representation
@@ -181,9 +181,9 @@
     uint32_t socketId() const { return _socketId; }
 
     /** Reads this CPU's unique data requestor ID */
-    MasterID dataMasterId() const { return _dataMasterId; }
+    RequestorID dataRequestorId() const { return _dataRequestorId; }
     /** Reads this CPU's unique instruction requestor ID */
-    MasterID instMasterId() const { return _instMasterId; }
+    RequestorID instRequestorId() const { return _instRequestorId; }
 
     /**
      * Get a port on this CPU. All CPUs have a data and
diff --git a/src/cpu/base_dyn_inst.hh b/src/cpu/base_dyn_inst.hh
index 56b9114..a6c08cc 100644
--- a/src/cpu/base_dyn_inst.hh
+++ b/src/cpu/base_dyn_inst.hh
@@ -469,7 +469,7 @@
     uint32_t socketId() const { return cpu->socketId(); }
 
     /** Read this CPU's data requestor ID */
-    MasterID masterId() const { return cpu->dataMasterId(); }
+    RequestorID requestorId() const { return cpu->dataRequestorId(); }
 
     /** Read this context's system-wide ID **/
     ContextID contextId() const { return thread->contextId(); }
diff --git a/src/cpu/checker/cpu.cc b/src/cpu/checker/cpu.cc
index b016938..fe0300e 100644
--- a/src/cpu/checker/cpu.cc
+++ b/src/cpu/checker/cpu.cc
@@ -58,7 +58,7 @@
 void
 CheckerCPU::init()
 {
-    masterId = systemPtr->getMasterId(this);
+    requestorId = systemPtr->getRequestorId(this);
 }
 
 CheckerCPU::CheckerCPU(Params *p)
@@ -154,13 +154,13 @@
         auto it_end = byte_enable.cbegin() + (size - size_left);
         if (isAnyActiveElement(it_start, it_end)) {
             mem_req = std::make_shared<Request>(frag_addr, frag_size,
-                    flags, masterId, thread->pcState().instAddr(),
+                    flags, requestorId, thread->pcState().instAddr(),
                     tc->contextId());
             mem_req->setByteEnable(std::vector<bool>(it_start, it_end));
         }
     } else {
         mem_req = std::make_shared<Request>(frag_addr, frag_size,
-                    flags, masterId, thread->pcState().instAddr(),
+                    flags, requestorId, thread->pcState().instAddr(),
                     tc->contextId());
     }
 
diff --git a/src/cpu/checker/cpu.hh b/src/cpu/checker/cpu.hh
index 4530d4c..f2395d7 100644
--- a/src/cpu/checker/cpu.hh
+++ b/src/cpu/checker/cpu.hh
@@ -89,7 +89,7 @@
     using VecRegContainer = TheISA::VecRegContainer;
 
     /** id attached to all issued requests */
-    MasterID masterId;
+    RequestorID requestorId;
   public:
     void init() override;
 
diff --git a/src/cpu/checker/cpu_impl.hh b/src/cpu/checker/cpu_impl.hh
index c6d2cf8..4fab375 100644
--- a/src/cpu/checker/cpu_impl.hh
+++ b/src/cpu/checker/cpu_impl.hh
@@ -237,11 +237,11 @@
             if (!curMacroStaticInst) {
                 // set up memory request for instruction fetch
                 auto mem_req = std::make_shared<Request>(
-                    fetch_PC, sizeof(MachInst), 0, masterId, fetch_PC,
+                    fetch_PC, sizeof(MachInst), 0, requestorId, fetch_PC,
                     thread->contextId());
 
                 mem_req->setVirt(fetch_PC, sizeof(MachInst),
-                                 Request::INST_FETCH, masterId,
+                                 Request::INST_FETCH, requestorId,
                                  thread->instAddr());
 
                 fault = itb->translateFunctional(
diff --git a/src/cpu/kvm/base.cc b/src/cpu/kvm/base.cc
index 5e3ffd7..83992cd 100644
--- a/src/cpu/kvm/base.cc
+++ b/src/cpu/kvm/base.cc
@@ -1073,7 +1073,7 @@
     syncThreadContext();
 
     RequestPtr mmio_req = std::make_shared<Request>(
-        paddr, size, Request::UNCACHEABLE, dataMasterId());
+        paddr, size, Request::UNCACHEABLE, dataRequestorId());
 
     mmio_req->setContext(tc->contextId());
     // Some architectures do need to massage physical addresses a bit
diff --git a/src/cpu/kvm/x86_cpu.cc b/src/cpu/kvm/x86_cpu.cc
index 6c44af0..5a667d4 100644
--- a/src/cpu/kvm/x86_cpu.cc
+++ b/src/cpu/kvm/x86_cpu.cc
@@ -1351,7 +1351,7 @@
     for (int i = 0; i < count; ++i) {
         RequestPtr io_req = std::make_shared<Request>(
             pAddr, kvm_run.io.size,
-            Request::UNCACHEABLE, dataMasterId());
+            Request::UNCACHEABLE, dataRequestorId());
 
         io_req->setContext(tc->contextId());
 
diff --git a/src/cpu/minor/fetch1.cc b/src/cpu/minor/fetch1.cc
index e49140e..4977e3d 100644
--- a/src/cpu/minor/fetch1.cc
+++ b/src/cpu/minor/fetch1.cc
@@ -168,7 +168,7 @@
 
     request->request->setContext(cpu.threads[tid]->getTC()->contextId());
     request->request->setVirt(
-        aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
+        aligned_pc, request_size, Request::INST_FETCH, cpu.instRequestorId(),
         /* I've no idea why we need the PC, but give it */
         thread.pc.instAddr());
 
diff --git a/src/cpu/minor/lsq.cc b/src/cpu/minor/lsq.cc
index e4a9dc0..106b51b 100644
--- a/src/cpu/minor/lsq.cc
+++ b/src/cpu/minor/lsq.cc
@@ -498,7 +498,7 @@
         if (byte_enable.empty()) {
             fragment->setVirt(
                 fragment_addr, fragment_size, request->getFlags(),
-                request->masterId(), request->getPC());
+                request->requestorId(), request->getPC());
         } else {
             // Set up byte-enable mask for the current fragment
             auto it_start = byte_enable.begin() +
@@ -508,7 +508,7 @@
             if (isAnyActiveElement(it_start, it_end)) {
                 fragment->setVirt(
                     fragment_addr, fragment_size, request->getFlags(),
-                    request->masterId(), request->getPC());
+                    request->requestorId(), request->getPC());
                 fragment->setByteEnable(std::vector<bool>(it_start, it_end));
             } else {
                 disabled_fragment = true;
@@ -1645,7 +1645,7 @@
     int cid = cpu.threads[inst->id.threadId]->getTC()->contextId();
     request->request->setContext(cid);
     request->request->setVirt(
-        addr, size, flags, cpu.dataMasterId(),
+        addr, size, flags, cpu.dataRequestorId(),
         /* I've no idea why we need the PC, but give it */
         inst->pc.instAddr(), std::move(amo_op));
     request->request->setByteEnable(byte_enable);
diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh
index fd9146b..75d065f 100644
--- a/src/cpu/o3/commit_impl.hh
+++ b/src/cpu/o3/commit_impl.hh
@@ -736,7 +736,7 @@
 {
     // Verify that we still have an interrupt to handle
     if (!cpu->checkInterrupts(0)) {
-        DPRINTF(Commit, "Pending interrupt is cleared by master before "
+        DPRINTF(Commit, "Pending interrupt is cleared by requestor before "
                 "it got handled. Restart fetching from the orig path.\n");
         toIEW->commitInfo[0].clearInterrupt = true;
         interrupt = NoFault;
diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc
index 414913d..01938f1 100644
--- a/src/cpu/o3/cpu.cc
+++ b/src/cpu/o3/cpu.cc
@@ -1843,7 +1843,7 @@
 
     // notify l1 d-cache (ruby) that core has aborted transaction
     RequestPtr req =
-        std::make_shared<Request>(addr, size, flags, _dataMasterId);
+        std::make_shared<Request>(addr, size, flags, _dataRequestorId);
 
     req->taskId(taskId());
     req->setContext(this->thread[tid]->contextId());
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index f449cac..d38420b 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -599,7 +599,7 @@
     // Build request here.
     RequestPtr mem_req = std::make_shared<Request>(
         fetchBufferBlockPC, fetchBufferSize,
-        Request::INST_FETCH, cpu->instMasterId(), pc,
+        Request::INST_FETCH, cpu->instRequestorId(), pc,
         cpu->thread[tid]->contextId());
 
     mem_req->taskId(cpu->taskId());
diff --git a/src/cpu/o3/lsq.hh b/src/cpu/o3/lsq.hh
index bc5e154..6e7d8d7 100644
--- a/src/cpu/o3/lsq.hh
+++ b/src/cpu/o3/lsq.hh
@@ -409,7 +409,7 @@
             if (byte_enable.empty() ||
                 isAnyActiveElement(byte_enable.begin(), byte_enable.end())) {
                 auto request = std::make_shared<Request>(
-                        addr, size, _flags, _inst->masterId(),
+                        addr, size, _flags, _inst->requestorId(),
                         _inst->instAddr(), _inst->contextId(),
                         std::move(_amo_op));
                 if (!byte_enable.empty()) {
@@ -456,9 +456,9 @@
          */
         void
         setVirt(Addr vaddr, unsigned size, Request::Flags flags_,
-                MasterID mid, Addr pc)
+                RequestorID requestor_id, Addr pc)
         {
-            request()->setVirt(vaddr, size, flags_, mid, pc);
+            request()->setVirt(vaddr, size, flags_, requestor_id, pc);
         }
 
         void
diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh
index 7657b23..c4cb45e 100644
--- a/src/cpu/o3/lsq_impl.hh
+++ b/src/cpu/o3/lsq_impl.hh
@@ -892,7 +892,7 @@
     uint32_t size_so_far = 0;
 
     mainReq = std::make_shared<Request>(base_addr,
-                _size, _flags, _inst->masterId(),
+                _size, _flags, _inst->requestorId(),
                 _inst->instAddr(), _inst->contextId());
     if (!_byteEnable.empty()) {
         mainReq->setByteEnable(_byteEnable);
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 34be352..20c6e1c 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -350,14 +350,14 @@
         auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
         auto it_end = byte_enable.begin() + (size - size_left);
         if (isAnyActiveElement(it_start, it_end)) {
-            req->setVirt(frag_addr, frag_size, flags, dataMasterId(),
+            req->setVirt(frag_addr, frag_size, flags, dataRequestorId(),
                          inst_addr);
             req->setByteEnable(std::vector<bool>(it_start, it_end));
         } else {
             predicate = false;
         }
     } else {
-        req->setVirt(frag_addr, frag_size, flags, dataMasterId(),
+        req->setVirt(frag_addr, frag_size, flags, dataRequestorId(),
                      inst_addr);
         req->setByteEnable(std::vector<bool>());
     }
@@ -592,7 +592,7 @@
     dcache_latency = 0;
 
     req->taskId(taskId());
-    req->setVirt(addr, size, flags, dataMasterId(),
+    req->setVirt(addr, size, flags, dataRequestorId(),
                  thread->pcState().instAddr(), std::move(amo_op));
 
     // translate to physical address
diff --git a/src/cpu/simple/base.cc b/src/cpu/simple/base.cc
index bf940ba..132d919 100644
--- a/src/cpu/simple/base.cc
+++ b/src/cpu/simple/base.cc
@@ -487,7 +487,7 @@
     DPRINTF(Fetch, "Fetch: Inst PC:%08p, Fetch PC:%08p\n", instAddr, fetchPC);
 
     req->setVirt(fetchPC, sizeof(MachInst), Request::INST_FETCH,
-                 instMasterId(), instAddr);
+                 instRequestorId(), instAddr);
 }
 
 
diff --git a/src/cpu/simple/probes/simpoint.cc b/src/cpu/simple/probes/simpoint.cc
index e72e4ca..10f3105 100644
--- a/src/cpu/simple/probes/simpoint.cc
+++ b/src/cpu/simple/probes/simpoint.cc
@@ -93,7 +93,8 @@
         auto map_itr = bbMap.find(currentBBV);
         if (map_itr == bbMap.end()){
             // If a new (previously unseen) basic block is found,
-            // add a new unique id, record num of insts and insert into bbMap.
+            // add a new unique id, record num of insts and insert
+            // into bbMap.
             BBInfo info;
             info.id = bbMap.size() + 1;
             info.insts = currentBBVInstCount;
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 820bede..c898d79 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -466,7 +466,7 @@
         traceData->setMem(addr, size, flags);
 
     RequestPtr req = std::make_shared<Request>(
-        addr, size, flags, dataMasterId(), pc, thread->contextId());
+        addr, size, flags, dataRequestorId(), pc, thread->contextId());
     if (!byte_enable.empty()) {
         req->setByteEnable(byte_enable);
     }
@@ -550,7 +550,7 @@
         traceData->setMem(addr, size, flags);
 
     RequestPtr req = std::make_shared<Request>(
-        addr, size, flags, dataMasterId(), pc, thread->contextId());
+        addr, size, flags, dataRequestorId(), pc, thread->contextId());
     if (!byte_enable.empty()) {
         req->setByteEnable(byte_enable);
     }
@@ -608,7 +608,7 @@
         traceData->setMem(addr, size, flags);
 
     RequestPtr req = make_shared<Request>(addr, size, flags,
-                            dataMasterId(), pc, thread->contextId(),
+                            dataRequestorId(), pc, thread->contextId(),
                             std::move(amo_op));
 
     assert(req->hasAtomicOpFunctor());
@@ -1228,7 +1228,7 @@
         traceData->setMem(addr, size, flags);
 
     RequestPtr req = std::make_shared<Request>(
-        addr, size, flags, dataMasterId());
+        addr, size, flags, dataRequestorId());
 
     req->setPC(pc);
     req->setContext(thread->contextId());
@@ -1277,7 +1277,7 @@
     // notify l1 d-cache (ruby) that core has aborted transaction
 
     RequestPtr req = std::make_shared<Request>(
-        addr, size, flags, dataMasterId());
+        addr, size, flags, dataRequestorId());
 
     req->setPC(pc);
     req->setContext(thread->contextId());
diff --git a/src/cpu/testers/directedtest/DirectedGenerator.cc b/src/cpu/testers/directedtest/DirectedGenerator.cc
index 2d76b86..44f3640 100644
--- a/src/cpu/testers/directedtest/DirectedGenerator.cc
+++ b/src/cpu/testers/directedtest/DirectedGenerator.cc
@@ -33,7 +33,7 @@
 
 DirectedGenerator::DirectedGenerator(const Params *p)
     : SimObject(p),
-      masterId(p->system->getMasterId(this))
+      requestorId(p->system->getRequestorId(this))
 {
     m_num_cpus = p->num_cpus;
     m_directed_tester = NULL;
diff --git a/src/cpu/testers/directedtest/DirectedGenerator.hh b/src/cpu/testers/directedtest/DirectedGenerator.hh
index 2d03372..f53ff07 100644
--- a/src/cpu/testers/directedtest/DirectedGenerator.hh
+++ b/src/cpu/testers/directedtest/DirectedGenerator.hh
@@ -49,7 +49,7 @@
 
   protected:
     int m_num_cpus;
-    MasterID masterId;
+    RequestorID requestorId;
     RubyDirectedTester* m_directed_tester;
 };
 
diff --git a/src/cpu/testers/directedtest/InvalidateGenerator.cc b/src/cpu/testers/directedtest/InvalidateGenerator.cc
index 5640163..a35c87e 100644
--- a/src/cpu/testers/directedtest/InvalidateGenerator.cc
+++ b/src/cpu/testers/directedtest/InvalidateGenerator.cc
@@ -60,7 +60,8 @@
     Packet::Command cmd;
 
     // For simplicity, requests are assumed to be 1 byte-sized
-    RequestPtr req = std::make_shared<Request>(m_address, 1, flags, masterId);
+    RequestPtr req = std::make_shared<Request>(m_address, 1, flags,
+                                               requestorId);
 
     //
     // Based on the current state, issue a load or a store
diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.py b/src/cpu/testers/directedtest/RubyDirectedTester.py
index 25a5228..0bbcb34 100644
--- a/src/cpu/testers/directedtest/RubyDirectedTester.py
+++ b/src/cpu/testers/directedtest/RubyDirectedTester.py
@@ -53,6 +53,6 @@
 class RubyDirectedTester(ClockedObject):
     type = 'RubyDirectedTester'
     cxx_header = "cpu/testers/directedtest/RubyDirectedTester.hh"
-    cpuPort = VectorMasterPort("the cpu ports")
+    cpuPort = VectorRequestPort("the cpu ports")
     requests_to_complete = Param.Int("checks to complete")
     generator = Param.DirectedGenerator("the request generator")
diff --git a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
index 562b7d5..a404ee9 100644
--- a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
+++ b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
@@ -60,7 +60,8 @@
     Request::Flags flags;
 
     // For simplicity, requests are assumed to be 1 byte-sized
-    RequestPtr req = std::make_shared<Request>(m_address, 1, flags, masterId);
+    RequestPtr req = std::make_shared<Request>(m_address, 1, flags,
+                                               requestorId);
 
     Packet::Command cmd;
     bool do_write = (random_mt.random(0, 100) < m_percent_writes);
diff --git a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
index 87e940c..dc92055 100644
--- a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
+++ b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
@@ -90,7 +90,7 @@
       injVnet(p->inj_vnet),
       precision(p->precision),
       responseLimit(p->response_limit),
-      masterId(p->system->getMasterId(this))
+      requestorId(p->system->getRequestorId(this))
 {
     // set up counters
     noResponseCycles = 0;
@@ -290,18 +290,20 @@
     if (injReqType == 0) {
         // generate packet for virtual network 0
         requestType = MemCmd::ReadReq;
-        req = std::make_shared<Request>(paddr, access_size, flags, masterId);
+        req = std::make_shared<Request>(paddr, access_size, flags,
+                                        requestorId);
     } else if (injReqType == 1) {
         // generate packet for virtual network 1
         requestType = MemCmd::ReadReq;
         flags.set(Request::INST_FETCH);
         req = std::make_shared<Request>(
-            0x0, access_size, flags, masterId, 0x0, 0);
+            0x0, access_size, flags, requestorId, 0x0, 0);
         req->setPaddr(paddr);
     } else {  // if (injReqType == 2)
         // generate packet for virtual network 2
         requestType = MemCmd::WriteReq;
-        req = std::make_shared<Request>(paddr, access_size, flags, masterId);
+        req = std::make_shared<Request>(paddr, access_size, flags,
+                                        requestorId);
     }
 
     req->setContext(id);
diff --git a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh
index 524a960..2864ccf 100644
--- a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh
+++ b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh
@@ -130,7 +130,7 @@
 
     const Cycles responseLimit;
 
-    MasterID masterId;
+    RequestorID requestorId;
 
     void completeRequest(PacketPtr pkt);
 
diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc
index 026a325..134f0f6 100644
--- a/src/cpu/testers/memtest/memtest.cc
+++ b/src/cpu/testers/memtest/memtest.cc
@@ -91,7 +91,7 @@
       percentReads(p->percent_reads),
       percentFunctional(p->percent_functional),
       percentUncacheable(p->percent_uncacheable),
-      masterId(p->system->getMasterId(this)),
+      requestorId(p->system->getRequestorId(this)),
       blockSize(p->system->cacheLineSize()),
       blockAddrMask(blockSize - 1),
       progressInterval(p->progress_interval),
@@ -230,7 +230,7 @@
 
     bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
         !uncacheable;
-    RequestPtr req = std::make_shared<Request>(paddr, 1, flags, masterId);
+    RequestPtr req = std::make_shared<Request>(paddr, 1, flags, requestorId);
     req->setContext(id);
 
     outstandingAddrs.insert(paddr);
diff --git a/src/cpu/testers/memtest/memtest.hh b/src/cpu/testers/memtest/memtest.hh
index 5eb4e35..fc61b75 100644
--- a/src/cpu/testers/memtest/memtest.hh
+++ b/src/cpu/testers/memtest/memtest.hh
@@ -126,7 +126,7 @@
     const unsigned percentUncacheable;
 
     /** Request id for all generated traffic */
-    MasterID masterId;
+    RequestorID requestorId;
 
     unsigned int id;
 
diff --git a/src/cpu/testers/rubytest/Check.cc b/src/cpu/testers/rubytest/Check.cc
index e3732bf..cf60097 100644
--- a/src/cpu/testers/rubytest/Check.cc
+++ b/src/cpu/testers/rubytest/Check.cc
@@ -108,7 +108,7 @@
 
     // Prefetches are assumed to be 0 sized
     RequestPtr req = std::make_shared<Request>(
-            m_address, 0, flags, m_tester_ptr->masterId());
+            m_address, 0, flags, m_tester_ptr->requestorId());
     req->setPC(m_pc);
     req->setContext(index);
 
@@ -147,7 +147,7 @@
     Request::Flags flags;
 
     RequestPtr req = std::make_shared<Request>(
-            m_address, CHECK_SIZE, flags, m_tester_ptr->masterId());
+            m_address, CHECK_SIZE, flags, m_tester_ptr->requestorId());
     req->setPC(m_pc);
 
     Packet::Command cmd;
@@ -181,7 +181,7 @@
 
     // Stores are assumed to be 1 byte-sized
     RequestPtr req = std::make_shared<Request>(
-        writeAddr, 1, flags, m_tester_ptr->masterId());
+        writeAddr, 1, flags, m_tester_ptr->requestorId());
     req->setPC(m_pc);
 
     req->setContext(index);
@@ -246,7 +246,7 @@
 
     // Checks are sized depending on the number of bytes written
     RequestPtr req = std::make_shared<Request>(
-            m_address, CHECK_SIZE, flags, m_tester_ptr->masterId());
+            m_address, CHECK_SIZE, flags, m_tester_ptr->requestorId());
     req->setPC(m_pc);
 
     req->setContext(index);
diff --git a/src/cpu/testers/rubytest/RubyTester.cc b/src/cpu/testers/rubytest/RubyTester.cc
index 8dfe994..a64a965 100644
--- a/src/cpu/testers/rubytest/RubyTester.cc
+++ b/src/cpu/testers/rubytest/RubyTester.cc
@@ -53,7 +53,7 @@
   : ClockedObject(p),
     checkStartEvent([this]{ wakeup(); }, "RubyTester tick",
                     false, Event::CPU_Tick_Pri),
-    _masterId(p->system->getMasterId(this)),
+    _requestorId(p->system->getRequestorId(this)),
     m_checkTable_ptr(nullptr),
     m_num_cpus(p->num_cpus),
     m_checks_to_complete(p->checks_to_complete),
diff --git a/src/cpu/testers/rubytest/RubyTester.hh b/src/cpu/testers/rubytest/RubyTester.hh
index e63729a..64c33b8 100644
--- a/src/cpu/testers/rubytest/RubyTester.hh
+++ b/src/cpu/testers/rubytest/RubyTester.hh
@@ -117,11 +117,11 @@
     void print(std::ostream& out) const;
     bool getCheckFlush() { return m_check_flush; }
 
-    MasterID masterId() { return _masterId; }
+    RequestorID requestorId() { return _requestorId; }
   protected:
     EventFunctionWrapper checkStartEvent;
 
-    MasterID _masterId;
+    RequestorID _requestorId;
 
   private:
     void hitCallback(NodeID proc, SubBlock* data);
diff --git a/src/cpu/testers/rubytest/RubyTester.py b/src/cpu/testers/rubytest/RubyTester.py
index ecf52b6..9bcbcd1 100644
--- a/src/cpu/testers/rubytest/RubyTester.py
+++ b/src/cpu/testers/rubytest/RubyTester.py
@@ -34,9 +34,10 @@
     type = 'RubyTester'
     cxx_header = "cpu/testers/rubytest/RubyTester.hh"
     num_cpus = Param.Int("number of cpus / RubyPorts")
-    cpuInstDataPort = VectorMasterPort("cpu combo ports to inst & data caches")
-    cpuInstPort = VectorMasterPort("cpu ports to only inst caches")
-    cpuDataPort = VectorMasterPort("cpu ports to only data caches")
+    cpuInstDataPort = VectorRequestPort("cpu combo ports to inst & "
+                                        "data caches")
+    cpuInstPort = VectorRequestPort("cpu ports to only inst caches")
+    cpuDataPort = VectorRequestPort("cpu ports to only data caches")
     checks_to_complete = Param.Int(100, "checks to complete")
     deadlock_threshold = Param.Int(50000, "how often to check for deadlock")
     wakeup_frequency = Param.Int(10, "number of cycles between wakeups")
diff --git a/src/cpu/testers/traffic_gen/BaseTrafficGen.py b/src/cpu/testers/traffic_gen/BaseTrafficGen.py
index ff50a19..3055348 100644
--- a/src/cpu/testers/traffic_gen/BaseTrafficGen.py
+++ b/src/cpu/testers/traffic_gen/BaseTrafficGen.py
@@ -44,7 +44,7 @@
 # generated (Random, Linear, Trace etc)
 class StreamGenType(ScopedEnum): vals = [ 'none', 'fixed', 'random' ]
 
-# The traffic generator is a master module that generates stimuli for
+# The traffic generator is a requestor module that generates stimuli for
 # the memory system, based on a collection of simple behaviours that
 # are either probabilistic or based on traces. It can be used stand
 # alone for creating test cases for interconnect and memory
@@ -57,7 +57,7 @@
     cxx_header = "cpu/testers/traffic_gen/traffic_gen.hh"
 
     # Port used for sending requests and receiving responses
-    port = RequestPort("Master port")
+    port = RequestPort("This port sends requests and receives responses")
 
     # System used to determine the mode of the memory system
     system = Param.System(Parent.any, "System this generator is part of")
@@ -110,9 +110,9 @@
     def connectCachedPorts(self, bus):
         if hasattr(self, '_cached_ports') and (len(self._cached_ports) > 0):
             for p in self._cached_ports:
-                exec('self.%s = bus.slave' % p)
+                exec('self.%s = bus.cpu_side_ports' % p)
         else:
-            self.port = bus.slave
+            self.port = bus.cpu_side_ports
 
     def connectAllPorts(self, cached_bus, uncached_bus = None):
         self.connectCachedPorts(cached_bus)
diff --git a/src/cpu/testers/traffic_gen/base.cc b/src/cpu/testers/traffic_gen/base.cc
index bc8b601..dcc410b 100644
--- a/src/cpu/testers/traffic_gen/base.cc
+++ b/src/cpu/testers/traffic_gen/base.cc
@@ -80,7 +80,7 @@
       retryPktTick(0), blockedWaitingResp(false),
       updateEvent([this]{ update(); }, name()),
       stats(this),
-      masterID(system->getMasterId(this)),
+      requestorId(system->getRequestorId(this)),
       streamGenerator(StreamGen::create(p))
 {
 }
@@ -358,13 +358,15 @@
 std::shared_ptr<BaseGen>
 BaseTrafficGen::createIdle(Tick duration)
 {
-    return std::shared_ptr<BaseGen>(new IdleGen(*this, masterID, duration));
+    return std::shared_ptr<BaseGen>(new IdleGen(*this, requestorId,
+                                                duration));
 }
 
 std::shared_ptr<BaseGen>
 BaseTrafficGen::createExit(Tick duration)
 {
-    return std::shared_ptr<BaseGen>(new ExitGen(*this, masterID, duration));
+    return std::shared_ptr<BaseGen>(new ExitGen(*this, requestorId,
+                                                duration));
 }
 
 std::shared_ptr<BaseGen>
@@ -373,7 +375,7 @@
                              Tick min_period, Tick max_period,
                              uint8_t read_percent, Addr data_limit)
 {
-    return std::shared_ptr<BaseGen>(new LinearGen(*this, masterID,
+    return std::shared_ptr<BaseGen>(new LinearGen(*this, requestorId,
                                                   duration, start_addr,
                                                   end_addr, blocksize,
                                                   system->cacheLineSize(),
@@ -387,7 +389,7 @@
                              Tick min_period, Tick max_period,
                              uint8_t read_percent, Addr data_limit)
 {
-    return std::shared_ptr<BaseGen>(new RandomGen(*this, masterID,
+    return std::shared_ptr<BaseGen>(new RandomGen(*this, requestorId,
                                                   duration, start_addr,
                                                   end_addr, blocksize,
                                                   system->cacheLineSize(),
@@ -406,7 +408,7 @@
                            Enums::AddrMap addr_mapping,
                            unsigned int nbr_of_ranks)
 {
-    return std::shared_ptr<BaseGen>(new DramGen(*this, masterID,
+    return std::shared_ptr<BaseGen>(new DramGen(*this, requestorId,
                                                 duration, start_addr,
                                                 end_addr, blocksize,
                                                 system->cacheLineSize(),
@@ -432,7 +434,7 @@
                               unsigned int nbr_of_ranks,
                               unsigned int max_seq_count_per_rank)
 {
-    return std::shared_ptr<BaseGen>(new DramRotGen(*this, masterID,
+    return std::shared_ptr<BaseGen>(new DramRotGen(*this, requestorId,
                                                    duration, start_addr,
                                                    end_addr, blocksize,
                                                    system->cacheLineSize(),
@@ -467,7 +469,7 @@
                            unsigned int nbr_of_ranks_nvm,
                            uint8_t nvm_percent)
 {
-    return std::shared_ptr<BaseGen>(new HybridGen(*this, masterID,
+    return std::shared_ptr<BaseGen>(new HybridGen(*this, requestorId,
                                                 duration, start_addr_dram,
                                                 end_addr_dram, blocksize_dram,
                                                 start_addr_nvm,
@@ -500,7 +502,7 @@
                            Enums::AddrMap addr_mapping,
                            unsigned int nbr_of_ranks)
 {
-    return std::shared_ptr<BaseGen>(new NvmGen(*this, masterID,
+    return std::shared_ptr<BaseGen>(new NvmGen(*this, requestorId,
                                                 duration, start_addr,
                                                 end_addr, blocksize,
                                                 system->cacheLineSize(),
@@ -519,7 +521,7 @@
 {
 #if HAVE_PROTOBUF
     return std::shared_ptr<BaseGen>(
-        new TraceGen(*this, masterID, duration, trace_file, addr_offset));
+        new TraceGen(*this, requestorId, duration, trace_file, addr_offset));
 #else
     panic("Can't instantiate trace generation without Protobuf support!\n");
 #endif
diff --git a/src/cpu/testers/traffic_gen/base.hh b/src/cpu/testers/traffic_gen/base.hh
index 6f419e8..7c3386e 100644
--- a/src/cpu/testers/traffic_gen/base.hh
+++ b/src/cpu/testers/traffic_gen/base.hh
@@ -53,7 +53,7 @@
 struct BaseTrafficGenParams;
 
 /**
- * The traffic generator is a master module that generates stimuli for
+ * The traffic generator is a module that generates stimuli for
  * the memory system, based on a collection of simple generator
  * behaviours that are either probabilistic or based on traces. It can
  * be used stand alone for creating test cases for interconnect and
@@ -123,7 +123,7 @@
     const int maxOutstandingReqs;
 
 
-    /** Master port specialisation for the traffic generator */
+    /** Request port specialisation for the traffic generator */
     class TrafficGenPort : public RequestPort
     {
       public:
@@ -157,7 +157,7 @@
      */
     void update();
 
-    /** The instance of master port used by the traffic generator. */
+    /** The instance of request port used by the traffic generator. */
     TrafficGenPort port;
 
     /** Packet waiting to be sent. */
@@ -324,9 +324,9 @@
     virtual std::shared_ptr<BaseGen> nextGenerator() = 0;
 
     /**
-     * MasterID used in generated requests.
+     * RequestorID used in generated requests.
      */
-    const MasterID masterID;
+    const RequestorID requestorId;
 
     /** Currently active generator */
     std::shared_ptr<BaseGen> activeGenerator;
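
The probabilistic and trace-based behaviours described in this header are normally selected through the config-file-driven TrafficGen subclass rather than by calling the factories directly. A short fragment, reusing the system skeleton from the sketch above and assuming the parameter keeps its config_file name (the path is a placeholder):

    # Illustrative fragment: config-file-driven variant of the same module.
    system.tgen = TrafficGen(config_file='tgen.cfg')   # placeholder path
    # The single request port, named 'port' above, connects to the
    # crossbar's responder side exactly as before.
    system.tgen.port = system.membus.cpu_side_ports
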
diff --git a/src/cpu/testers/traffic_gen/base_gen.cc b/src/cpu/testers/traffic_gen/base_gen.cc
index d5cdf71..d8ce001 100644
--- a/src/cpu/testers/traffic_gen/base_gen.cc
+++ b/src/cpu/testers/traffic_gen/base_gen.cc
@@ -46,8 +46,8 @@
 #include "debug/TrafficGen.hh"
 #include "sim/system.hh"
 
-BaseGen::BaseGen(SimObject &obj, MasterID master_id, Tick _duration)
-    : _name(obj.name()), masterID(master_id),
+BaseGen::BaseGen(SimObject &obj, RequestorID requestor_id, Tick _duration)
+    : _name(obj.name()), requestorId(requestor_id),
       duration(_duration)
 {
 }
@@ -57,10 +57,11 @@
                    Request::FlagsType flags)
 {
     // Create new request
-    RequestPtr req = std::make_shared<Request>(addr, size, flags, masterID);
+    RequestPtr req = std::make_shared<Request>(addr, size, flags,
+                                               requestorId);
     // Dummy PC to have PC-based prefetchers latch on; get entropy into higher
     // bits
-    req->setPC(((Addr)masterID) << 2);
+    req->setPC(((Addr)requestorId) << 2);
 
     // Embed it in a packet
     PacketPtr pkt = new Packet(req, cmd);
@@ -69,19 +70,19 @@
     pkt->dataDynamic(pkt_data);
 
     if (cmd.isWrite()) {
-        std::fill_n(pkt_data, req->getSize(), (uint8_t)masterID);
+        std::fill_n(pkt_data, req->getSize(), (uint8_t)requestorId);
     }
 
     return pkt;
 }
 
 StochasticGen::StochasticGen(SimObject &obj,
-                             MasterID master_id, Tick _duration,
+                             RequestorID requestor_id, Tick _duration,
                              Addr start_addr, Addr end_addr,
                              Addr _blocksize, Addr cacheline_size,
                              Tick min_period, Tick max_period,
                              uint8_t read_percent, Addr data_limit)
-        : BaseGen(obj, master_id, _duration),
+        : BaseGen(obj, requestor_id, _duration),
           startAddr(start_addr), endAddr(end_addr),
           blocksize(_blocksize), cacheLineSize(cacheline_size),
           minPeriod(min_period), maxPeriod(max_period),
diff --git a/src/cpu/testers/traffic_gen/base_gen.hh b/src/cpu/testers/traffic_gen/base_gen.hh
index 0f51b6c..ab9d385 100644
--- a/src/cpu/testers/traffic_gen/base_gen.hh
+++ b/src/cpu/testers/traffic_gen/base_gen.hh
@@ -62,8 +62,8 @@
     /** Name to use for status and debug printing */
     const std::string _name;
 
-    /** The MasterID used for generating requests */
-    const MasterID masterID;
+    /** The RequestorID used for generating requests */
+    const RequestorID requestorId;
 
     /**
      * Generate a new request and associated packet
@@ -85,10 +85,10 @@
      * Create a base generator.
      *
      * @param obj simobject owning the generator
-     * @param master_id MasterID set on each request
+     * @param requestor_id RequestorID set on each request
      * @param _duration duration of this state before transitioning
      */
-    BaseGen(SimObject &obj, MasterID master_id, Tick _duration);
+    BaseGen(SimObject &obj, RequestorID requestor_id, Tick _duration);
 
     virtual ~BaseGen() { }
 
@@ -133,7 +133,7 @@
 {
   public:
     StochasticGen(SimObject &obj,
-                  MasterID master_id, Tick _duration,
+                  RequestorID requestor_id, Tick _duration,
                   Addr start_addr, Addr end_addr,
                   Addr _blocksize, Addr cacheline_size,
                   Tick min_period, Tick max_period,
diff --git a/src/cpu/testers/traffic_gen/dram_gen.cc b/src/cpu/testers/traffic_gen/dram_gen.cc
index 0534a8a..e29f6d1 100644
--- a/src/cpu/testers/traffic_gen/dram_gen.cc
+++ b/src/cpu/testers/traffic_gen/dram_gen.cc
@@ -45,7 +45,7 @@
 #include "enums/AddrMap.hh"
 
 DramGen::DramGen(SimObject &obj,
-                 MasterID master_id, Tick _duration,
+                 RequestorID requestor_id, Tick _duration,
                  Addr start_addr, Addr end_addr,
                  Addr _blocksize, Addr cacheline_size,
                  Tick min_period, Tick max_period,
@@ -55,7 +55,7 @@
                  unsigned int nbr_of_banks_util,
                  Enums::AddrMap addr_mapping,
                  unsigned int nbr_of_ranks)
-        : RandomGen(obj, master_id, _duration, start_addr, end_addr,
+        : RandomGen(obj, requestor_id, _duration, start_addr, end_addr,
           _blocksize, cacheline_size, min_period, max_period,
           read_percent, data_limit),
           numSeqPkts(num_seq_pkts), countNumSeqPkts(0), addr(0),
diff --git a/src/cpu/testers/traffic_gen/dram_gen.hh b/src/cpu/testers/traffic_gen/dram_gen.hh
index 081ca24..b09081b 100644
--- a/src/cpu/testers/traffic_gen/dram_gen.hh
+++ b/src/cpu/testers/traffic_gen/dram_gen.hh
@@ -64,7 +64,7 @@
      * Create a DRAM address sequence generator.
      *
      * @param obj SimObject owning this sequence generator
-     * @param master_id MasterID related to the memory requests
+     * @param requestor_id RequestorID related to the memory requests
      * @param _duration duration of this state before transitioning
      * @param start_addr Start address
      * @param end_addr End address
@@ -83,7 +83,7 @@
      *                     assumes single channel system
      */
     DramGen(SimObject &obj,
-            MasterID master_id, Tick _duration,
+            RequestorID requestor_id, Tick _duration,
             Addr start_addr, Addr end_addr,
             Addr _blocksize, Addr cacheline_size,
             Tick min_period, Tick max_period,
diff --git a/src/cpu/testers/traffic_gen/dram_rot_gen.hh b/src/cpu/testers/traffic_gen/dram_rot_gen.hh
index bb53d49..34140ac 100644
--- a/src/cpu/testers/traffic_gen/dram_rot_gen.hh
+++ b/src/cpu/testers/traffic_gen/dram_rot_gen.hh
@@ -63,7 +63,7 @@
      * 3) Ranks per channel
      *
      * @param obj SimObject owning this sequence generator
-     * @param master_id MasterID related to the memory requests
+     * @param requestor_id RequestorID related to the memory requests
      * @param _duration duration of this state before transitioning
      * @param start_addr Start address
      * @param end_addr End address
@@ -82,7 +82,7 @@
      * @param addr_mapping Address mapping to be used,
      *                     assumes single channel system
      */
-    DramRotGen(SimObject &obj, MasterID master_id, Tick _duration,
+    DramRotGen(SimObject &obj, RequestorID requestor_id, Tick _duration,
             Addr start_addr, Addr end_addr,
             Addr _blocksize, Addr cacheline_size,
             Tick min_period, Tick max_period,
@@ -92,7 +92,7 @@
             Enums::AddrMap addr_mapping,
             unsigned int nbr_of_ranks,
             unsigned int max_seq_count_per_rank)
-        : DramGen(obj, master_id, _duration, start_addr, end_addr,
+        : DramGen(obj, requestor_id, _duration, start_addr, end_addr,
           _blocksize, cacheline_size, min_period, max_period,
           read_percent, data_limit,
           num_seq_pkts, page_size, nbr_of_banks_DRAM,
diff --git a/src/cpu/testers/traffic_gen/exit_gen.hh b/src/cpu/testers/traffic_gen/exit_gen.hh
index ec863f7..65939a6 100644
--- a/src/cpu/testers/traffic_gen/exit_gen.hh
+++ b/src/cpu/testers/traffic_gen/exit_gen.hh
@@ -54,8 +54,8 @@
 
   public:
 
-    ExitGen(SimObject &obj, MasterID master_id, Tick _duration)
-        : BaseGen(obj, master_id, _duration)
+    ExitGen(SimObject &obj, RequestorID requestor_id, Tick _duration)
+        : BaseGen(obj, requestor_id, _duration)
     { }
 
     void enter();
diff --git a/src/cpu/testers/traffic_gen/hybrid_gen.cc b/src/cpu/testers/traffic_gen/hybrid_gen.cc
index 303884a..638d7a3 100644
--- a/src/cpu/testers/traffic_gen/hybrid_gen.cc
+++ b/src/cpu/testers/traffic_gen/hybrid_gen.cc
@@ -49,7 +49,7 @@
 using namespace std;
 
 HybridGen::HybridGen(SimObject &obj,
-               MasterID master_id, Tick _duration,
+               RequestorID requestor_id, Tick _duration,
                Addr start_addr_dram, Addr end_addr_dram,
                Addr blocksize_dram,
                Addr start_addr_nvm, Addr end_addr_nvm,
@@ -67,7 +67,7 @@
                unsigned int nbr_of_ranks_dram,
                unsigned int nbr_of_ranks_nvm,
                uint8_t nvm_percent)
-       : BaseGen(obj, master_id, _duration),
+       : BaseGen(obj, requestor_id, _duration),
          startAddrDram(start_addr_dram),
          endAddrDram(end_addr_dram),
          blocksizeDram(blocksize_dram),
diff --git a/src/cpu/testers/traffic_gen/hybrid_gen.hh b/src/cpu/testers/traffic_gen/hybrid_gen.hh
index 795826c..59ac87f 100644
--- a/src/cpu/testers/traffic_gen/hybrid_gen.hh
+++ b/src/cpu/testers/traffic_gen/hybrid_gen.hh
@@ -66,7 +66,7 @@
      * Create a hybrid DRAM + NVM address sequence generator.
      *
      * @param obj SimObject owning this sequence generator
-     * @param master_id MasterID related to the memory requests
+     * @param requestor_id RequestorID related to the memory requests
      * @param _duration duration of this state before transitioning
      * @param start_addr_dram Start address for DRAM range
      * @param end_addr_dram End address for DRAM range
@@ -96,7 +96,7 @@
      * @param nvm_percent Percentage of traffic going to NVM
      */
     HybridGen(SimObject &obj,
-           MasterID master_id, Tick _duration,
+           RequestorID requestor_id, Tick _duration,
            Addr start_addr_dram, Addr end_addr_dram,
            Addr blocksize_dram,
            Addr start_addr_nvm, Addr end_addr_nvm,
diff --git a/src/cpu/testers/traffic_gen/idle_gen.hh b/src/cpu/testers/traffic_gen/idle_gen.hh
index 761b717..40e98b8 100644
--- a/src/cpu/testers/traffic_gen/idle_gen.hh
+++ b/src/cpu/testers/traffic_gen/idle_gen.hh
@@ -56,8 +56,8 @@
 
   public:
 
-    IdleGen(SimObject &obj, MasterID master_id, Tick _duration)
-        : BaseGen(obj, master_id, _duration)
+    IdleGen(SimObject &obj, RequestorID requestor_id, Tick _duration)
+        : BaseGen(obj, requestor_id, _duration)
     { }
 
     void enter();
diff --git a/src/cpu/testers/traffic_gen/linear_gen.hh b/src/cpu/testers/traffic_gen/linear_gen.hh
index b6f4282..fbd3d8f 100644
--- a/src/cpu/testers/traffic_gen/linear_gen.hh
+++ b/src/cpu/testers/traffic_gen/linear_gen.hh
@@ -67,7 +67,7 @@
      * time.
      *
      * @param obj SimObject owning this sequence generator
-     * @param master_id MasterID related to the memory requests
+     * @param requestor_id RequestorID related to the memory requests
      * @param _duration duration of this state before transitioning
      * @param start_addr Start address
      * @param end_addr End address
@@ -79,12 +79,12 @@
      * @param data_limit Upper limit on how much data to read/write
      */
     LinearGen(SimObject &obj,
-              MasterID master_id, Tick _duration,
+              RequestorID requestor_id, Tick _duration,
               Addr start_addr, Addr end_addr,
               Addr _blocksize, Addr cacheline_size,
               Tick min_period, Tick max_period,
               uint8_t read_percent, Addr data_limit)
-        : StochasticGen(obj, master_id, _duration, start_addr, end_addr,
+        : StochasticGen(obj, requestor_id, _duration, start_addr, end_addr,
                         _blocksize, cacheline_size, min_period, max_period,
                         read_percent, data_limit),
           nextAddr(0),
diff --git a/src/cpu/testers/traffic_gen/nvm_gen.cc b/src/cpu/testers/traffic_gen/nvm_gen.cc
index fa3efd3..2191b4e 100644
--- a/src/cpu/testers/traffic_gen/nvm_gen.cc
+++ b/src/cpu/testers/traffic_gen/nvm_gen.cc
@@ -47,7 +47,7 @@
 #include "enums/AddrMap.hh"
 
 NvmGen::NvmGen(SimObject &obj,
-               MasterID master_id, Tick _duration,
+               RequestorID requestor_id, Tick _duration,
                Addr start_addr, Addr end_addr,
                Addr _blocksize, Addr cacheline_size,
                Tick min_period, Tick max_period,
@@ -57,7 +57,7 @@
                unsigned int nbr_of_banks_util,
                Enums::AddrMap addr_mapping,
                unsigned int nbr_of_ranks)
-       : RandomGen(obj, master_id, _duration, start_addr, end_addr,
+       : RandomGen(obj, requestor_id, _duration, start_addr, end_addr,
          _blocksize, cacheline_size, min_period, max_period,
          read_percent, data_limit),
          numSeqPkts(num_seq_pkts), countNumSeqPkts(0), addr(0),
diff --git a/src/cpu/testers/traffic_gen/nvm_gen.hh b/src/cpu/testers/traffic_gen/nvm_gen.hh
index c5a89ee..9251898 100644
--- a/src/cpu/testers/traffic_gen/nvm_gen.hh
+++ b/src/cpu/testers/traffic_gen/nvm_gen.hh
@@ -66,7 +66,7 @@
      * Create a NVM address sequence generator.
      *
      * @param obj SimObject owning this sequence generator
-     * @param master_id MasterID related to the memory requests
+     * @param requestor_id RequestorID related to the memory requests
      * @param _duration duration of this state before transitioning
      * @param start_addr Start address
      * @param end_addr End address
@@ -85,7 +85,7 @@
      *                     assumes single channel system
      */
     NvmGen(SimObject &obj,
-           MasterID master_id, Tick _duration,
+           RequestorID requestor_id, Tick _duration,
            Addr start_addr, Addr end_addr,
            Addr _blocksize, Addr cacheline_size,
            Tick min_period, Tick max_period,
diff --git a/src/cpu/testers/traffic_gen/random_gen.hh b/src/cpu/testers/traffic_gen/random_gen.hh
index ba398a9..3bf1a86 100644
--- a/src/cpu/testers/traffic_gen/random_gen.hh
+++ b/src/cpu/testers/traffic_gen/random_gen.hh
@@ -66,21 +66,23 @@
      *
      * @param gen Traffic generator owning this sequence generator
      * @param _duration duration of this state before transitioning
+     * @param requestor_id RequestorID related to the memory requests
      * @param start_addr Start address
      * @param end_addr End address
      * @param _blocksize Size used for transactions injected
+     * @param cacheline_size cache line size in the system
      * @param min_period Lower limit of random inter-transaction time
      * @param max_period Upper limit of random inter-transaction time
      * @param read_percent Percent of transactions that are reads
      * @param data_limit Upper limit on how much data to read/write
      */
     RandomGen(SimObject &obj,
-              MasterID master_id, Tick _duration,
+              RequestorID requestor_id, Tick _duration,
               Addr start_addr, Addr end_addr,
               Addr _blocksize, Addr cacheline_size,
               Tick min_period, Tick max_period,
               uint8_t read_percent, Addr data_limit)
-        : StochasticGen(obj, master_id, _duration, start_addr, end_addr,
+        : StochasticGen(obj, requestor_id, _duration, start_addr, end_addr,
                         _blocksize, cacheline_size, min_period, max_period,
                         read_percent, data_limit),
           dataManipulated(0)
diff --git a/src/cpu/testers/traffic_gen/trace_gen.hh b/src/cpu/testers/traffic_gen/trace_gen.hh
index fe386d8..a31868e 100644
--- a/src/cpu/testers/traffic_gen/trace_gen.hh
+++ b/src/cpu/testers/traffic_gen/trace_gen.hh
@@ -148,14 +148,14 @@
      * Create a trace generator.
      *
      * @param obj SimObject owning this sequence generator
-     * @param master_id MasterID related to the memory requests
+     * @param requestor_id RequestorID related to the memory requests
      * @param _duration duration of this state before transitioning
      * @param trace_file File to read the transactions from
      * @param addr_offset Positive offset to add to trace address
      */
-    TraceGen(SimObject &obj, MasterID master_id, Tick _duration,
+    TraceGen(SimObject &obj, RequestorID requestor_id, Tick _duration,
              const std::string& trace_file, Addr addr_offset)
-        : BaseGen(obj, master_id, _duration),
+        : BaseGen(obj, requestor_id, _duration),
           trace(trace_file),
           tickOffset(0),
           addrOffset(addr_offset),
diff --git a/src/cpu/testers/traffic_gen/traffic_gen.hh b/src/cpu/testers/traffic_gen/traffic_gen.hh
index 3f1c2ab..d90df64 100644
--- a/src/cpu/testers/traffic_gen/traffic_gen.hh
+++ b/src/cpu/testers/traffic_gen/traffic_gen.hh
@@ -45,7 +45,7 @@
 struct TrafficGenParams;
 
 /**
- * The traffic generator is a master module that generates stimuli for
+ * The traffic generator is a module that generates stimuli for
  * the memory system, based on a collection of simple behaviours that
  * are either probabilistic or based on traces. It can be used stand
  * alone for creating test cases for interconnect and memory
@@ -53,7 +53,7 @@
  * components that are not yet modelled in detail, e.g. a video engine
  * or baseband subsystem in an SoC.
  *
- * The traffic generator has a single master port that is used to send
+ * The traffic generator has a single request port that is used to send
  * requests, independent of the specific behaviour. The behaviour of
  * the traffic generator is specified in a configuration file, and this
  * file describes a state transition graph where each state is a
diff --git a/src/cpu/trace/trace_cpu.cc b/src/cpu/trace/trace_cpu.cc
index 80db94c..3ac3207 100644
--- a/src/cpu/trace/trace_cpu.cc
+++ b/src/cpu/trace/trace_cpu.cc
@@ -46,12 +46,12 @@
     :   BaseCPU(params),
         icachePort(this),
         dcachePort(this),
-        instMasterID(params->system->getMasterId(this, "inst")),
-        dataMasterID(params->system->getMasterId(this, "data")),
+        instRequestorID(params->system->getRequestorId(this, "inst")),
+        dataRequestorID(params->system->getRequestorId(this, "data")),
         instTraceFile(params->instTraceFile),
         dataTraceFile(params->dataTraceFile),
-        icacheGen(*this, "iside", icachePort, instMasterID, instTraceFile),
-        dcacheGen(*this, "dside", dcachePort, dataMasterID, dataTraceFile,
+        icacheGen(*this, "iside", icachePort, instRequestorID, instTraceFile),
+        dcacheGen(*this, "dside", dcachePort, dataRequestorID, dataTraceFile,
                   params),
         icacheNextEvent([this]{ schedIcacheNext(); }, name()),
         dcacheNextEvent([this]{ schedDcacheNext(); }, name()),
@@ -593,7 +593,7 @@
 
     // Create a request and the packet containing request
     auto req = std::make_shared<Request>(
-        node_ptr->physAddr, node_ptr->size, node_ptr->flags, masterID);
+        node_ptr->physAddr, node_ptr->size, node_ptr->flags, requestorId);
     req->setReqInstSeqNum(node_ptr->seqNum);
 
     // If this is not done it triggers assert in L1 cache for invalid contextId
@@ -604,7 +604,7 @@
     // of the request.
     if (node_ptr->virtAddr != 0) {
         req->setVirt(node_ptr->virtAddr, node_ptr->size,
-                     node_ptr->flags, masterID, node_ptr->pc);
+                     node_ptr->flags, requestorId, node_ptr->pc);
         req->setPaddr(node_ptr->physAddr);
         req->setReqInstSeqNum(node_ptr->seqNum);
     }
@@ -619,7 +619,7 @@
     }
     pkt->dataDynamic(pkt_data);
 
-    // Call MasterPort method to send a timing request for this packet
+    // Call RequestPort method to send a timing request for this packet
     bool success = port.sendTimingReq(pkt);
     ++elasticStats.numSendAttempted;
 
@@ -1073,7 +1073,7 @@
 {
 
     // Create new request
-    auto req = std::make_shared<Request>(addr, size, flags, masterID);
+    auto req = std::make_shared<Request>(addr, size, flags, requestorId);
     req->setPC(pc);
 
     // If this is not done it triggers assert in L1 cache for invalid contextId
@@ -1089,7 +1089,7 @@
         memset(pkt_data, 0xA, req->getSize());
     }
 
-    // Call MasterPort method to send a timing request for this packet
+    // Call RequestPort method to send a timing request for this packet
     bool success = port.sendTimingReq(pkt);
     if (!success) {
         // If it fails, save the packet to retry when a retry is signalled by
diff --git a/src/cpu/trace/trace_cpu.hh b/src/cpu/trace/trace_cpu.hh
index 8754bfd..ba1c5e6 100644
--- a/src/cpu/trace/trace_cpu.hh
+++ b/src/cpu/trace/trace_cpu.hh
@@ -65,7 +65,7 @@
  * same trace is used for playback on different memory sub-systems.
  *
  * The TraceCPU inherits from BaseCPU so some virtual methods need to be
- * defined. It has two port subclasses inherited from MasterPort for
+ * defined. It has two port subclasses inherited from RequestPort for
  * instruction and data ports. It issues the memory requests deducing the
  * timing from the trace and without performing real execution of micro-ops. As
  * soon as the last dependency for an instruction is complete, its
@@ -321,11 +321,11 @@
     /** Port to connect to L1 data cache. */
     DcachePort dcachePort;
 
-    /** Master id for instruction read requests. */
-    const MasterID instMasterID;
+    /** Requestor id for instruction read requests. */
+    const RequestorID instRequestorID;
 
-    /** Master id for data read and write requests. */
-    const MasterID dataMasterID;
+    /** Requestor id for data read and write requests. */
+    const RequestorID dataRequestorID;
 
     /** File names for input instruction and data traces. */
     std::string instTraceFile, dataTraceFile;
@@ -423,11 +423,11 @@
         public:
         /* Constructor */
         FixedRetryGen(TraceCPU& _owner, const std::string& _name,
-                   RequestPort& _port, MasterID master_id,
+                   RequestPort& _port, RequestorID requestor_id,
                    const std::string& trace_file)
             : owner(_owner),
               port(_port),
-              masterID(master_id),
+              requestorId(requestor_id),
               trace(trace_file),
               genName(owner.name() + ".fixedretry." + _name),
               retryPkt(nullptr),
@@ -502,8 +502,8 @@
         /** Reference of the port to be used to issue memory requests. */
         RequestPort& port;
 
-        /** MasterID used for the requests being sent. */
-        const MasterID masterID;
+        /** RequestorID used for the requests being sent. */
+        const RequestorID requestorId;
 
         /** Input stream used for reading the input trace file. */
         InputStream trace;
@@ -852,11 +852,11 @@
         public:
         /* Constructor */
         ElasticDataGen(TraceCPU& _owner, const std::string& _name,
-                   RequestPort& _port, MasterID master_id,
+                   RequestPort& _port, RequestorID requestor_id,
                    const std::string& trace_file, TraceCPUParams *params)
             : owner(_owner),
               port(_port),
-              masterID(master_id),
+              requestorId(requestor_id),
               trace(trace_file, 1.0 / params->freqMultiplier),
               genName(owner.name() + ".elastic." + _name),
               retryPkt(nullptr),
@@ -990,8 +990,8 @@
         /** Reference of the port to be used to issue memory requests. */
         RequestPort& port;
 
-        /** MasterID used for the requests being sent. */
-        const MasterID masterID;
+        /** RequestorID used for the requests being sent. */
+        const RequestorID requestorId;
 
         /** Input stream used for reading the input trace file. */
         InputStream trace;
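
At the configuration level the only visible effect of this rename is the crossbar port naming: the TraceCPU itself is still instantiated with its two trace files and its BaseCPU-inherited ports. A minimal fragment, assuming an existing timing-mode `system` with a `membus` SystemXBar and pre-generated elastic traces (paths are placeholders; real configs usually interpose L1 caches):

    # Illustrative fragment: elastic-trace replay through the renamed ports.
    system.cpu = TraceCPU(instTraceFile='system.cpu.inst.trace.gz',
                          dataTraceFile='system.cpu.data.trace.gz')
    # Instruction and data request ports, wired like any other CPU's.
    system.cpu.icache_port = system.membus.cpu_side_ports
    system.cpu.dcache_port = system.membus.cpu_side_ports
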
diff --git a/src/dev/arm/RealView.py b/src/dev/arm/RealView.py
index 684567f..9ab0472 100644
--- a/src/dev/arm/RealView.py
+++ b/src/dev/arm/RealView.py
@@ -93,8 +93,9 @@
     type = 'AmbaDmaDevice'
     abstract = True
     cxx_header = "dev/arm/amba_device.hh"
-    pio_addr = Param.Addr("Address for AMBA slave interface")
-    pio_latency = Param.Latency("10ns", "Time between action and write/read result by AMBA DMA Device")
+    pio_addr = Param.Addr("Address for AMBA responder interface")
+    pio_latency = Param.Latency("10ns", "Time between action and write/read "
+                                        "result by AMBA DMA Device")
     interrupt = Param.ArmInterruptPin("Interrupt that connects to GIC")
     amba_id = Param.UInt32("ID of AMBA device for kernel detection")
 
@@ -576,16 +577,16 @@
     def _attach_memory(self, mem, bus, mem_ports=None):
         if hasattr(mem, "port"):
             if mem_ports is None:
-                mem.port = bus.master
+                mem.port = bus.mem_side_ports
             else:
                 mem_ports.append(mem.port)
 
     def _attach_device(self, device, bus, dma_ports=None):
         if hasattr(device, "pio"):
-            device.pio = bus.master
+            device.pio = bus.mem_side_ports
         if hasattr(device, "dma"):
             if dma_ports is None:
-                device.dma = bus.slave
+                device.dma = bus.cpu_side_ports
             else:
                 dma_ports.append(device.dma)
 
@@ -1092,15 +1093,15 @@
         """
         Instantiate a single SMMU and attach a group of client devices to it.
         The devices' dma port is wired to the SMMU and the SMMU's dma port
-        (master) is attached to the bus. In order to make it work, the list
-        of clients shouldn't contain any device part of the _off_chip_devices
-        or _on_chip_devices.
+        is attached to the bus. In order to make it work, the list of clients
+        shouldn't contain any device that is part of the _off_chip_devices
+        or _on_chip_devices.
         This method should be called only once.
 
         Parameters:
             devices (list): List of devices which will be using the SMMU
-            bus (Bus): The bus downstream of the SMMU. Its slave port will
-                       receive memory requests from the SMMU, and its master
+            bus (Bus): The bus downstream of the SMMU. Its response port will
+                       receive memory requests from the SMMU, and its request
                        port will forward accesses to the memory mapped devices
         """
         if hasattr(self, 'smmu'):
@@ -1108,8 +1109,8 @@
 
         self.smmu = SMMUv3(reg_map=AddrRange(0x2b400000, size=0x00020000))
 
-        self.smmu.master = bus.slave
-        self.smmu.control = bus.master
+        self.smmu.request = bus.cpu_side_ports
+        self.smmu.control = bus.mem_side_ports
 
         dma_ports = []
         for dev in devices:
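
With the renamed crossbar ports the direction of each binding can be read straight off the helpers above: a device's pio responder hangs off the bus's mem_side_ports (the side that issues requests), while its dma requestor plugs into the bus's cpu_side_ports (the side that receives them). A standalone sketch of the same pattern, assuming a RealView-style platform object exposing the usual _off_chip_devices() hook (everything else is placeholder):

    # Illustrative sketch: manual equivalent of _attach_device() above.
    iobus = IOXBar()
    membus = SystemXBar()

    for dev in realview._off_chip_devices():
        if hasattr(dev, 'pio'):
            dev.pio = iobus.mem_side_ports     # bus requests, device responds
        if hasattr(dev, 'dma'):
            dev.dma = membus.cpu_side_ports    # device requests, bus responds
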
diff --git a/src/dev/arm/SConscript b/src/dev/arm/SConscript
index 7041bd9..46ff259 100644
--- a/src/dev/arm/SConscript
+++ b/src/dev/arm/SConscript
@@ -77,7 +77,7 @@
     Source('smmu_v3_ports.cc');
     Source('smmu_v3_proc.cc');
     Source('smmu_v3_ptops.cc');
-    Source('smmu_v3_slaveifc.cc');
+    Source('smmu_v3_deviceifc.cc');
     Source('smmu_v3_transl.cc');
     Source('timer_sp804.cc')
     Source('watchdog_sp805.cc')
diff --git a/src/dev/arm/SMMUv3.py b/src/dev/arm/SMMUv3.py
index 0b9ab21..29c1568 100644
--- a/src/dev/arm/SMMUv3.py
+++ b/src/dev/arm/SMMUv3.py
@@ -39,13 +39,21 @@
 from m5.SimObject import *
 from m5.objects.ClockedObject import ClockedObject
 
-class SMMUv3SlaveInterface(ClockedObject):
-    type = 'SMMUv3SlaveInterface'
-    cxx_header = 'dev/arm/smmu_v3_slaveifc.hh'
+class SMMUv3DeviceInterface(ClockedObject):
+    type = 'SMMUv3DeviceInterface'
+    cxx_header = 'dev/arm/smmu_v3_deviceifc.hh'
 
-    slave = ResponsePort('Device port')
-    ats_master = RequestPort('ATS master port')
-    ats_slave  = ResponsePort('ATS slave port')
+    device_port = ResponsePort('Device port')
+    slave     = DeprecatedParam(device_port,
+                                '`slave` is now called `device_port`')
+    ats_mem_side_port = RequestPort('ATS mem side port, '
+                                'sends requests and receives responses')
+    ats_master   = DeprecatedParam(ats_mem_side_port,
+                        '`ats_master` is now called `ats_mem_side_port`')
+    ats_dev_side_port  = ResponsePort('ATS dev_side_port, '
+                                'sends responses and receives requests')
+    ats_slave     = DeprecatedParam(ats_dev_side_port,
+                        '`ats_slave` is now called `ats_dev_side_port`')
 
     port_width = Param.Unsigned(16, 'Port width in bytes (= 1 beat)')
     wrbuf_slots = Param.Unsigned(16, 'Write buffer size (in beats)')
@@ -74,18 +82,19 @@
     type = 'SMMUv3'
     cxx_header = 'dev/arm/smmu_v3.hh'
 
-    master = RequestPort('Master port')
-    master_walker = RequestPort(
-        'Master port for SMMU initiated HWTW requests (optional)')
+    request = RequestPort('Request port')
+    walker = RequestPort(
+        'Request port for SMMU initiated HWTW requests (optional)')
     control = ResponsePort(
         'Control port for accessing memory-mapped registers')
     sample_period = Param.Clock('10us', 'Stats sample period')
     reg_map = Param.AddrRange('Address range for control registers')
     system = Param.System(Parent.any, "System this device is part of")
 
-    slave_interfaces = VectorParam.SMMUv3SlaveInterface([], "Slave interfaces")
+    device_interfaces = VectorParam.SMMUv3DeviceInterface([],
+                                        "Responder interfaces")
 
-    # SLAVE INTERFACE<->SMMU link parameters
+    # RESPONDER INTERFACE<->SMMU link parameters
     ifc_smmu_lat = Param.Cycles(8, 'IFC to SMMU communication latency')
     smmu_ifc_lat = Param.Cycles(8, 'SMMU to IFC communication latency')
 
@@ -93,8 +102,8 @@
     xlate_slots = Param.Unsigned(64, 'SMMU translation slots')
     ptw_slots = Param.Unsigned(16, 'SMMU page table walk slots')
 
-    master_port_width = Param.Unsigned(16,
-        'Master port width in bytes (= 1 beat)')
+    request_port_width = Param.Unsigned(16,
+        'Request port width in bytes (= 1 beat)')
 
     tlb_entries = Param.Unsigned(2048, 'TLB size (entries)')
     tlb_assoc = Param.Unsigned(4, 'TLB associativity (0=full)')
@@ -185,23 +194,23 @@
 
     def connect(self, device):
         """
-        Helper method used to connect the SMMU. The master could
+        Helper method used to connect the SMMU. The requestor could
         be either a dma port (if the SMMU is attached directly to a
-        dma device), or to a master port (this is the case where the SMMU
+        dma device), or a request port (this is the case where the SMMU
         is attached to a bridge).
         """
 
-        slave_interface = SMMUv3SlaveInterface()
+        device_interface = SMMUv3DeviceInterface()
 
-        if hasattr(device, "master"):
-            slave_interface.slave = device.master
+        if hasattr(device, "request_port"):
+            device_interface.device_port = device.request_port
         elif hasattr(device, "dma"):
-            slave_interface.slave = device.dma
+            device_interface.device_port = device.dma
         else:
             print("Unable to attach SMMUv3\n")
             sys.exit(1)
 
-        self.slave_interfaces.append(slave_interface)
+        self.device_interfaces.append(device_interface)
 
         # Storing a reference to the smmu to be used when generating
         # the binding in the device DTB.
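
The DeprecatedParam aliases above keep existing scripts working while new ones use the request/response names directly. A minimal wiring sketch, assuming an existing `membus` SystemXBar and a DMA-capable `device` object (both placeholders):

    # Illustrative sketch: SMMUv3 hookup with the renamed ports.
    smmu = SMMUv3(reg_map=AddrRange(0x2b400000, size=0x00020000))
    smmu.request = membus.cpu_side_ports    # SMMU-initiated memory traffic
    smmu.control = membus.mem_side_ports    # memory-mapped register accesses
    # smmu.walker = membus.cpu_side_ports   # optional, table-walk traffic only
    smmu.connect(device)   # appends an SMMUv3DeviceInterface and binds
                           # device.dma to its device_port

Scripts that still assign ifc.slave, ifc.ats_master or ifc.ats_slave resolve through the aliases defined above, but are warned to move to the new names.
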
diff --git a/src/dev/arm/UFSHostDevice.py b/src/dev/arm/UFSHostDevice.py
index a444a9a..879503a 100644
--- a/src/dev/arm/UFSHostDevice.py
+++ b/src/dev/arm/UFSHostDevice.py
@@ -42,7 +42,7 @@
 class UFSHostDevice(DmaDevice):
     type = 'UFSHostDevice'
     cxx_header = "dev/arm/ufs_device.hh"
-    pio_addr = Param.Addr("Address for SCSI configuration slave interface")
+    pio_addr = Param.Addr("Address for SCSI configuration responder interface")
     pio_latency = Param.Latency("10ns", "Time between action and write/read \
        result by AMBA DMA Device")
     gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
diff --git a/src/dev/arm/amba.hh b/src/dev/arm/amba.hh
index 4bfba34..cfc3f56 100644
--- a/src/dev/arm/amba.hh
+++ b/src/dev/arm/amba.hh
@@ -43,12 +43,12 @@
 namespace AMBA
 {
 
-typedef MasterID OrderID;
+typedef RequestorID OrderID;
 
 static OrderID
 orderId(PacketPtr pkt)
 {
-    return pkt->req->masterId();
+    return pkt->req->requestorId();
 }
 
 } // namespace AMBA
diff --git a/src/dev/arm/gic_v3_its.cc b/src/dev/arm/gic_v3_its.cc
index 4ea7219..a3bb672 100644
--- a/src/dev/arm/gic_v3_its.cc
+++ b/src/dev/arm/gic_v3_its.cc
@@ -89,7 +89,7 @@
     a.type = ItsActionType::SEND_REQ;
 
     RequestPtr req = std::make_shared<Request>(
-        addr, size, 0, its.masterId);
+        addr, size, 0, its.requestorId);
 
     req->taskId(ContextSwitchTaskId::DMA);
 
@@ -113,7 +113,7 @@
     a.type = ItsActionType::SEND_REQ;
 
     RequestPtr req = std::make_shared<Request>(
-        addr, size, 0, its.masterId);
+        addr, size, 0, its.requestorId);
 
     req->taskId(ContextSwitchTaskId::DMA);
 
@@ -779,7 +779,7 @@
    gitsCbaser(0), gitsCreadr(0),
    gitsCwriter(0), gitsIidr(0),
    tableBases(NUM_BASER_REGS, 0),
-   masterId(params->system->getMasterId(this)),
+   requestorId(params->system->getRequestorId(this)),
    gic(nullptr),
    commandEvent([this] { checkCommandQueue(); }, name()),
    pendingCommands(false),
diff --git a/src/dev/arm/gic_v3_its.hh b/src/dev/arm/gic_v3_its.hh
index 54beb3e..e3b8734 100644
--- a/src/dev/arm/gic_v3_its.hh
+++ b/src/dev/arm/gic_v3_its.hh
@@ -319,7 +319,7 @@
 
   private:
     std::queue<ItsAction> packetsToRetry;
-    uint32_t masterId;
+    uint32_t requestorId;
     Gicv3 *gic;
     EventFunctionWrapper commandEvent;
 
diff --git a/src/dev/arm/smmu_v3.cc b/src/dev/arm/smmu_v3.cc
index f9d99da..f9bdc27 100644
--- a/src/dev/arm/smmu_v3.cc
+++ b/src/dev/arm/smmu_v3.cc
@@ -54,9 +54,9 @@
 SMMUv3::SMMUv3(SMMUv3Params *params) :
     ClockedObject(params),
     system(*params->system),
-    masterId(params->system->getMasterId(this)),
-    masterPort(name() + ".master", *this),
-    masterTableWalkPort(name() + ".master_walker", *this),
+    requestorId(params->system->getRequestorId(this)),
+    requestPort(name() + ".request", *this),
+    tableWalkPort(name() + ".walker", *this),
     controlPort(name() + ".control", *this, params->reg_map),
     tlb(params->tlb_entries, params->tlb_assoc, params->tlb_policy),
     configCache(params->cfg_entries, params->cfg_assoc, params->cfg_policy),
@@ -74,14 +74,14 @@
     walkCacheNonfinalEnable(params->wc_nonfinal_enable),
     walkCacheS1Levels(params->wc_s1_levels),
     walkCacheS2Levels(params->wc_s2_levels),
-    masterPortWidth(params->master_port_width),
+    requestPortWidth(params->request_port_width),
     tlbSem(params->tlb_slots),
     ifcSmmuSem(1),
     smmuIfcSem(1),
     configSem(params->cfg_slots),
     ipaSem(params->ipa_slots),
     walkSem(params->walk_slots),
-    masterPortSem(1),
+    requestPortSem(1),
     transSem(params->xlate_slots),
     ptwSem(params->ptw_slots),
     cycleSem(1),
@@ -91,7 +91,7 @@
     configLat(params->cfg_lat),
     ipaLat(params->ipa_lat),
     walkLat(params->walk_lat),
-    slaveInterfaces(params->slave_interfaces),
+    deviceInterfaces(params->device_interfaces),
     commandExecutor(name() + ".cmd_exec", *this),
     regsMap(params->reg_map),
     processCommandsEvent(this)
@@ -119,14 +119,14 @@
     // store unallowed values or if there are configuration conflicts.
     warn("SMMUv3 IDx register values unchecked\n");
 
-    for (auto ifc : slaveInterfaces)
+    for (auto ifc : deviceInterfaces)
         ifc->setSMMU(this);
 }
 
 bool
-SMMUv3::masterRecvTimingResp(PacketPtr pkt)
+SMMUv3::recvTimingResp(PacketPtr pkt)
 {
-    DPRINTF(SMMUv3, "[t] master resp addr=%#x size=%#x\n",
+    DPRINTF(SMMUv3, "[t] requestor resp addr=%#x size=%#x\n",
         pkt->getAddr(), pkt->getSize());
 
     // @todo: We need to pay for this and not just zero it out
@@ -141,7 +141,7 @@
 }
 
 void
-SMMUv3::masterRecvReqRetry()
+SMMUv3::recvReqRetry()
 {
     assert(!packetsToRetry.empty());
 
@@ -150,29 +150,29 @@
 
         assert(a.type==ACTION_SEND_REQ || a.type==ACTION_SEND_REQ_FINAL);
 
-        DPRINTF(SMMUv3, "[t] master retr addr=%#x size=%#x\n",
+        DPRINTF(SMMUv3, "[t] requestor retr addr=%#x size=%#x\n",
             a.pkt->getAddr(), a.pkt->getSize());
 
-        if (!masterPort.sendTimingReq(a.pkt))
+        if (!requestPort.sendTimingReq(a.pkt))
             break;
 
         packetsToRetry.pop();
 
         /*
          * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet
-         * on the master interface; this means that we no longer hold on to
+         * on the requestor interface; this means that we no longer hold on to
          * that transaction and therefore can accept a new one.
-         * If the slave port was stalled then unstall it (send retry).
+         * If the response port was stalled then unstall it (send retry).
          */
         if (a.type == ACTION_SEND_REQ_FINAL)
-            scheduleSlaveRetries();
+            scheduleDeviceRetries();
     }
 }
 
 bool
-SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt)
+SMMUv3::tableWalkRecvTimingResp(PacketPtr pkt)
 {
-    DPRINTF(SMMUv3, "[t] master HWTW resp addr=%#x size=%#x\n",
+    DPRINTF(SMMUv3, "[t] requestor HWTW resp addr=%#x size=%#x\n",
         pkt->getAddr(), pkt->getSize());
 
     // @todo: We need to pay for this and not just zero it out
@@ -187,7 +187,7 @@
 }
 
 void
-SMMUv3::masterTableWalkRecvReqRetry()
+SMMUv3::tableWalkRecvReqRetry()
 {
     assert(tableWalkPortEnable);
     assert(!packetsTableWalkToRetry.empty());
@@ -197,10 +197,10 @@
 
         assert(a.type==ACTION_SEND_REQ);
 
-        DPRINTF(SMMUv3, "[t] master HWTW retr addr=%#x size=%#x\n",
+        DPRINTF(SMMUv3, "[t] requestor HWTW retr addr=%#x size=%#x\n",
             a.pkt->getAddr(), a.pkt->getSize());
 
-        if (!masterTableWalkPort.sendTimingReq(a.pkt))
+        if (!tableWalkPort.sendTimingReq(a.pkt))
             break;
 
         packetsTableWalkToRetry.pop();
@@ -208,9 +208,9 @@
 }
 
 void
-SMMUv3::scheduleSlaveRetries()
+SMMUv3::scheduleDeviceRetries()
 {
-    for (auto ifc : slaveInterfaces) {
+    for (auto ifc : deviceInterfaces) {
         ifc->scheduleDeviceRetry();
     }
 }
@@ -239,17 +239,17 @@
 
         switch (action.type) {
             case ACTION_SEND_REQ:
-                // Send an MMU initiated request on the table walk port if it is
-                // enabled. Otherwise, fall through and handle same as the final
-                // ACTION_SEND_REQ_FINAL request.
+                // Send an MMU initiated request on the table walk port if
+                // it is enabled. Otherwise, fall through and handle same
+                // as the final ACTION_SEND_REQ_FINAL request.
                 if (tableWalkPortEnable) {
-                    delay += masterTableWalkPort.sendAtomic(action.pkt);
+                    delay += tableWalkPort.sendAtomic(action.pkt);
                     pkt = action.pkt;
                     break;
                 }
                 M5_FALLTHROUGH;
             case ACTION_SEND_REQ_FINAL:
-                delay += masterPort.sendAtomic(action.pkt);
+                delay += requestPort.sendAtomic(action.pkt);
                 pkt = action.pkt;
                 break;
 
@@ -289,14 +289,14 @@
             if (tableWalkPortEnable) {
                 action.pkt->pushSenderState(proc);
 
-                DPRINTF(SMMUv3, "[t] master HWTW req  addr=%#x size=%#x\n",
+                DPRINTF(SMMUv3, "[t] requestor HWTW req  addr=%#x size=%#x\n",
                         action.pkt->getAddr(), action.pkt->getSize());
 
                 if (packetsTableWalkToRetry.empty()
-                        && masterTableWalkPort.sendTimingReq(action.pkt)) {
-                    scheduleSlaveRetries();
+                        && tableWalkPort.sendTimingReq(action.pkt)) {
+                    scheduleDeviceRetries();
                 } else {
-                    DPRINTF(SMMUv3, "[t] master HWTW req  needs retry,"
+                    DPRINTF(SMMUv3, "[t] requestor HWTW req  needs retry,"
                             " qlen=%d\n", packetsTableWalkToRetry.size());
                     packetsTableWalkToRetry.push(action);
                 }
@@ -307,13 +307,14 @@
         case ACTION_SEND_REQ_FINAL:
             action.pkt->pushSenderState(proc);
 
-            DPRINTF(SMMUv3, "[t] master req  addr=%#x size=%#x\n",
+            DPRINTF(SMMUv3, "[t] requestor req  addr=%#x size=%#x\n",
                     action.pkt->getAddr(), action.pkt->getSize());
 
-            if (packetsToRetry.empty() && masterPort.sendTimingReq(action.pkt)) {
-                scheduleSlaveRetries();
+            if (packetsToRetry.empty() &&
+                requestPort.sendTimingReq(action.pkt)) {
+                scheduleDeviceRetries();
             } else {
-                DPRINTF(SMMUv3, "[t] master req  needs retry, qlen=%d\n",
+                DPRINTF(SMMUv3, "[t] requestor req  needs retry, qlen=%d\n",
                         packetsToRetry.size());
                 packetsToRetry.push(action);
             }
@@ -324,7 +325,7 @@
             // @todo: We need to pay for this and not just zero it out
             action.pkt->headerDelay = action.pkt->payloadDelay = 0;
 
-            DPRINTF(SMMUv3, "[t] slave resp addr=%#x size=%#x\n",
+            DPRINTF(SMMUv3, "[t] responder resp addr=%#x size=%#x\n",
                     action.pkt->getAddr(),
                     action.pkt->getSize());
 
@@ -338,7 +339,7 @@
             // @todo: We need to pay for this and not just zero it out
             action.pkt->headerDelay = action.pkt->payloadDelay = 0;
 
-            DPRINTF(SMMUv3, "[t] ATS slave resp addr=%#x size=%#x\n",
+            DPRINTF(SMMUv3, "[t] ATS responder resp addr=%#x size=%#x\n",
                     action.pkt->getAddr(), action.pkt->getSize());
 
             assert(action.ifc);
@@ -394,9 +395,9 @@
             DPRINTF(SMMUv3, "CMD_CFGI_STE sid=%#x\n", cmd.dw0.sid);
             configCache.invalidateSID(cmd.dw0.sid);
 
-            for (auto slave_interface : slaveInterfaces) {
-                slave_interface->microTLB->invalidateSID(cmd.dw0.sid);
-                slave_interface->mainTLB->invalidateSID(cmd.dw0.sid);
+            for (auto dev_interface : deviceInterfaces) {
+                dev_interface->microTLB->invalidateSID(cmd.dw0.sid);
+                dev_interface->mainTLB->invalidateSID(cmd.dw0.sid);
             }
             break;
         }
@@ -409,9 +410,9 @@
                 DPRINTF(SMMUv3, "CMD_CFGI_ALL\n");
                 configCache.invalidateAll();
 
-                for (auto slave_interface : slaveInterfaces) {
-                    slave_interface->microTLB->invalidateAll();
-                    slave_interface->mainTLB->invalidateAll();
+                for (auto dev_interface : deviceInterfaces) {
+                    dev_interface->microTLB->invalidateAll();
+                    dev_interface->mainTLB->invalidateAll();
                 }
             } else {
                 DPRINTF(SMMUv3, "CMD_CFGI_STE_RANGE\n");
@@ -420,9 +421,9 @@
                 for (auto sid = start_sid; sid <= end_sid; sid++) {
                     configCache.invalidateSID(sid);
 
-                    for (auto slave_interface : slaveInterfaces) {
-                        slave_interface->microTLB->invalidateSID(sid);
-                        slave_interface->mainTLB->invalidateSID(sid);
+                    for (auto dev_interface : deviceInterfaces) {
+                        dev_interface->microTLB->invalidateSID(sid);
+                        dev_interface->mainTLB->invalidateSID(sid);
                     }
                 }
             }
@@ -434,10 +435,10 @@
                     cmd.dw0.sid, cmd.dw0.ssid);
             configCache.invalidateSSID(cmd.dw0.sid, cmd.dw0.ssid);
 
-            for (auto slave_interface : slaveInterfaces) {
-                slave_interface->microTLB->invalidateSSID(
+            for (auto dev_interface : deviceInterfaces) {
+                dev_interface->microTLB->invalidateSSID(
                     cmd.dw0.sid, cmd.dw0.ssid);
-                slave_interface->mainTLB->invalidateSSID(
+                dev_interface->mainTLB->invalidateSSID(
                     cmd.dw0.sid, cmd.dw0.ssid);
             }
             break;
@@ -447,18 +448,18 @@
             DPRINTF(SMMUv3, "CMD_CFGI_CD_ALL sid=%#x\n", cmd.dw0.sid);
             configCache.invalidateSID(cmd.dw0.sid);
 
-            for (auto slave_interface : slaveInterfaces) {
-                slave_interface->microTLB->invalidateSID(cmd.dw0.sid);
-                slave_interface->mainTLB->invalidateSID(cmd.dw0.sid);
+            for (auto dev_interface : deviceInterfaces) {
+                dev_interface->microTLB->invalidateSID(cmd.dw0.sid);
+                dev_interface->mainTLB->invalidateSID(cmd.dw0.sid);
             }
             break;
         }
 
         case CMD_TLBI_NH_ALL: {
             DPRINTF(SMMUv3, "CMD_TLBI_NH_ALL vmid=%#x\n", cmd.dw0.vmid);
-            for (auto slave_interface : slaveInterfaces) {
-                slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
-                slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
+            for (auto dev_interface : deviceInterfaces) {
+                dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
+                dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
             }
             tlb.invalidateVMID(cmd.dw0.vmid);
             walkCache.invalidateVMID(cmd.dw0.vmid);
@@ -468,10 +469,10 @@
         case CMD_TLBI_NH_ASID: {
             DPRINTF(SMMUv3, "CMD_TLBI_NH_ASID asid=%#x vmid=%#x\n",
                     cmd.dw0.asid, cmd.dw0.vmid);
-            for (auto slave_interface : slaveInterfaces) {
-                slave_interface->microTLB->invalidateASID(
+            for (auto dev_interface : deviceInterfaces) {
+                dev_interface->microTLB->invalidateASID(
                     cmd.dw0.asid, cmd.dw0.vmid);
-                slave_interface->mainTLB->invalidateASID(
+                dev_interface->mainTLB->invalidateASID(
                     cmd.dw0.asid, cmd.dw0.vmid);
             }
             tlb.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
@@ -483,10 +484,10 @@
             const Addr addr = cmd.addr();
             DPRINTF(SMMUv3, "CMD_TLBI_NH_VAA va=%#08x vmid=%#x\n",
                     addr, cmd.dw0.vmid);
-            for (auto slave_interface : slaveInterfaces) {
-                slave_interface->microTLB->invalidateVAA(
+            for (auto dev_interface : deviceInterfaces) {
+                dev_interface->microTLB->invalidateVAA(
                     addr, cmd.dw0.vmid);
-                slave_interface->mainTLB->invalidateVAA(
+                dev_interface->mainTLB->invalidateVAA(
                     addr, cmd.dw0.vmid);
             }
             tlb.invalidateVAA(addr, cmd.dw0.vmid);
@@ -499,10 +500,10 @@
             const Addr addr = cmd.addr();
             DPRINTF(SMMUv3, "CMD_TLBI_NH_VA va=%#08x asid=%#x vmid=%#x\n",
                     addr, cmd.dw0.asid, cmd.dw0.vmid);
-            for (auto slave_interface : slaveInterfaces) {
-                slave_interface->microTLB->invalidateVA(
+            for (auto dev_interface : deviceInterfaces) {
+                dev_interface->microTLB->invalidateVA(
                     addr, cmd.dw0.asid, cmd.dw0.vmid);
-                slave_interface->mainTLB->invalidateVA(
+                dev_interface->mainTLB->invalidateVA(
                     addr, cmd.dw0.asid, cmd.dw0.vmid);
             }
             tlb.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid);
@@ -527,9 +528,9 @@
 
         case CMD_TLBI_S12_VMALL: {
             DPRINTF(SMMUv3, "CMD_TLBI_S12_VMALL vmid=%#x\n", cmd.dw0.vmid);
-            for (auto slave_interface : slaveInterfaces) {
-                slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
-                slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
+            for (auto dev_interface : deviceInterfaces) {
+                dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
+                dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
             }
             tlb.invalidateVMID(cmd.dw0.vmid);
             ipaCache.invalidateVMID(cmd.dw0.vmid);
@@ -539,9 +540,9 @@
 
         case CMD_TLBI_NSNH_ALL: {
             DPRINTF(SMMUv3, "CMD_TLBI_NSNH_ALL\n");
-            for (auto slave_interface : slaveInterfaces) {
-                slave_interface->microTLB->invalidateAll();
-                slave_interface->mainTLB->invalidateAll();
+            for (auto dev_interface : deviceInterfaces) {
+                dev_interface->microTLB->invalidateAll();
+                dev_interface->mainTLB->invalidateAll();
             }
             tlb.invalidateAll();
             ipaCache.invalidateAll();
@@ -717,16 +718,16 @@
 SMMUv3::init()
 {
     // make sure both sides are connected and have the same block size
-    if (!masterPort.isConnected())
-        fatal("Master port is not connected.\n");
+    if (!requestPort.isConnected())
+        fatal("Request port is not connected.\n");
 
-    // If the second master port is connected for the table walks, enable
+    // If the second request port is connected for the table walks, enable
     // the mode to send table walks through this port instead
-    if (masterTableWalkPort.isConnected())
+    if (tableWalkPort.isConnected())
         tableWalkPortEnable = true;
 
-    // notify the master side  of our address ranges
-    for (auto ifc : slaveInterfaces) {
+    // notify the request side of our address ranges
+    for (auto ifc : deviceInterfaces) {
         ifc->sendRange();
     }
 
@@ -741,10 +742,10 @@
 
     using namespace Stats;
 
-    for (size_t i = 0; i < slaveInterfaces.size(); i++) {
-        slaveInterfaces[i]->microTLB->regStats(
+    for (size_t i = 0; i < deviceInterfaces.size(); i++) {
+        deviceInterfaces[i]->microTLB->regStats(
             csprintf("%s.utlb%d", name(), i));
-        slaveInterfaces[i]->mainTLB->regStats(
+        deviceInterfaces[i]->mainTLB->regStats(
             csprintf("%s.maintlb%d", name(), i));
     }
 
@@ -815,10 +816,10 @@
 Port&
 SMMUv3::getPort(const std::string &name, PortID id)
 {
-    if (name == "master") {
-        return masterPort;
-    } else if (name == "master_walker") {
-        return masterTableWalkPort;
+    if (name == "request") {
+        return requestPort;
+    } else if (name == "walker") {
+        return tableWalkPort;
     } else if (name == "control") {
         return controlPort;
     } else {
diff --git a/src/dev/arm/smmu_v3.hh b/src/dev/arm/smmu_v3.hh
index 8f35cdf..6b3f398 100644
--- a/src/dev/arm/smmu_v3.hh
+++ b/src/dev/arm/smmu_v3.hh
@@ -48,11 +48,11 @@
 #include "dev/arm/smmu_v3_caches.hh"
 #include "dev/arm/smmu_v3_cmdexec.hh"
 #include "dev/arm/smmu_v3_defs.hh"
+#include "dev/arm/smmu_v3_deviceifc.hh"
 #include "dev/arm/smmu_v3_events.hh"
 #include "dev/arm/smmu_v3_ports.hh"
 #include "dev/arm/smmu_v3_proc.hh"
 #include "dev/arm/smmu_v3_ptops.hh"
-#include "dev/arm/smmu_v3_slaveifc.hh"
 #include "mem/packet.hh"
 #include "params/SMMUv3.hh"
 #include "sim/clocked_object.hh"
@@ -85,13 +85,13 @@
     friend class SMMUProcess;
     friend class SMMUTranslationProcess;
     friend class SMMUCommandExecProcess;
-    friend class SMMUv3SlaveInterface;
+    friend class SMMUv3DeviceInterface;
 
     const System &system;
-    const MasterID masterId;
+    const RequestorID requestorId;
 
-    SMMUMasterPort    masterPort;
-    SMMUMasterTableWalkPort masterTableWalkPort;
+    SMMURequestPort    requestPort;
+    SMMUTableWalkPort tableWalkPort;
     SMMUControlPort   controlPort;
 
     ARMArchTLB  tlb;
@@ -108,7 +108,7 @@
     const bool walkCacheNonfinalEnable;
     const unsigned walkCacheS1Levels;
     const unsigned walkCacheS2Levels;
-    const unsigned masterPortWidth; // in bytes
+    const unsigned requestPortWidth; // in bytes
 
     SMMUSemaphore tlbSem;
     SMMUSemaphore ifcSmmuSem;
@@ -116,7 +116,7 @@
     SMMUSemaphore configSem;
     SMMUSemaphore ipaSem;
     SMMUSemaphore walkSem;
-    SMMUSemaphore masterPortSem;
+    SMMUSemaphore requestPortSem;
 
     SMMUSemaphore transSem; // max N transactions in SMMU
     SMMUSemaphore ptwSem; // max N concurrent PTWs
@@ -138,7 +138,7 @@
     Stats::Distribution translationTimeDist;
     Stats::Distribution ptwTimeDist;
 
-    std::vector<SMMUv3SlaveInterface *> slaveInterfaces;
+    std::vector<SMMUv3DeviceInterface *> deviceInterfaces;
 
     SMMUCommandExecProcess commandExecutor;
 
@@ -151,7 +151,7 @@
     std::queue<SMMUAction> packetsTableWalkToRetry;
 
 
-    void scheduleSlaveRetries();
+    void scheduleDeviceRetries();
 
     SMMUAction runProcess(SMMUProcess *proc, PacketPtr pkt);
     SMMUAction runProcessAtomic(SMMUProcess *proc, PacketPtr pkt);
@@ -171,13 +171,13 @@
     virtual void init() override;
     virtual void regStats() override;
 
-    Tick slaveRecvAtomic(PacketPtr pkt, PortID id);
-    bool slaveRecvTimingReq(PacketPtr pkt, PortID id);
-    bool masterRecvTimingResp(PacketPtr pkt);
-    void masterRecvReqRetry();
+    Tick recvAtomic(PacketPtr pkt, PortID id);
+    bool recvTimingReq(PacketPtr pkt, PortID id);
+    bool recvTimingResp(PacketPtr pkt);
+    void recvReqRetry();
 
-    bool masterTableWalkRecvTimingResp(PacketPtr pkt);
-    void masterTableWalkRecvReqRetry();
+    bool tableWalkRecvTimingResp(PacketPtr pkt);
+    void tableWalkRecvReqRetry();
 
     Tick readControl(PacketPtr pkt);
     Tick writeControl(PacketPtr pkt);
diff --git a/src/dev/arm/smmu_v3_slaveifc.cc b/src/dev/arm/smmu_v3_deviceifc.cc
similarity index 74%
rename from src/dev/arm/smmu_v3_slaveifc.cc
rename to src/dev/arm/smmu_v3_deviceifc.cc
index 5b3dd98..7516055 100644
--- a/src/dev/arm/smmu_v3_slaveifc.cc
+++ b/src/dev/arm/smmu_v3_deviceifc.cc
@@ -35,15 +35,15 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include "dev/arm/smmu_v3_slaveifc.hh"
+#include "dev/arm/smmu_v3_deviceifc.hh"
 
 #include "base/trace.hh"
 #include "debug/SMMUv3.hh"
 #include "dev/arm/smmu_v3.hh"
 #include "dev/arm/smmu_v3_transl.hh"
 
-SMMUv3SlaveInterface::SMMUv3SlaveInterface(
-    const SMMUv3SlaveInterfaceParams *p) :
+SMMUv3DeviceInterface::SMMUv3DeviceInterface(
+    const SMMUv3DeviceInterfaceParams *p) :
     ClockedObject(p),
     smmu(nullptr),
     microTLB(new SMMUTLB(p->utlb_entries,
@@ -54,14 +54,15 @@
                         p->tlb_policy)),
     microTLBEnable(p->utlb_enable),
     mainTLBEnable(p->tlb_enable),
-    slavePortSem(1),
+    devicePortSem(1),
     microTLBSem(p->utlb_slots),
     mainTLBSem(p->tlb_slots),
     microTLBLat(p->utlb_lat),
     mainTLBLat(p->tlb_lat),
-    slavePort(new SMMUSlavePort(csprintf("%s.slave", name()), *this)),
-    atsSlavePort(name() + ".atsSlave", *this),
-    atsMasterPort(name() + ".atsMaster", *this),
+    devicePort(new SMMUDevicePort(csprintf("%s.device_port",
+                                            name()), *this)),
+    atsDevicePort(name() + ".atsDevicePort", *this),
+    atsMemPort(name() + ".atsMemPort", *this),
     portWidth(p->port_width),
     wrBufSlotsRemaining(p->wrbuf_slots),
     xlateSlotsRemaining(p->xlate_slots),
@@ -76,41 +77,41 @@
 {}
 
 void
-SMMUv3SlaveInterface::sendRange()
+SMMUv3DeviceInterface::sendRange()
 {
-    if (slavePort->isConnected()) {
-        inform("Slave port is connected to %s\n", slavePort->getPeer());
+    if (devicePort->isConnected()) {
+        inform("Device port is connected to %s\n", devicePort->getPeer());
 
-        slavePort->sendRangeChange();
+        devicePort->sendRangeChange();
     } else {
-        fatal("Slave port is not connected.\n");
+        fatal("Device port is not connected.\n");
     }
 }
 
 Port&
-SMMUv3SlaveInterface::getPort(const std::string &name, PortID id)
+SMMUv3DeviceInterface::getPort(const std::string &name, PortID id)
 {
-    if (name == "ats_master") {
-        return atsMasterPort;
-    } else if (name == "slave") {
-        return *slavePort;
-    } else if (name == "ats_slave") {
-        return atsSlavePort;
+    if (name == "ats_mem_side_port") {
+        return atsMemPort;
+    } else if (name == "device_port") {
+        return *devicePort;
+    } else if (name == "ats_dev_side_port") {
+        return atsDevicePort;
     } else {
         return ClockedObject::getPort(name, id);
     }
 }
 
 void
-SMMUv3SlaveInterface::schedTimingResp(PacketPtr pkt)
+SMMUv3DeviceInterface::schedTimingResp(PacketPtr pkt)
 {
-    slavePort->schedTimingResp(pkt, nextCycle());
+    devicePort->schedTimingResp(pkt, nextCycle());
 }
 
 void
-SMMUv3SlaveInterface::schedAtsTimingResp(PacketPtr pkt)
+SMMUv3DeviceInterface::schedAtsTimingResp(PacketPtr pkt)
 {
-    atsSlavePort.schedTimingResp(pkt, nextCycle());
+    atsDevicePort.schedTimingResp(pkt, nextCycle());
 
     if (atsDeviceNeedsRetry) {
         atsDeviceNeedsRetry = false;
@@ -119,10 +120,10 @@
 }
 
 Tick
-SMMUv3SlaveInterface::recvAtomic(PacketPtr pkt)
+SMMUv3DeviceInterface::recvAtomic(PacketPtr pkt)
 {
     DPRINTF(SMMUv3, "[a] req from %s addr=%#x size=%#x\n",
-            slavePort->getPeer(), pkt->getAddr(), pkt->getSize());
+            devicePort->getPeer(), pkt->getAddr(), pkt->getSize());
 
     std::string proc_name = csprintf("%s.port", name());
     SMMUTranslationProcess proc(proc_name, *smmu, *this);
@@ -135,10 +136,10 @@
 }
 
 bool
-SMMUv3SlaveInterface::recvTimingReq(PacketPtr pkt)
+SMMUv3DeviceInterface::recvTimingReq(PacketPtr pkt)
 {
     DPRINTF(SMMUv3, "[t] req from %s addr=%#x size=%#x\n",
-            slavePort->getPeer(), pkt->getAddr(), pkt->getSize());
+            devicePort->getPeer(), pkt->getAddr(), pkt->getSize());
 
     // @todo: We need to pay for this and not just zero it out
     pkt->headerDelay = pkt->payloadDelay = 0;
@@ -167,9 +168,9 @@
 }
 
 Tick
-SMMUv3SlaveInterface::atsSlaveRecvAtomic(PacketPtr pkt)
+SMMUv3DeviceInterface::atsRecvAtomic(PacketPtr pkt)
 {
-    DPRINTF(SMMUv3, "[a] ATS slave  req  addr=%#x size=%#x\n",
+    DPRINTF(SMMUv3, "[a] ATS responder req  addr=%#x size=%#x\n",
             pkt->getAddr(), pkt->getSize());
 
     std::string proc_name = csprintf("%s.atsport", name());
@@ -185,9 +186,9 @@
 }
 
 bool
-SMMUv3SlaveInterface::atsSlaveRecvTimingReq(PacketPtr pkt)
+SMMUv3DeviceInterface::atsRecvTimingReq(PacketPtr pkt)
 {
-    DPRINTF(SMMUv3, "[t] ATS slave  req  addr=%#x size=%#x\n",
+    DPRINTF(SMMUv3, "[t] ATS responder  req  addr=%#x size=%#x\n",
             pkt->getAddr(), pkt->getSize());
 
     // @todo: We need to pay for this and not just zero it out
@@ -210,9 +211,9 @@
 }
 
 bool
-SMMUv3SlaveInterface::atsMasterRecvTimingResp(PacketPtr pkt)
+SMMUv3DeviceInterface::atsRecvTimingResp(PacketPtr pkt)
 {
-    DPRINTF(SMMUv3, "[t] ATS master resp addr=%#x size=%#x\n",
+    DPRINTF(SMMUv3, "[t] ATS requestor resp addr=%#x size=%#x\n",
             pkt->getAddr(), pkt->getSize());
 
     // @todo: We need to pay for this and not just zero it out
@@ -227,30 +228,30 @@
 }
 
 void
-SMMUv3SlaveInterface::sendDeviceRetry()
+SMMUv3DeviceInterface::sendDeviceRetry()
 {
-    slavePort->sendRetryReq();
+    devicePort->sendRetryReq();
 }
 
 void
-SMMUv3SlaveInterface::atsSendDeviceRetry()
+SMMUv3DeviceInterface::atsSendDeviceRetry()
 {
     DPRINTF(SMMUv3, "ATS retry\n");
-    atsSlavePort.sendRetryReq();
+    atsDevicePort.sendRetryReq();
 }
 
 void
-SMMUv3SlaveInterface::scheduleDeviceRetry()
+SMMUv3DeviceInterface::scheduleDeviceRetry()
 {
     if (deviceNeedsRetry && !sendDeviceRetryEvent.scheduled()) {
-        DPRINTF(SMMUv3, "sched slave retry\n");
+        DPRINTF(SMMUv3, "sched responder retry\n");
         deviceNeedsRetry = false;
         schedule(sendDeviceRetryEvent, nextCycle());
     }
 }
 
 DrainState
-SMMUv3SlaveInterface::drain()
+SMMUv3DeviceInterface::drain()
 {
     // Wait until all SMMU translations are completed
     if (xlateSlotsRemaining < params()->xlate_slots) {
@@ -259,8 +260,8 @@
     return DrainState::Drained;
 }
 
-SMMUv3SlaveInterface*
-SMMUv3SlaveInterfaceParams::create()
+SMMUv3DeviceInterface*
+SMMUv3DeviceInterfaceParams::create()
 {
-    return new SMMUv3SlaveInterface(this);
+    return new SMMUv3DeviceInterface(this);
 }
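
Note: the getPort() mapping above exposes the renamed interface ports to
configuration scripts. A minimal wiring sketch, assuming a hypothetical
DMA-capable device ('dma_dev') and bus ('membus'); only the SMMU-side port
names come from this patch:

    smmu_ifc = SMMUv3DeviceInterface()
    dma_dev.dma = smmu_ifc.device_port                   # device requests in
    smmu_ifc.ats_mem_side_port = membus.cpu_side_ports   # ATS requests out
    dma_dev.ats = smmu_ifc.ats_dev_side_port             # placeholder ATS port
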
diff --git a/src/dev/arm/smmu_v3_slaveifc.hh b/src/dev/arm/smmu_v3_deviceifc.hh
similarity index 81%
rename from src/dev/arm/smmu_v3_slaveifc.hh
rename to src/dev/arm/smmu_v3_deviceifc.hh
index e1f8ef2..64dcc57 100644
--- a/src/dev/arm/smmu_v3_slaveifc.hh
+++ b/src/dev/arm/smmu_v3_deviceifc.hh
@@ -35,8 +35,8 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#ifndef __DEV_ARM_SMMU_V3_SLAVEIFC_HH__
-#define __DEV_ARM_SMMU_V3_SLAVEIFC_HH__
+#ifndef __DEV_ARM_SMMU_V3_DEVICEIFC_HH__
+#define __DEV_ARM_SMMU_V3_DEVICEIFC_HH__
 
 #include <list>
 
@@ -45,14 +45,14 @@
 #include "dev/arm/smmu_v3_events.hh"
 #include "dev/arm/smmu_v3_ports.hh"
 #include "dev/arm/smmu_v3_proc.hh"
-#include "params/SMMUv3SlaveInterface.hh"
+#include "params/SMMUv3DeviceInterface.hh"
 #include "sim/clocked_object.hh"
 
 class SMMUTranslationProcess;
 class SMMUv3;
-class SMMUSlavePort;
+class SMMUDevicePort;
 
-class SMMUv3SlaveInterface : public ClockedObject
+class SMMUv3DeviceInterface : public ClockedObject
 {
   protected:
     friend class SMMUTranslationProcess;
@@ -65,16 +65,16 @@
     const bool microTLBEnable;
     const bool mainTLBEnable;
 
-    SMMUSemaphore slavePortSem;
+    SMMUSemaphore devicePortSem;
     SMMUSemaphore microTLBSem;
     SMMUSemaphore mainTLBSem;
 
     const Cycles microTLBLat;
     const Cycles mainTLBLat;
 
-    SMMUSlavePort *slavePort;
-    SMMUATSSlavePort  atsSlavePort;
-    SMMUATSMasterPort atsMasterPort;
+    SMMUDevicePort *devicePort;
+    SMMUATSDevicePort  atsDevicePort;
+    SMMUATSMemoryPort atsMemPort;
 
     // in bytes
     const unsigned portWidth;
@@ -93,14 +93,14 @@
     std::list<SMMUTranslationProcess *> dependentWrites[SMMU_MAX_TRANS_ID];
     SMMUSignal dependentReqRemoved;
 
-    // Receiving translation requests from the master device
+    // Receiving translation requests from the requestor device
     Tick recvAtomic(PacketPtr pkt);
     bool recvTimingReq(PacketPtr pkt);
     void schedTimingResp(PacketPtr pkt);
 
-    Tick atsSlaveRecvAtomic(PacketPtr pkt);
-    bool atsSlaveRecvTimingReq(PacketPtr pkt);
-    bool atsMasterRecvTimingResp(PacketPtr pkt);
+    Tick atsRecvAtomic(PacketPtr pkt);
+    bool atsRecvTimingReq(PacketPtr pkt);
+    bool atsRecvTimingResp(PacketPtr pkt);
     void schedAtsTimingResp(PacketPtr pkt);
 
     void scheduleDeviceRetry();
@@ -112,24 +112,24 @@
 
     SMMUDeviceRetryEvent sendDeviceRetryEvent;
     EventWrapper<
-        SMMUv3SlaveInterface,
-        &SMMUv3SlaveInterface::atsSendDeviceRetry> atsSendDeviceRetryEvent;
+        SMMUv3DeviceInterface,
+        &SMMUv3DeviceInterface::atsSendDeviceRetry> atsSendDeviceRetryEvent;
 
     Port& getPort(const std::string &name, PortID id) override;
 
   public:
-    SMMUv3SlaveInterface(const SMMUv3SlaveInterfaceParams *p);
+    SMMUv3DeviceInterface(const SMMUv3DeviceInterfaceParams *p);
 
-    ~SMMUv3SlaveInterface()
+    ~SMMUv3DeviceInterface()
     {
         delete microTLB;
         delete mainTLB;
     }
 
-    const SMMUv3SlaveInterfaceParams *
+    const SMMUv3DeviceInterfaceParams *
     params() const
     {
-        return static_cast<const SMMUv3SlaveInterfaceParams *>(_params);
+        return static_cast<const SMMUv3DeviceInterfaceParams *>(_params);
     }
 
     DrainState drain() override;
@@ -138,4 +138,4 @@
     void sendRange();
 };
 
-#endif /* __DEV_ARM_SMMU_V3_SLAVEIFC_HH__ */
+#endif /* __DEV_ARM_SMMU_V3_DEVICEIFC_HH__ */
diff --git a/src/dev/arm/smmu_v3_events.cc b/src/dev/arm/smmu_v3_events.cc
index be0f2df..774f2da 100644
--- a/src/dev/arm/smmu_v3_events.cc
+++ b/src/dev/arm/smmu_v3_events.cc
@@ -37,7 +37,7 @@
 
 #include "dev/arm/smmu_v3_events.hh"
 
-#include "dev/arm/smmu_v3_slaveifc.hh"
+#include "dev/arm/smmu_v3_deviceifc.hh"
 
 void
 SMMUDeviceRetryEvent::process()
diff --git a/src/dev/arm/smmu_v3_events.hh b/src/dev/arm/smmu_v3_events.hh
index e8b4975..cfc980e 100644
--- a/src/dev/arm/smmu_v3_events.hh
+++ b/src/dev/arm/smmu_v3_events.hh
@@ -41,15 +41,15 @@
 #include <base/types.hh>
 #include <sim/eventq.hh>
 
-class SMMUv3SlaveInterface;
+class SMMUv3DeviceInterface;
 
 class SMMUDeviceRetryEvent : public Event
 {
   private:
-    SMMUv3SlaveInterface &smmuIfc;
+    SMMUv3DeviceInterface &smmuIfc;
 
   public:
-    SMMUDeviceRetryEvent(SMMUv3SlaveInterface &ifc)
+    SMMUDeviceRetryEvent(SMMUv3DeviceInterface &ifc)
         : smmuIfc(ifc)
     {}
 
@@ -58,7 +58,7 @@
     const std::string name() const;
 
     const char *description() const
-    { return "SlaveRetryEvent"; }
+    { return "DeviceRetryEvent"; }
 };
 
 #endif /* __DEV_ARM_SMMU_V3_EVENTS_HH__ */
diff --git a/src/dev/arm/smmu_v3_ports.cc b/src/dev/arm/smmu_v3_ports.cc
index 3f54250..5d5e787 100644
--- a/src/dev/arm/smmu_v3_ports.cc
+++ b/src/dev/arm/smmu_v3_ports.cc
@@ -39,67 +39,67 @@
 
 #include "base/logging.hh"
 #include "dev/arm/smmu_v3.hh"
-#include "dev/arm/smmu_v3_slaveifc.hh"
+#include "dev/arm/smmu_v3_deviceifc.hh"
 
-SMMUMasterPort::SMMUMasterPort(const std::string &_name, SMMUv3 &_smmu) :
+SMMURequestPort::SMMURequestPort(const std::string &_name, SMMUv3 &_smmu) :
     RequestPort(_name, &_smmu),
     smmu(_smmu)
 {}
 
 bool
-SMMUMasterPort::recvTimingResp(PacketPtr pkt)
+SMMURequestPort::recvTimingResp(PacketPtr pkt)
 {
-    return smmu.masterRecvTimingResp(pkt);
+    return smmu.recvTimingResp(pkt);
 }
 
 void
-SMMUMasterPort::recvReqRetry()
+SMMURequestPort::recvReqRetry()
 {
-    return smmu.masterRecvReqRetry();
+    return smmu.recvReqRetry();
 }
 
-SMMUMasterTableWalkPort::SMMUMasterTableWalkPort(const std::string &_name,
+SMMUTableWalkPort::SMMUTableWalkPort(const std::string &_name,
                                                  SMMUv3 &_smmu) :
     RequestPort(_name, &_smmu),
     smmu(_smmu)
 {}
 
 bool
-SMMUMasterTableWalkPort::recvTimingResp(PacketPtr pkt)
+SMMUTableWalkPort::recvTimingResp(PacketPtr pkt)
 {
-    return smmu.masterTableWalkRecvTimingResp(pkt);
+    return smmu.tableWalkRecvTimingResp(pkt);
 }
 
 void
-SMMUMasterTableWalkPort::recvReqRetry()
+SMMUTableWalkPort::recvReqRetry()
 {
-    return smmu.masterTableWalkRecvReqRetry();
+    return smmu.tableWalkRecvReqRetry();
 }
 
-SMMUSlavePort::SMMUSlavePort(const std::string &_name,
-                             SMMUv3SlaveInterface &_ifc,
+SMMUDevicePort::SMMUDevicePort(const std::string &_name,
+                             SMMUv3DeviceInterface &_ifc,
                              PortID _id)
 :
-    QueuedSlavePort(_name, &_ifc, respQueue, _id),
+    QueuedResponsePort(_name, &_ifc, respQueue, _id),
     ifc(_ifc),
     respQueue(_ifc, *this)
 {}
 
 void
-SMMUSlavePort::recvFunctional(PacketPtr pkt)
+SMMUDevicePort::recvFunctional(PacketPtr pkt)
 {
     if (!respQueue.trySatisfyFunctional(pkt))
         recvAtomic(pkt);
 }
 
 Tick
-SMMUSlavePort::recvAtomic(PacketPtr pkt)
+SMMUDevicePort::recvAtomic(PacketPtr pkt)
 {
     return ifc.recvAtomic(pkt);
 }
 
 bool
-SMMUSlavePort::recvTimingReq(PacketPtr pkt)
+SMMUDevicePort::recvTimingReq(PacketPtr pkt)
 {
     return ifc.recvTimingReq(pkt);
 }
@@ -136,41 +136,41 @@
     return list;
 }
 
-SMMUATSMasterPort::SMMUATSMasterPort(const std::string &_name,
-                                     SMMUv3SlaveInterface &_ifc) :
-    QueuedMasterPort(_name, &_ifc, reqQueue, snoopRespQueue),
+SMMUATSMemoryPort::SMMUATSMemoryPort(const std::string &_name,
+                                     SMMUv3DeviceInterface &_ifc) :
+    QueuedRequestPort(_name, &_ifc, reqQueue, snoopRespQueue),
     ifc(_ifc),
     reqQueue(_ifc, *this),
     snoopRespQueue(_ifc, *this)
 {}
 
 bool
-SMMUATSMasterPort::recvTimingResp(PacketPtr pkt)
+SMMUATSMemoryPort::recvTimingResp(PacketPtr pkt)
 {
-    return ifc.atsMasterRecvTimingResp(pkt);
+    return ifc.atsRecvTimingResp(pkt);
 }
 
-SMMUATSSlavePort::SMMUATSSlavePort(const std::string &_name,
-                                   SMMUv3SlaveInterface &_ifc) :
-    QueuedSlavePort(_name, &_ifc, respQueue),
+SMMUATSDevicePort::SMMUATSDevicePort(const std::string &_name,
+                                   SMMUv3DeviceInterface &_ifc) :
+    QueuedResponsePort(_name, &_ifc, respQueue),
     ifc(_ifc),
     respQueue(_ifc, *this)
 {}
 
 void
-SMMUATSSlavePort::recvFunctional(PacketPtr pkt)
+SMMUATSDevicePort::recvFunctional(PacketPtr pkt)
 {
     panic("Functional access on ATS port!");
 }
 
 Tick
-SMMUATSSlavePort::recvAtomic(PacketPtr pkt)
+SMMUATSDevicePort::recvAtomic(PacketPtr pkt)
 {
-    return ifc.atsSlaveRecvAtomic(pkt);
+    return ifc.atsRecvAtomic(pkt);
 }
 
 bool
-SMMUATSSlavePort::recvTimingReq(PacketPtr pkt)
+SMMUATSDevicePort::recvTimingReq(PacketPtr pkt)
 {
-    return ifc.atsSlaveRecvTimingReq(pkt);
+    return ifc.atsRecvTimingReq(pkt);
 }
diff --git a/src/dev/arm/smmu_v3_ports.hh b/src/dev/arm/smmu_v3_ports.hh
index ee68bbb..9d567fa8 100644
--- a/src/dev/arm/smmu_v3_ports.hh
+++ b/src/dev/arm/smmu_v3_ports.hh
@@ -42,9 +42,9 @@
 #include "mem/tport.hh"
 
 class SMMUv3;
-class SMMUv3SlaveInterface;
+class SMMUv3DeviceInterface;
 
-class SMMUMasterPort : public RequestPort
+class SMMURequestPort : public RequestPort
 {
   protected:
     SMMUv3 &smmu;
@@ -53,12 +53,12 @@
     virtual void recvReqRetry();
 
   public:
-    SMMUMasterPort(const std::string &_name, SMMUv3 &_smmu);
-    virtual ~SMMUMasterPort() {}
+    SMMURequestPort(const std::string &_name, SMMUv3 &_smmu);
+    virtual ~SMMURequestPort() {}
 };
 
-// Separate master port to send MMU initiated requests on
-class SMMUMasterTableWalkPort : public RequestPort
+// Separate request port to send MMU initiated requests on
+class SMMUTableWalkPort : public RequestPort
 {
   protected:
     SMMUv3 &smmu;
@@ -67,14 +67,14 @@
     virtual void recvReqRetry();
 
   public:
-    SMMUMasterTableWalkPort(const std::string &_name, SMMUv3 &_smmu);
-    virtual ~SMMUMasterTableWalkPort() {}
+    SMMUTableWalkPort(const std::string &_name, SMMUv3 &_smmu);
+    virtual ~SMMUTableWalkPort() {}
 };
 
-class SMMUSlavePort : public QueuedSlavePort
+class SMMUDevicePort : public QueuedResponsePort
 {
   protected:
-    SMMUv3SlaveInterface &ifc;
+    SMMUv3DeviceInterface &ifc;
     RespPacketQueue respQueue;
 
     virtual void recvFunctional(PacketPtr pkt);
@@ -82,10 +82,10 @@
     virtual bool recvTimingReq(PacketPtr pkt);
 
   public:
-    SMMUSlavePort(const std::string &_name,
-                  SMMUv3SlaveInterface &_ifc,
+    SMMUDevicePort(const std::string &_name,
+                  SMMUv3DeviceInterface &_ifc,
                   PortID _id = InvalidPortID);
-    virtual ~SMMUSlavePort() {}
+    virtual ~SMMUDevicePort() {}
 
     virtual AddrRangeList getAddrRanges() const
     { return AddrRangeList { AddrRange(0, UINT64_MAX) }; }
@@ -106,24 +106,24 @@
     virtual ~SMMUControlPort() {}
 };
 
-class SMMUATSMasterPort : public QueuedMasterPort
+class SMMUATSMemoryPort : public QueuedRequestPort
 {
   protected:
-    SMMUv3SlaveInterface &ifc;
+    SMMUv3DeviceInterface &ifc;
     ReqPacketQueue reqQueue;
     SnoopRespPacketQueue snoopRespQueue;
 
     virtual bool recvTimingResp(PacketPtr pkt);
 
   public:
-    SMMUATSMasterPort(const std::string &_name, SMMUv3SlaveInterface &_ifc);
-    virtual ~SMMUATSMasterPort() {}
+    SMMUATSMemoryPort(const std::string &_name, SMMUv3DeviceInterface &_ifc);
+    virtual ~SMMUATSMemoryPort() {}
 };
 
-class SMMUATSSlavePort : public QueuedSlavePort
+class SMMUATSDevicePort : public QueuedResponsePort
 {
   protected:
-    SMMUv3SlaveInterface &ifc;
+    SMMUv3DeviceInterface &ifc;
     RespPacketQueue respQueue;
 
     virtual void recvFunctional(PacketPtr pkt);
@@ -134,8 +134,8 @@
     { return AddrRangeList(); }
 
   public:
-    SMMUATSSlavePort(const std::string &_name, SMMUv3SlaveInterface &_ifc);
-    virtual ~SMMUATSSlavePort() {}
+    SMMUATSDevicePort(const std::string &_name, SMMUv3DeviceInterface &_ifc);
+    virtual ~SMMUATSDevicePort() {}
 };
 
 #endif /* __DEV_ARM_SMMU_V3_PORTS_HH__ */
diff --git a/src/dev/arm/smmu_v3_proc.cc b/src/dev/arm/smmu_v3_proc.cc
index 9dec2cb..0a11c15 100644
--- a/src/dev/arm/smmu_v3_proc.cc
+++ b/src/dev/arm/smmu_v3_proc.cc
@@ -68,15 +68,15 @@
 void
 SMMUProcess::doRead(Yield &yield, Addr addr, void *ptr, size_t size)
 {
-    doSemaphoreDown(yield, smmu.masterPortSem);
+    doSemaphoreDown(yield, smmu.requestPortSem);
     doDelay(yield, Cycles(1)); // request - assume 1 cycle
-    doSemaphoreUp(smmu.masterPortSem);
+    doSemaphoreUp(smmu.requestPortSem);
 
     SMMUAction a;
     a.type = ACTION_SEND_REQ;
 
     RequestPtr req = std::make_shared<Request>(
-        addr, size, 0, smmu.masterId);
+        addr, size, 0, smmu.requestorId);
 
     req->taskId(ContextSwitchTaskId::DMA);
 
@@ -97,18 +97,19 @@
 void
 SMMUProcess::doWrite(Yield &yield, Addr addr, const void *ptr, size_t size)
 {
-    unsigned nbeats = (size + (smmu.masterPortWidth-1)) / smmu.masterPortWidth;
+    unsigned nbeats = (size + (smmu.requestPortWidth-1))
+                            / smmu.requestPortWidth;
 
-    doSemaphoreDown(yield, smmu.masterPortSem);
+    doSemaphoreDown(yield, smmu.requestPortSem);
     doDelay(yield, Cycles(nbeats));
-    doSemaphoreUp(smmu.masterPortSem);
+    doSemaphoreUp(smmu.requestPortSem);
 
 
     SMMUAction a;
     a.type = ACTION_SEND_REQ;
 
     RequestPtr req = std::make_shared<Request>(
-        addr, size, 0, smmu.masterId);
+        addr, size, 0, smmu.requestorId);
 
     req->taskId(ContextSwitchTaskId::DMA);
 
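
Note: the beat counts used above (and in smmu_v3_transl.cc below) are a
ceiling division of the access size by the request-port width, both in
bytes. Purely illustrative:

    def num_beats(size, port_width):
        # mirrors the (size + (width - 1)) / width expression in doWrite()
        return (size + (port_width - 1)) // port_width

    assert num_beats(64, 16) == 4   # exact multiple of the port width
    assert num_beats(65, 16) == 5   # a partial final beat still costs a cycle
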
diff --git a/src/dev/arm/smmu_v3_proc.hh b/src/dev/arm/smmu_v3_proc.hh
index 89b5a67..fe81d19 100644
--- a/src/dev/arm/smmu_v3_proc.hh
+++ b/src/dev/arm/smmu_v3_proc.hh
@@ -46,7 +46,7 @@
 #include "base/types.hh"
 #include "mem/packet.hh"
 
-class SMMUv3SlaveInterface;
+class SMMUv3DeviceInterface;
 
 /*
  * The meaning of these becomes apparent when you
@@ -67,7 +67,7 @@
 {
     SMMUActionType type;
     PacketPtr pkt;
-    SMMUv3SlaveInterface *ifc;
+    SMMUv3DeviceInterface *ifc;
     Tick delay;
 };
 
diff --git a/src/dev/arm/smmu_v3_transl.cc b/src/dev/arm/smmu_v3_transl.cc
index c7b20f9..ab8a0e7 100644
--- a/src/dev/arm/smmu_v3_transl.cc
+++ b/src/dev/arm/smmu_v3_transl.cc
@@ -77,12 +77,12 @@
 }
 
 SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name,
-    SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
+    SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc)
   :
     SMMUProcess(name, _smmu),
     ifc(_ifc)
 {
-    // Decrease number of pending translation slots on the slave interface
+    // Decrease number of pending translation slots on the device interface
     assert(ifc.xlateSlotsRemaining > 0);
     ifc.xlateSlotsRemaining--;
 
@@ -92,12 +92,12 @@
 
 SMMUTranslationProcess::~SMMUTranslationProcess()
 {
-    // Increase number of pending translation slots on the slave interface
+    // Increase number of pending translation slots on the device interface
     assert(ifc.pendingMemAccesses > 0);
     ifc.pendingMemAccesses--;
 
     // If no more SMMU memory accesses are pending,
-    // signal SMMU Slave Interface as drained
+    // signal SMMU Device Interface as drained
     if (ifc.pendingMemAccesses == 0) {
         ifc.signalDrainDone();
     }
@@ -147,12 +147,12 @@
                 request.addr, request.size);
 
 
-    unsigned numSlaveBeats = request.isWrite ?
+    unsigned numResponderBeats = request.isWrite ?
         (request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;
 
-    doSemaphoreDown(yield, ifc.slavePortSem);
-    doDelay(yield, Cycles(numSlaveBeats));
-    doSemaphoreUp(ifc.slavePortSem);
+    doSemaphoreDown(yield, ifc.devicePortSem);
+    doDelay(yield, Cycles(numResponderBeats));
+    doSemaphoreUp(ifc.devicePortSem);
 
 
     recvTick = curTick();
@@ -261,7 +261,7 @@
 
     bool haveConfig = true;
     if (!configCacheLookup(yield, context)) {
-        if(findConfig(yield, context, tr)) {
+        if (findConfig(yield, context, tr)) {
             configCacheUpdate(yield, context);
         } else {
             haveConfig = false;
@@ -295,7 +295,7 @@
             smmuTLBUpdate(yield, tr);
     }
 
-    // Simulate pipelined SMMU->SLAVE INTERFACE link
+    // Simulate pipelined SMMU->RESPONSE INTERFACE link
     doSemaphoreDown(yield, smmu.smmuIfcSem);
     doDelay(yield, Cycles(1)); // serialize transactions
     doSemaphoreUp(smmu.smmuIfcSem);
@@ -353,14 +353,14 @@
 
     if (!e) {
         DPRINTF(SMMUv3,
-                "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
+                "RESPONSE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
                 request.addr, request.sid, request.ssid);
 
         return false;
     }
 
     DPRINTF(SMMUv3,
-            "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
+            "RESPONSE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
             "paddr=%#x\n", request.addr, e->vaMask, request.sid,
             request.ssid, e->pa);
 
@@ -465,7 +465,7 @@
     doSemaphoreDown(yield, ifc.mainTLBSem);
 
     DPRINTF(SMMUv3,
-            "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
+            "RESPONSE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
             "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
 
     ifc.mainTLB->store(e, alloc);
@@ -1226,14 +1226,14 @@
 {
     assert(tr.fault == FAULT_NONE);
 
-    unsigned numMasterBeats = request.isWrite ?
-        (request.size + (smmu.masterPortWidth-1))
-            / smmu.masterPortWidth :
+    unsigned numRequestorBeats = request.isWrite ?
+        (request.size + (smmu.requestPortWidth-1))
+            / smmu.requestPortWidth :
         1;
 
-    doSemaphoreDown(yield, smmu.masterPortSem);
-    doDelay(yield, Cycles(numMasterBeats));
-    doSemaphoreUp(smmu.masterPortSem);
+    doSemaphoreDown(yield, smmu.requestPortSem);
+    doDelay(yield, Cycles(numRequestorBeats));
+    doSemaphoreUp(smmu.requestPortSem);
 
 
     smmu.translationTimeDist.sample(curTick() - recvTick);
@@ -1242,7 +1242,7 @@
         ifc.wrBufSlotsRemaining +=
             (request.size + (ifc.portWidth-1)) / ifc.portWidth;
 
-    smmu.scheduleSlaveRetries();
+    smmu.scheduleDeviceRetries();
 
 
     SMMUAction a;
diff --git a/src/dev/arm/smmu_v3_transl.hh b/src/dev/arm/smmu_v3_transl.hh
index d72c44c..878addd 100644
--- a/src/dev/arm/smmu_v3_transl.hh
+++ b/src/dev/arm/smmu_v3_transl.hh
@@ -39,9 +39,9 @@
 #define __DEV_ARM_SMMU_V3_TRANSL_HH__
 
 #include "base/compiler.hh"
+#include "dev/arm/smmu_v3_deviceifc.hh"
 #include "dev/arm/smmu_v3_proc.hh"
 #include "dev/arm/smmu_v3_ptops.hh"
-#include "dev/arm/smmu_v3_slaveifc.hh"
 #include "mem/packet.hh"
 
 struct SMMUTranslRequest
@@ -91,7 +91,7 @@
         bool       writable;
     };
 
-    SMMUv3SlaveInterface &ifc;
+    SMMUv3DeviceInterface &ifc;
 
     SMMUTranslRequest request;
     TranslContext context;
@@ -174,7 +174,7 @@
 
   public:
     SMMUTranslationProcess(const std::string &name, SMMUv3 &_smmu,
-        SMMUv3SlaveInterface &_ifc);
+        SMMUv3DeviceInterface &_ifc);
 
     virtual ~SMMUTranslationProcess();
 
diff --git a/src/dev/arm/vgic.cc b/src/dev/arm/vgic.cc
index 441e182..1542561 100644
--- a/src/dev/arm/vgic.cc
+++ b/src/dev/arm/vgic.cc
@@ -148,7 +148,7 @@
 
     DPRINTF(VGIC, "VGIC HVCtrl read register %#x\n", daddr);
 
-    /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
+    /* Munge the address: 0-0xfff is the usual space banked by requestor CPU.
      * Anything > that is 0x200-sized slices of 'per CPU' regs.
      */
     if (daddr & ~0x1ff) {
@@ -292,7 +292,7 @@
     DPRINTF(VGIC, "VGIC HVCtrl write register %#x <= %#x\n",
             daddr, pkt->getLE<uint32_t>());
 
-    /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
+    /* Munge the address: 0-0xfff is the usual space banked by requestor CPU.
      * Anything > that is 0x200-sized slices of 'per CPU' regs.
      */
     if (daddr & ~0x1ff) {
diff --git a/src/dev/dma_device.cc b/src/dev/dma_device.cc
index 03882e3..ace8f2c 100644
--- a/src/dev/dma_device.cc
+++ b/src/dev/dma_device.cc
@@ -52,7 +52,7 @@
 DmaPort::DmaPort(ClockedObject *dev, System *s,
                  uint32_t sid, uint32_t ssid)
     : RequestPort(dev->name() + ".dma", dev),
-      device(dev), sys(s), masterId(s->getMasterId(dev)),
+      device(dev), sys(s), requestorId(s->getRequestorId(dev)),
       sendEvent([this]{ sendDma(); }, dev->name()),
       pendingCount(0), inRetry(false),
       defaultSid(sid),
@@ -166,7 +166,7 @@
          !gen.done(); gen.next()) {
 
         req = std::make_shared<Request>(
-            gen.addr(), gen.size(), flag, masterId);
+            gen.addr(), gen.size(), flag, requestorId);
 
         req->setStreamId(sid);
         req->setSubStreamId(ssid);
diff --git a/src/dev/dma_device.hh b/src/dev/dma_device.hh
index 56c8d4c..2369fc4 100644
--- a/src/dev/dma_device.hh
+++ b/src/dev/dma_device.hh
@@ -114,7 +114,7 @@
     System *const sys;
 
     /** Id for all requests */
-    const MasterID masterId;
+    const RequestorID requestorId;
 
   protected:
     /** Use a deque as we never do any insertion or removal in the middle */
diff --git a/src/dev/mips/Malta.py b/src/dev/mips/Malta.py
index baed7e8..18fa219 100755
--- a/src/dev/mips/Malta.py
+++ b/src/dev/mips/Malta.py
@@ -59,6 +59,6 @@
     # earlier, since the bus object itself is typically defined at the
     # System level.
     def attachIO(self, bus):
-        self.cchip.pio = bus.master
-        self.io.pio = bus.master
-        self.uart.pio = bus.master
+        self.cchip.pio = bus.mem_side_ports
+        self.io.pio = bus.mem_side_ports
+        self.uart.pio = bus.mem_side_ports
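
Note: across the Python configs in this change, bus.master becomes
bus.mem_side_ports and bus.slave becomes bus.cpu_side_ports. The convention
is sketched below; 'dev' and 'iobus' are placeholders, not part of Malta.py:

    iobus = IOXBar()
    dev.pio = iobus.mem_side_ports   # bus issues requests to the device's pio
    dev.dma = iobus.cpu_side_ports   # device issues DMA requests into the bus
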
diff --git a/src/dev/net/dist_iface.cc b/src/dev/net/dist_iface.cc
index cc408e0..7974242 100644
--- a/src/dev/net/dist_iface.cc
+++ b/src/dev/net/dist_iface.cc
@@ -60,7 +60,7 @@
 DistIface::SyncEvent *DistIface::syncEvent = nullptr;
 unsigned DistIface::distIfaceNum = 0;
 unsigned DistIface::recvThreadsNum = 0;
-DistIface *DistIface::master = nullptr;
+DistIface *DistIface::primary = nullptr;
 bool DistIface::isSwitch = false;
 
 void
@@ -142,7 +142,7 @@
         needExit = ReqType::pending;
     if (needStopSync != ReqType::none)
         needStopSync = ReqType::pending;
-    DistIface::master->sendCmd(header);
+    DistIface::primary->sendCmd(header);
     // now wait until all receiver threads complete the synchronisation
     auto lf = [this]{ return waitNum == 0; };
     cv.wait(sync_lock, lf);
@@ -191,7 +191,7 @@
     } else {
         header.needStopSync = ReqType::none;
     }
-    DistIface::master->sendCmd(header);
+    DistIface::primary->sendCmd(header);
     return true;
 }
 
@@ -410,7 +410,7 @@
             start();
         } else {
             // Wake up thread contexts on non-switch nodes.
-            for (auto *tc: master->sys->threads) {
+            for (auto *tc: primary->sys->threads) {
                 if (tc->status() == ThreadContext::Suspended)
                     tc->activate();
                 else
@@ -503,10 +503,10 @@
             "send_tick:%llu send_delay:%llu link_delay:%llu recv_tick:%llu\n",
             send_tick, send_delay, linkDelay, recv_tick);
     // Every packet must be sent and arrive in the same quantum
-    assert(send_tick > master->syncEvent->when() -
-           master->syncEvent->repeat);
+    assert(send_tick > primary->syncEvent->when() -
+           primary->syncEvent->repeat);
     // No packet may be scheduled for receive in the arrival quantum
-    assert(send_tick + send_delay + linkDelay > master->syncEvent->when());
+    assert(send_tick + send_delay + linkDelay > primary->syncEvent->when());
 
     // Now we are about to schedule a recvDone event for the new data packet.
     // We use the same recvDone object for all incoming data packets. Packet
@@ -611,8 +611,8 @@
     rank(dist_rank), size(dist_size)
 {
     DPRINTF(DistEthernet, "DistIface() ctor rank:%d\n",dist_rank);
-    isMaster = false;
-    if (master == nullptr) {
+    isPrimary = false;
+    if (primary == nullptr) {
         assert(sync == nullptr);
         assert(syncEvent == nullptr);
         isSwitch = is_switch;
@@ -621,8 +621,8 @@
         else
             sync = new SyncNode();
         syncEvent = new SyncEvent();
-        master = this;
-        isMaster = true;
+        primary = this;
+        isPrimary = true;
     }
     distIfaceId = distIfaceNum;
     distIfaceNum++;
@@ -639,8 +639,8 @@
         assert(sync);
         delete sync;
     }
-    if (this == master)
-        master = nullptr;
+    if (this == primary)
+        primary = nullptr;
 }
 
 void
@@ -728,7 +728,7 @@
 {
     DPRINTF(DistEthernet,"DistIFace::drain() called\n");
     // This can be called multiple times in the same drain cycle.
-    if (this == master)
+    if (this == primary)
         syncEvent->draining(true);
     return DrainState::Drained;
 }
@@ -736,7 +736,7 @@
 void
 DistIface::drainResume() {
     DPRINTF(DistEthernet,"DistIFace::drainResume() called\n");
-    if (this == master)
+    if (this == primary)
         syncEvent->draining(false);
     recvScheduler.resumeRecvTicks();
 }
@@ -755,7 +755,7 @@
     SERIALIZE_SCALAR(dist_iface_id_orig);
 
     recvScheduler.serializeSection(cp, "recvScheduler");
-    if (this == master) {
+    if (this == primary) {
         sync->serializeSection(cp, "Sync");
     }
 }
@@ -774,7 +774,7 @@
              dist_iface_id_orig);
 
     recvScheduler.unserializeSection(cp, "recvScheduler");
-    if (this == master) {
+    if (this == primary) {
         sync->unserializeSection(cp, "Sync");
     }
 }
@@ -801,8 +801,8 @@
 
     // Initialize the seed for random generator to avoid the same sequence
     // in all gem5 peer processes
-    assert(master != nullptr);
-    if (this == master)
+    assert(primary != nullptr);
+    if (this == primary)
         random_mt.init(5489 * (rank+1) + 257);
 }
 
@@ -811,7 +811,7 @@
 {
     DPRINTF(DistEthernet, "DistIface::startup() started\n");
     // Schedule synchronization unless we are not a switch in pseudo_op mode.
-    if (this == master && (!syncStartOnPseudoOp || isSwitch))
+    if (this == primary && (!syncStartOnPseudoOp || isSwitch))
         syncEvent->start();
     DPRINTF(DistEthernet, "DistIface::startup() done\n");
 }
@@ -822,7 +822,7 @@
     bool ret = true;
     DPRINTF(DistEthernet, "DistIface::readyToCkpt() called, delay:%lu "
             "period:%lu\n", delay, period);
-    if (master) {
+    if (primary) {
         if (delay == 0) {
             inform("m5 checkpoint called with zero delay => triggering collaborative "
                    "checkpoint\n");
@@ -851,38 +851,38 @@
 DistIface::toggleSync(ThreadContext *tc)
 {
     // Unforunate that we have to populate the system pointer member this way.
-    master->sys = tc->getSystemPtr();
+    primary->sys = tc->getSystemPtr();
 
     // The invariant for both syncing and "unsyncing" is that all threads will
     // stop executing intructions until the desired sync state has been reached
     // for all nodes.  This is the easiest way to prevent deadlock (in the case
     // of "unsyncing") and causality errors (in the case of syncing).
-    if (master->syncEvent->scheduled()) {
+    if (primary->syncEvent->scheduled()) {
         inform("Request toggling syncronization off\n");
-        master->sync->requestStopSync(ReqType::collective);
+        primary->sync->requestStopSync(ReqType::collective);
 
         // At this point, we have no clue when everyone will reach the sync
         // stop point.  Suspend execution of all local thread contexts.
         // Dist-gem5 will reactivate all thread contexts when everyone has
         // reached the sync stop point.
 #if THE_ISA != NULL_ISA
-        for (auto *tc: master->sys->threads) {
+        for (auto *tc: primary->sys->threads) {
             if (tc->status() == ThreadContext::Active)
                 tc->quiesce();
         }
 #endif
     } else {
         inform("Request toggling syncronization on\n");
-        master->syncEvent->start();
+        primary->syncEvent->start();
 
         // We need to suspend all CPUs until the sync point is reached by all
         // nodes to prevent causality errors.  We can also schedule CPU
         // activation here, since we know exactly when the next sync will
         // occur.
 #if THE_ISA != NULL_ISA
-        for (auto *tc: master->sys->threads) {
+        for (auto *tc: primary->sys->threads) {
             if (tc->status() == ThreadContext::Active)
-                tc->quiesceTick(master->syncEvent->when() + 1);
+                tc->quiesceTick(primary->syncEvent->when() + 1);
         }
 #endif
     }
@@ -894,10 +894,10 @@
     bool ret = true;
     DPRINTF(DistEthernet, "DistIface::readyToExit() called, delay:%lu\n",
             delay);
-    if (master) {
+    if (primary) {
         // To successfully coordinate an exit, all nodes must be synchronising
-        if (!master->syncEvent->scheduled())
-            master->syncEvent->start();
+        if (!primary->syncEvent->scheduled())
+            primary->syncEvent->start();
 
         if (delay == 0) {
             inform("m5 exit called with zero delay => triggering collaborative "
@@ -917,8 +917,8 @@
 DistIface::rankParam()
 {
     uint64_t val;
-    if (master) {
-        val = master->rank;
+    if (primary) {
+        val = primary->rank;
     } else {
         warn("Dist-rank parameter is queried in single gem5 simulation.");
         val = 0;
@@ -930,8 +930,8 @@
 DistIface::sizeParam()
 {
     uint64_t val;
-    if (master) {
-        val = master->size;
+    if (primary) {
+        val = primary->size;
     } else {
         warn("Dist-size parameter is queried in single gem5 simulation.");
         val = 1;
diff --git a/src/dev/net/dist_iface.hh b/src/dev/net/dist_iface.hh
index e568549..04843aa 100644
--- a/src/dev/net/dist_iface.hh
+++ b/src/dev/net/dist_iface.hh
@@ -491,7 +491,7 @@
      */
     unsigned distIfaceId;
 
-    bool isMaster;
+    bool isPrimary;
 
   private:
     /**
@@ -507,10 +507,10 @@
      */
     static SyncEvent *syncEvent;
     /**
-     * The very first DistIface object created becomes the master. We need
-     * a master to co-ordinate the global synchronisation.
+     * The very first DistIface object created becomes the primary interface.
+     * We need a primary interface to co-ordinate the global synchronisation.
      */
-    static DistIface *master;
+    static DistIface *primary;
     /**
      * System pointer used to wakeup sleeping threads when stopping sync.
      */
@@ -635,7 +635,7 @@
      */
     static uint64_t sizeParam();
     /**
-     * Trigger the master to start/stop synchronization.
+     * Trigger the primary to start/stop synchronization.
      */
     static void toggleSync(ThreadContext *tc);
  };
diff --git a/src/dev/net/tcp_iface.cc b/src/dev/net/tcp_iface.cc
index ba21334..cb6fecb 100644
--- a/src/dev/net/tcp_iface.cc
+++ b/src/dev/net/tcp_iface.cc
@@ -87,7 +87,7 @@
               is_switch, num_nodes), serverName(server_name),
     serverPort(server_port), isSwitch(is_switch), listening(false)
 {
-    if (is_switch && isMaster) {
+    if (is_switch && isPrimary) {
         while (!listen(serverPort)) {
             DPRINTF(DistEthernet, "TCPIface(listen): Can't bind port %d\n",
                     serverPort);
@@ -307,7 +307,7 @@
 {
     DPRINTF(DistEthernetCmd, "TCPIface::sendCmd() type: %d\n",
             static_cast<int>(header.msgType));
-    // Global commands (i.e. sync request) are always sent by the master
+    // Global commands (i.e. sync request) are always sent by the primary
     // DistIface. The transfer method is simply implemented as point-to-point
     // messages for now
     for (auto s: sockRegistry)
diff --git a/src/dev/pci/CopyEngine.py b/src/dev/pci/CopyEngine.py
index abf4fbb..78b7e39 100644
--- a/src/dev/pci/CopyEngine.py
+++ b/src/dev/pci/CopyEngine.py
@@ -33,7 +33,7 @@
 class CopyEngine(PciDevice):
     type = 'CopyEngine'
     cxx_header = "dev/pci/copy_engine.hh"
-    dma = VectorMasterPort("Copy engine DMA port")
+    dma = VectorRequestPort("Copy engine DMA port")
     VendorID = 0x8086
     DeviceID = 0x1a38
     Revision = 0xA2 # CM2 stepping (newest listed)
diff --git a/src/dev/serial/terminal.cc b/src/dev/serial/terminal.cc
index 7cfc6d7..8b420da 100644
--- a/src/dev/serial/terminal.cc
+++ b/src/dev/serial/terminal.cc
@@ -204,7 +204,7 @@
     pollQueue.schedule(dataEvent);
 
     stringstream stream;
-    ccprintf(stream, "==== m5 slave terminal: Terminal %d ====", number);
+    ccprintf(stream, "==== m5 terminal: Terminal %d ====", number);
 
     // we need an actual carriage return followed by a newline for the
     // terminal
diff --git a/src/dev/sparc/T1000.py b/src/dev/sparc/T1000.py
index dee2207..c98fb86 100644
--- a/src/dev/sparc/T1000.py
+++ b/src/dev/sparc/T1000.py
@@ -112,8 +112,8 @@
     iob = Iob()
     # Attach I/O devices that are on chip
     def attachOnChipIO(self, bus):
-        self.iob.pio = bus.master
-        self.htod.pio = bus.master
+        self.iob.pio = bus.mem_side_ports
+        self.htod.pio = bus.mem_side_ports
 
 
     # Attach I/O devices to specified bus object.  Can't do this
@@ -122,17 +122,17 @@
     def attachIO(self, bus):
         self.hvuart.device = self.hterm
         self.puart0.device = self.pterm
-        self.fake_clk.pio = bus.master
-        self.fake_membnks.pio = bus.master
-        self.fake_l2_1.pio = bus.master
-        self.fake_l2_2.pio = bus.master
-        self.fake_l2_3.pio = bus.master
-        self.fake_l2_4.pio = bus.master
-        self.fake_l2esr_1.pio = bus.master
-        self.fake_l2esr_2.pio = bus.master
-        self.fake_l2esr_3.pio = bus.master
-        self.fake_l2esr_4.pio = bus.master
-        self.fake_ssi.pio = bus.master
-        self.fake_jbi.pio = bus.master
-        self.puart0.pio = bus.master
-        self.hvuart.pio = bus.master
+        self.fake_clk.pio = bus.mem_side_ports
+        self.fake_membnks.pio = bus.mem_side_ports
+        self.fake_l2_1.pio = bus.mem_side_ports
+        self.fake_l2_2.pio = bus.mem_side_ports
+        self.fake_l2_3.pio = bus.mem_side_ports
+        self.fake_l2_4.pio = bus.mem_side_ports
+        self.fake_l2esr_1.pio = bus.mem_side_ports
+        self.fake_l2esr_2.pio = bus.mem_side_ports
+        self.fake_l2esr_3.pio = bus.mem_side_ports
+        self.fake_l2esr_4.pio = bus.mem_side_ports
+        self.fake_ssi.pio = bus.mem_side_ports
+        self.fake_jbi.pio = bus.mem_side_ports
+        self.puart0.pio = bus.mem_side_ports
+        self.hvuart.pio = bus.mem_side_ports
diff --git a/src/dev/storage/Ide.py b/src/dev/storage/Ide.py
index 439a977..5edea49 100644
--- a/src/dev/storage/Ide.py
+++ b/src/dev/storage/Ide.py
@@ -28,13 +28,13 @@
 from m5.params import *
 from m5.objects.PciDevice import PciDevice
 
-class IdeID(Enum): vals = ['master', 'slave']
+class IdeID(Enum): vals = ['device0', 'device1']
 
 class IdeDisk(SimObject):
     type = 'IdeDisk'
     cxx_header = "dev/storage/ide_disk.hh"
     delay = Param.Latency('1us', "Fixed disk delay in microseconds")
-    driveID = Param.IdeID('master', "Drive ID")
+    driveID = Param.IdeID('device0', "Drive ID")
     image = Param.DiskImage("Disk image")
 
 class IdeController(PciDevice):
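
Note: with the enum renamed, a two-drive channel is declared with
driveID='device0' and driveID='device1'. A minimal sketch; the image
objects and file names are placeholders:

    disk0 = IdeDisk(driveID='device0', image=RawDiskImage(image_file='d0.img'))
    disk1 = IdeDisk(driveID='device1', image=RawDiskImage(image_file='d1.img'))
    ide = IdeController(disks=[disk0, disk1])
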
diff --git a/src/dev/storage/ide_ctrl.cc b/src/dev/storage/ide_ctrl.cc
index 632144c..47cdd10 100644
--- a/src/dev/storage/ide_ctrl.cc
+++ b/src/dev/storage/ide_ctrl.cc
@@ -77,7 +77,7 @@
         string newName, Addr _cmdSize, Addr _ctrlSize) :
     _name(newName),
     cmdAddr(0), cmdSize(_cmdSize), ctrlAddr(0), ctrlSize(_ctrlSize),
-    master(NULL), slave(NULL), selected(NULL)
+    device0(NULL), device1(NULL), selected(NULL)
 {
     bmiRegs.reset();
     bmiRegs.status.dmaCap0 = 1;
@@ -105,16 +105,16 @@
             continue;
         switch (i) {
           case 0:
-            primary.master = params()->disks[0];
+            primary.device0 = params()->disks[0];
             break;
           case 1:
-            primary.slave = params()->disks[1];
+            primary.device1 = params()->disks[1];
             break;
           case 2:
-            secondary.master = params()->disks[2];
+            secondary.device0 = params()->disks[2];
             break;
           case 3:
-            secondary.slave = params()->disks[3];
+            secondary.device1 = params()->disks[3];
             break;
           default:
             panic("IDE controllers support a maximum "
@@ -156,9 +156,9 @@
 IdeController::setDmaComplete(IdeDisk *disk)
 {
     Channel *channel;
-    if (disk == primary.master || disk == primary.slave) {
+    if (disk == primary.device0 || disk == primary.device1) {
         channel = &primary;
-    } else if (disk == secondary.master || disk == secondary.slave) {
+    } else if (disk == secondary.device0 || disk == secondary.device1) {
         channel = &secondary;
     } else {
         panic("Unable to find disk based on pointer %#x\n", disk);
diff --git a/src/dev/storage/ide_ctrl.hh b/src/dev/storage/ide_ctrl.hh
index 44e8c5b..51e1603 100644
--- a/src/dev/storage/ide_ctrl.hh
+++ b/src/dev/storage/ide_ctrl.hh
@@ -89,8 +89,13 @@
             uint32_t bmidtp;
         } bmiRegs;
 
-        /** IDE disks connected to this controller */
-        IdeDisk *master, *slave;
+        /**
+         * IDE disks connected to this controller. For more details about
+         * device0 and device1 see:
+         * https://en.wikipedia.org/wiki/Parallel_ATA
+         * #Multiple_devices_on_a_cable
+         */
+        IdeDisk *device0, *device1;
 
         /** Currently selected disk */
         IdeDisk *selected;
@@ -98,10 +103,10 @@
         bool selectBit;
 
         void
-        select(bool selSlave)
+        select(bool select_device_1)
         {
-            selectBit = selSlave;
-            selected = selectBit ? slave : master;
+            selectBit = select_device_1;
+            selected = selectBit ? device1 : device0;
         }
 
         void accessCommand(Addr offset, int size, uint8_t *data, bool read);
diff --git a/src/dev/storage/ide_disk.hh b/src/dev/storage/ide_disk.hh
index 8a90d1c..9f42941 100644
--- a/src/dev/storage/ide_disk.hh
+++ b/src/dev/storage/ide_disk.hh
@@ -245,7 +245,7 @@
     uint32_t curPrdAddr;
     /** PRD entry */
     PrdTableEntry curPrd;
-    /** Device ID (master=0/slave=1) */
+    /** Device ID (device0=0/device1=1) */
     int devID;
     /** Interrupt pending */
     bool intrPending;
diff --git a/src/dev/x86/I82094AA.py b/src/dev/x86/I82094AA.py
index 2cb210a..ce1f394 100644
--- a/src/dev/x86/I82094AA.py
+++ b/src/dev/x86/I82094AA.py
@@ -34,7 +34,7 @@
     cxx_class = 'X86ISA::I82094AA'
     cxx_header = "dev/x86/i82094aa.hh"
     apic_id = Param.Int(1, 'APIC id for this IO APIC')
-    int_master = RequestPort("Port for sending interrupt messages")
+    int_requestor = RequestPort("Port for sending interrupt messages")
     int_latency = Param.Latency('1ns', \
             "Latency for an interrupt to propagate through this device.")
     external_int_pic = Param.I8259(NULL, "External PIC, if any")
diff --git a/src/dev/x86/Pc.py b/src/dev/x86/Pc.py
index 4a732e6..a0a9825 100644
--- a/src/dev/x86/Pc.py
+++ b/src/dev/x86/Pc.py
@@ -75,12 +75,12 @@
 
     def attachIO(self, bus, dma_ports = []):
         self.south_bridge.attachIO(bus, dma_ports)
-        self.i_dont_exist1.pio = bus.master
-        self.i_dont_exist2.pio = bus.master
-        self.behind_pci.pio = bus.master
-        self.com_1.pio = bus.master
-        self.fake_com_2.pio = bus.master
-        self.fake_com_3.pio = bus.master
-        self.fake_com_4.pio = bus.master
-        self.fake_floppy.pio = bus.master
+        self.i_dont_exist1.pio = bus.mem_side_ports
+        self.i_dont_exist2.pio = bus.mem_side_ports
+        self.behind_pci.pio = bus.mem_side_ports
+        self.com_1.pio = bus.mem_side_ports
+        self.fake_com_2.pio = bus.mem_side_ports
+        self.fake_com_3.pio = bus.mem_side_ports
+        self.fake_com_4.pio = bus.mem_side_ports
+        self.fake_floppy.pio = bus.mem_side_ports
         self.pci_host.pio = bus.default
diff --git a/src/dev/x86/SouthBridge.py b/src/dev/x86/SouthBridge.py
index 22e2d1f..095f88b 100644
--- a/src/dev/x86/SouthBridge.py
+++ b/src/dev/x86/SouthBridge.py
@@ -97,15 +97,15 @@
         self.speaker.i8254 = self.pit
         self.io_apic.external_int_pic = self.pic1
         # Connect to the bus
-        self.cmos.pio = bus.master
-        self.dma1.pio = bus.master
-        self.ide.pio = bus.master
+        self.cmos.pio = bus.mem_side_ports
+        self.dma1.pio = bus.mem_side_ports
+        self.ide.pio = bus.mem_side_ports
         if dma_ports.count(self.ide.dma) == 0:
-                self.ide.dma = bus.slave
-        self.keyboard.pio = bus.master
-        self.pic1.pio = bus.master
-        self.pic2.pio = bus.master
-        self.pit.pio = bus.master
-        self.speaker.pio = bus.master
-        self.io_apic.pio = bus.master
-        self.io_apic.int_master = bus.slave
+                self.ide.dma = bus.cpu_side_ports
+        self.keyboard.pio = bus.mem_side_ports
+        self.pic1.pio = bus.mem_side_ports
+        self.pic2.pio = bus.mem_side_ports
+        self.pit.pio = bus.mem_side_ports
+        self.speaker.pio = bus.mem_side_ports
+        self.io_apic.pio = bus.mem_side_ports
+        self.io_apic.int_requestor = bus.cpu_side_ports
diff --git a/src/dev/x86/i82094aa.cc b/src/dev/x86/i82094aa.cc
index 8d91cc6..c7817dc 100644
--- a/src/dev/x86/i82094aa.cc
+++ b/src/dev/x86/i82094aa.cc
@@ -42,7 +42,7 @@
 X86ISA::I82094AA::I82094AA(Params *p)
     : BasicPioDevice(p, 20), extIntPic(p->external_int_pic),
       lowestPriorityOffset(0),
-      intMasterPort(name() + ".int_master", this, this, p->int_latency)
+      intRequestPort(name() + ".int_requestor", this, this, p->int_latency)
 {
     // This assumes there's only one I/O APIC in the system and since the apic
     // id is stored in a 8-bit field with 0xff meaning broadcast, the id must
@@ -71,16 +71,16 @@
     // the piodevice init() function.
     BasicPioDevice::init();
 
-    // If the master port isn't connected, we can't send interrupts anywhere.
-    panic_if(!intMasterPort.isConnected(),
+    // If the request port isn't connected, we can't send interrupts anywhere.
+    panic_if(!intRequestPort.isConnected(),
             "Int port not connected to anything!");
 }
 
 Port &
 X86ISA::I82094AA::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "int_master")
-        return intMasterPort;
+    if (if_name == "int_request")
+        return intRequestPort;
     if (if_name == "inputs")
         return *inputs.at(idx);
     else
@@ -242,7 +242,7 @@
         }
         for (auto id: apics) {
             PacketPtr pkt = buildIntTriggerPacket(id, message);
-            intMasterPort.sendMessage(pkt, sys->isTimingMode());
+            intRequestPort.sendMessage(pkt, sys->isTimingMode());
         }
     }
 }
diff --git a/src/dev/x86/i82094aa.hh b/src/dev/x86/i82094aa.hh
index 2c81e27..a5263b3 100644
--- a/src/dev/x86/i82094aa.hh
+++ b/src/dev/x86/i82094aa.hh
@@ -82,7 +82,7 @@
 
     std::vector<IntSinkPin<I82094AA> *> inputs;
 
-    IntMasterPort<I82094AA> intMasterPort;
+    IntRequestPort<I82094AA> intRequestPort;
 
   public:
     typedef I82094AAParams Params;
diff --git a/src/dev/x86/i8259.cc b/src/dev/x86/i8259.cc
index 1e081d7..8ba1235 100644
--- a/src/dev/x86/i8259.cc
+++ b/src/dev/x86/i8259.cc
@@ -191,7 +191,8 @@
           case 0x2:
             DPRINTF(I8259, "Received initialization command word 3.\n");
             if (mode == Enums::I8259Master) {
-                DPRINTF(I8259, "Slaves attached to IRQs:%s%s%s%s%s%s%s%s\n",
+                DPRINTF(I8259, "Responders attached to "
+                        "IRQs:%s%s%s%s%s%s%s%s\n",
                         bits(val, 0) ? " 0" : "",
                         bits(val, 1) ? " 1" : "",
                         bits(val, 2) ? " 2" : "",
@@ -202,7 +203,7 @@
                         bits(val, 7) ? " 7" : "");
                 cascadeBits = val;
             } else {
-                DPRINTF(I8259, "Slave ID is %d.\n", val & mask(3));
+                DPRINTF(I8259, "Responder ID is %d.\n", val & mask(3));
                 cascadeBits = val & mask(3);
             }
             if (expectICW4)
@@ -307,10 +308,10 @@
 X86ISA::I8259::getVector()
 {
     /*
-     * This code only handles one slave. Since that's how the PC platform
+     * This code only handles one responder. Since that's how the PC platform
      * always uses the 8259 PIC, there shouldn't be any need for more. If
-     * there -is- a need for more for some reason, "slave" can become a
-     * vector of slaves.
+     * there -is- a need for more for some reason, "responder" can become a
+     * vector of responders.
      */
     int line = findMsbSet(IRR);
     IRR &= ~(1 << line);
@@ -321,7 +322,7 @@
         ISR |= 1 << line;
     }
     if (slave && bits(cascadeBits, line)) {
-        DPRINTF(I8259, "Interrupt was from slave who will "
+        DPRINTF(I8259, "Interrupt was from responder who will "
                 "provide the vector.\n");
         return slave->getVector();
     }
diff --git a/src/dev/x86/i8259.hh b/src/dev/x86/i8259.hh
index 85f9ef8..889a8cb 100644
--- a/src/dev/x86/i8259.hh
+++ b/src/dev/x86/i8259.hh
@@ -49,7 +49,7 @@
     std::vector<IntSourcePin<I8259> *> output;
     std::vector<IntSinkPin<I8259> *> inputs;
     Enums::X86I8259CascadeMode mode;
-    I8259 * slave;
+    I8259 *slave;
 
     // Interrupt Request Register
     uint8_t IRR;
@@ -62,8 +62,8 @@
     uint8_t vectorOffset;
 
     bool cascadeMode;
-    // A bit vector of lines with slaves attached, or the slave id, depending
-    // on if this is a master or slave PIC.
+    // A bit vector of lines with responders attached, or the responder id,
+    // depending on whether this is a requestor or responder PIC.
     uint8_t cascadeBits;
 
     bool edgeTriggered;
diff --git a/src/dev/x86/intdev.hh b/src/dev/x86/intdev.hh
index a681a2e..f757fdb 100644
--- a/src/dev/x86/intdev.hh
+++ b/src/dev/x86/intdev.hh
@@ -53,12 +53,12 @@
 {
 
 template <class Device>
-class IntSlavePort : public SimpleTimingPort
+class IntResponsePort : public SimpleTimingPort
 {
     Device * device;
 
   public:
-    IntSlavePort(const std::string& _name, SimObject* _parent,
+    IntResponsePort(const std::string& _name, SimObject* _parent,
                  Device* dev) :
         SimpleTimingPort(_name, _parent), device(dev)
     {
@@ -86,7 +86,7 @@
 buildIntPacket(Addr addr, T payload)
 {
     RequestPtr req = std::make_shared<Request>(
-        addr, sizeof(T), Request::UNCACHEABLE, Request::intMasterId);
+        addr, sizeof(T), Request::UNCACHEABLE, Request::intRequestorId);
     PacketPtr pkt = new Packet(req, MemCmd::WriteReq);
     pkt->allocate();
     pkt->setRaw<T>(payload);
@@ -94,7 +94,7 @@
 }
 
 template <class Device>
-class IntMasterPort : public QueuedMasterPort
+class IntRequestPort : public QueuedRequestPort
 {
   private:
     ReqPacketQueue reqQueue;
@@ -113,9 +113,9 @@
     static void defaultOnCompletion(PacketPtr pkt) { delete pkt; }
 
   public:
-    IntMasterPort(const std::string& _name, SimObject* _parent,
+    IntRequestPort(const std::string& _name, SimObject* _parent,
                   Device* dev, Tick _latency) :
-        QueuedMasterPort(_name, _parent, reqQueue, snoopRespQueue),
+        QueuedRequestPort(_name, _parent, reqQueue, snoopRespQueue),
         reqQueue(*_parent, *this), snoopRespQueue(*_parent, *this),
         device(dev), latency(_latency)
     {
diff --git a/src/gpu-compute/GPU.py b/src/gpu-compute/GPU.py
index 05df84d..b82ad18 100644
--- a/src/gpu-compute/GPU.py
+++ b/src/gpu-compute/GPU.py
@@ -159,8 +159,8 @@
     coalescer_to_vrf_bus_width = Param.Int(64, "Coalescer->VRF data bus "\
                                            "width  in bytes")
 
-    memory_port = VectorMasterPort("Port to the memory system")
-    translation_port = VectorMasterPort('Port to the TLB hierarchy')
+    memory_port = VectorRequestPort("Port to the memory system")
+    translation_port = VectorRequestPort('Port to the TLB hierarchy')
     sqc_port = RequestPort("Port to the SQC (I-cache")
     sqc_tlb_port = RequestPort("Port to the TLB for the SQC (I-cache)")
     scalar_port = RequestPort("Port to the scalar data cache")
diff --git a/src/gpu-compute/X86GPUTLB.py b/src/gpu-compute/X86GPUTLB.py
index bd22bee..45cb962 100644
--- a/src/gpu-compute/X86GPUTLB.py
+++ b/src/gpu-compute/X86GPUTLB.py
@@ -58,8 +58,12 @@
     missLatency1 = Param.Int(5, "Latency #1 of a TLB miss")
     missLatency2 = Param.Int(100, "Latency #2 of a TLB miss")
     maxOutstandingReqs = Param.Int(64, "# of maximum outstanding requests")
-    slave = VectorSlavePort("Port on side closer to CPU/CU")
-    master = VectorMasterPort("Port on side closer to memory")
+    cpu_side_ports = VectorResponsePort("Ports on side closer to CPU/CU")
+    slave    = DeprecatedParam(cpu_side_ports,
+                        '`slave` is now called `cpu_side_ports`')
+    mem_side_ports = VectorRequestPort("Ports on side closer to memory")
+    master   = DeprecatedParam(mem_side_ports,
+                        '`master` is now called `mem_side_ports`')
     allocationPolicy = Param.Bool(True, "Allocate on an access")
     accessDistance = Param.Bool(False, "print accessDistance stats")
 
@@ -69,6 +73,10 @@
     cxx_header = 'gpu-compute/tlb_coalescer.hh'
     probesPerCycle = Param.Int(2, "Number of TLB probes per cycle")
     coalescingWindow = Param.Int(1, "Permit coalescing across that many ticks")
-    slave = VectorSlavePort("Port on side closer to CPU/CU")
-    master = VectorMasterPort("Port on side closer to memory")
+    cpu_side_ports = VectorResponsePort("Port on side closer to CPU/CU")
+    slave    = DeprecatedParam(cpu_side_ports,
+                        '`slave` is now called `cpu_side_ports`')
+    mem_side_ports = VectorRequestPort("Port on side closer to memory")
+    master   = DeprecatedParam(mem_side_ports,
+                        '`master` is now called `mem_side_ports`')
     disableCoalescing = Param.Bool(False,"Dispable Coalescing")
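
Note: DeprecatedParam keeps old scripts working: assignments to the old
spelling are forwarded to the renamed port and emit a deprecation warning.
Sketch only; 'cu' stands in for an object with a translation_port request
port (see GPU.py above):

    tlb = X86GPUTLB()
    tlb.cpu_side_ports = cu.translation_port   # preferred new name
    # tlb.slave = cu.translation_port          # old name still resolves,
    #                                          # but warns at config time
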
diff --git a/src/gpu-compute/compute_unit.cc b/src/gpu-compute/compute_unit.cc
index 920257d..33f5c6e 100644
--- a/src/gpu-compute/compute_unit.cc
+++ b/src/gpu-compute/compute_unit.cc
@@ -95,7 +95,7 @@
     countPages(p->countPages),
     req_tick_latency(p->mem_req_latency * p->clk_domain->clockPeriod()),
     resp_tick_latency(p->mem_resp_latency * p->clk_domain->clockPeriod()),
-    _masterId(p->system->getMasterId(this, "ComputeUnit")),
+    _requestorId(p->system->getRequestorId(this, "ComputeUnit")),
     lds(*p->localDataStore), gmTokenPort(name() + ".gmTokenPort", this),
     ldsPort(csprintf("%s-port", name()), this),
     scalarDataPort(csprintf("%s-port", name()), this),
@@ -183,7 +183,7 @@
         tlbPort.emplace_back(csprintf("%s-port%d", name(), i), this, i);
     }
 
-    // Setup tokens for slave ports. The number of tokens in memSlaveTokens
+    // Setup tokens for response ports. The number of tokens in memPortTokens
     // is the total token count for the entire vector port (i.e., this CU).
     memPortTokens = new TokenManager(p->max_cu_tokens);
 
@@ -1235,7 +1235,7 @@
 
     if (!req) {
         req = std::make_shared<Request>(
-            0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
+            0, 0, 0, requestorId(), 0, gpuDynInst->wfDynId);
     }
 
     // all mem sync requests have Paddr == 0
@@ -1500,7 +1500,7 @@
             RequestPtr prefetch_req = std::make_shared<Request>(
                 vaddr + stride * pf * TheISA::PageBytes,
                 sizeof(uint8_t), 0,
-                computeUnit->masterId(),
+                computeUnit->requestorId(),
                 0, 0, nullptr);
 
             PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd);
@@ -1528,7 +1528,7 @@
     }
 
     // First we must convert the response cmd back to a request cmd so that
-    // the request can be sent through the cu's master port
+    // the request can be sent through the cu's request port
     PacketPtr new_pkt = new Packet(pkt->req, requestCmd);
     new_pkt->dataStatic(pkt->getPtr<uint8_t>());
     delete pkt->senderState;
@@ -1749,7 +1749,7 @@
     if (success) {
         // pkt is reused in fetch(), don't delete it here.  However, we must
         // reset the command to be a request so that it can be sent through
-        // the cu's master port
+        // the cu's request port
         assert(pkt->cmd == MemCmd::ReadResp);
         pkt->cmd = MemCmd::ReadReq;
 
diff --git a/src/gpu-compute/compute_unit.hh b/src/gpu-compute/compute_unit.hh
index f7484af..fe2091d 100644
--- a/src/gpu-compute/compute_unit.hh
+++ b/src/gpu-compute/compute_unit.hh
@@ -458,13 +458,13 @@
     void processFetchReturn(PacketPtr pkt);
     void updatePageDivergenceDist(Addr addr);
 
-    MasterID masterId() { return _masterId; }
+    RequestorID requestorId() { return _requestorId; }
 
     bool isDone() const;
     bool isVectorAluIdle(uint32_t simdId) const;
 
   protected:
-    MasterID _masterId;
+    RequestorID _requestorId;
 
     LdsState &lds;
 
@@ -628,12 +628,12 @@
 
     void exitCallback();
 
-    class GMTokenPort : public TokenMasterPort
+    class GMTokenPort : public TokenRequestPort
     {
       public:
         GMTokenPort(const std::string& name, SimObject *owner,
                     PortID id = InvalidPortID)
-            : TokenMasterPort(name, owner, id)
+            : TokenRequestPort(name, owner, id)
         { }
         ~GMTokenPort() { }
 
diff --git a/src/gpu-compute/fetch_unit.cc b/src/gpu-compute/fetch_unit.cc
index 3a139f5..4e4259e 100644
--- a/src/gpu-compute/fetch_unit.cc
+++ b/src/gpu-compute/fetch_unit.cc
@@ -160,7 +160,7 @@
     // set up virtual request
     RequestPtr req = std::make_shared<Request>(
         vaddr, computeUnit.cacheLineSize(), Request::INST_FETCH,
-        computeUnit.masterId(), 0, 0, nullptr);
+        computeUnit.requestorId(), 0, 0, nullptr);
 
     PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
 
diff --git a/src/gpu-compute/gpu_tlb.cc b/src/gpu-compute/gpu_tlb.cc
index 513106f..4c35396 100644
--- a/src/gpu-compute/gpu_tlb.cc
+++ b/src/gpu-compute/gpu_tlb.cc
@@ -113,14 +113,14 @@
         missLatency1 = p->missLatency1;
         missLatency2 = p->missLatency2;
 
-        // create the slave ports based on the number of connected ports
-        for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
+        // create the response ports based on the number of connected ports
+        for (size_t i = 0; i < p->port_cpu_side_ports_connection_count; ++i) {
             cpuSidePort.push_back(new CpuSidePort(csprintf("%s-port%d",
                                   name(), i), this, i));
         }
 
-        // create the master ports based on the number of connected ports
-        for (size_t i = 0; i < p->port_master_connection_count; ++i) {
+        // create the request ports based on the number of connected ports
+        for (size_t i = 0; i < p->port_mem_side_ports_connection_count; ++i) {
             memSidePort.push_back(new MemSidePort(csprintf("%s-port%d",
                                   name(), i), this, i));
         }
@@ -136,13 +136,13 @@
     Port &
     GpuTLB::getPort(const std::string &if_name, PortID idx)
     {
-        if (if_name == "slave") {
+        if (if_name == "cpu_side_ports") {
             if (idx >= static_cast<PortID>(cpuSidePort.size())) {
                 panic("TLBCoalescer::getPort: unknown index %d\n", idx);
             }
 
             return *cpuSidePort[idx];
-        } else if (if_name == "master") {
+        } else if (if_name == "mem_side_ports") {
             if (idx >= static_cast<PortID>(memSidePort.size())) {
                 panic("TLBCoalescer::getPort: unknown index %d\n", idx);
             }
@@ -930,7 +930,7 @@
         Addr paddr = local_entry->paddr | (vaddr & (page_size - 1));
         DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr);
 
-        // Since this packet will be sent through the cpu side slave port,
+        // Since this packet will be sent through the cpu side port,
         // it must be converted to a response pkt if it is not one already
         if (pkt->isRequest()) {
             pkt->makeTimingResponse();
@@ -1324,7 +1324,7 @@
     AddrRangeList
     GpuTLB::CpuSidePort::getAddrRanges() const
     {
-        // currently not checked by the master
+        // currently not checked by the requestor
         AddrRangeList ranges;
 
         return ranges;
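The `port_*_connection_count` params used in these constructors are derived
from the number of Python-side connections, so they are renamed together with
the vector ports. A sketch of how multiple connections become port indices,
with hypothetical object names:

    # Three coalescers feeding one shared TLB: each assignment appends one
    # element to the TLB's vector response port.
    for coalescer in [coalescer0, coalescer1, coalescer2]:
        coalescer.mem_side_ports = l2_tlb.cpu_side_ports
    # The GpuTLB constructor then sees
    # p->port_cpu_side_ports_connection_count == 3, and
    # getPort("cpu_side_ports", idx) is called for idx 0, 1, 2.
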
diff --git a/src/gpu-compute/shader.cc b/src/gpu-compute/shader.cc
index 9ebbd3c..cc039d2 100644
--- a/src/gpu-compute/shader.cc
+++ b/src/gpu-compute/shader.cc
@@ -206,7 +206,7 @@
         // create a request to hold INV info; the request's fields will
         // be updated in cu before use
         auto req = std::make_shared<Request>(0, 0, 0,
-                                             cuList[i_cu]->masterId(),
+                                             cuList[i_cu]->requestorId(),
                                              0, -1);
 
         _dispatcher.updateInvCounter(kernId, +1);
@@ -457,7 +457,7 @@
 
         RequestPtr req = std::make_shared<Request>(
             gen.addr(), gen.size(), 0,
-            cuList[0]->masterId(), 0, 0, nullptr);
+            cuList[0]->requestorId(), 0, 0, nullptr);
 
         doFunctionalAccess(req, cmd, data_buf, suppress_func_errors, cu_id);
         data_buf += gen.size();
diff --git a/src/gpu-compute/tlb_coalescer.cc b/src/gpu-compute/tlb_coalescer.cc
index 08eccd8..da4030b 100644
--- a/src/gpu-compute/tlb_coalescer.cc
+++ b/src/gpu-compute/tlb_coalescer.cc
@@ -52,14 +52,14 @@
                    "Cleanup issuedTranslationsTable hashmap",
                    false, Event::Maximum_Pri)
 {
-    // create the slave ports based on the number of connected ports
-    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
+    // create the response ports based on the number of connected ports
+    for (size_t i = 0; i < p->port_cpu_side_ports_connection_count; ++i) {
         cpuSidePort.push_back(new CpuSidePort(csprintf("%s-port%d", name(), i),
                                               this, i));
     }
 
-    // create the master ports based on the number of connected ports
-    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
+    // create the request ports based on the number of connected ports
+    for (size_t i = 0; i < p->port_mem_side_ports_connection_count; ++i) {
         memSidePort.push_back(new MemSidePort(csprintf("%s-port%d", name(), i),
                                               this, i));
     }
@@ -68,13 +68,13 @@
 Port &
 TLBCoalescer::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "slave") {
+    if (if_name == "cpu_side_ports") {
         if (idx >= static_cast<PortID>(cpuSidePort.size())) {
             panic("TLBCoalescer::getPort: unknown index %d\n", idx);
         }
 
         return *cpuSidePort[idx];
-    } else  if (if_name == "master") {
+    } else  if (if_name == "mem_side_ports") {
         if (idx >= static_cast<PortID>(memSidePort.size())) {
             panic("TLBCoalescer::getPort: unknown index %d\n", idx);
         }
@@ -359,7 +359,7 @@
 AddrRangeList
 TLBCoalescer::CpuSidePort::getAddrRanges() const
 {
-    // currently not checked by the master
+    // currently not checked by the requestor
     AddrRangeList ranges;
 
     return ranges;
diff --git a/src/gpu-compute/tlb_coalescer.hh b/src/gpu-compute/tlb_coalescer.hh
index defdc86..4ab76f6 100644
--- a/src/gpu-compute/tlb_coalescer.hh
+++ b/src/gpu-compute/tlb_coalescer.hh
@@ -191,9 +191,9 @@
         }
     };
 
-    // Coalescer slave ports on the cpu Side
+    // Coalescer response ports on the cpu Side
     std::vector<CpuSidePort*> cpuSidePort;
-    // Coalescer master ports on the memory side
+    // Coalescer request ports on the memory side
     std::vector<MemSidePort*> memSidePort;
 
     Port &getPort(const std::string &if_name,
diff --git a/src/learning_gem5/part2/SimpleCache.py b/src/learning_gem5/part2/SimpleCache.py
index 40892b5..ad94b50 100644
--- a/src/learning_gem5/part2/SimpleCache.py
+++ b/src/learning_gem5/part2/SimpleCache.py
@@ -35,7 +35,7 @@
 
     # Vector port example. Both the instruction and data ports connect to this
     # port which is automatically split out into two ports.
-    cpu_side = VectorSlavePort("CPU side port, receives requests")
+    cpu_side = VectorResponsePort("CPU side port, receives requests")
     mem_side = RequestPort("Memory side port, sends requests")
 
     latency = Param.Cycles(1, "Cycles taken on a hit or to resolve a miss")
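Only the port type changes here; the `cpu_side`/`mem_side` attribute names
used by the learning_gem5 scripts stay the same. A minimal wiring sketch in
the style of the part-2 tutorial configs (object names assumed):

    system.cache = SimpleCache(size='16kB')
    system.cpu.icache_port = system.cache.cpu_side
    system.cpu.dcache_port = system.cache.cpu_side
    system.cache.mem_side = system.membus.cpu_side_ports
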
diff --git a/src/learning_gem5/part2/simple_cache.cc b/src/learning_gem5/part2/simple_cache.cc
index ad5e663..3a3cfe6 100644
--- a/src/learning_gem5/part2/simple_cache.cc
+++ b/src/learning_gem5/part2/simple_cache.cc
@@ -45,7 +45,8 @@
     // automatically created depending on the name of the vector port and
     // holds the number of connections to this port name
     for (int i = 0; i < params->port_cpu_side_connection_count; ++i) {
-        cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i), i, this);
+        cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i),
+                              i, this);
     }
 }
 
diff --git a/src/learning_gem5/part2/simple_cache.hh b/src/learning_gem5/part2/simple_cache.hh
index 6dae1e4..2f39f3d 100644
--- a/src/learning_gem5/part2/simple_cache.hh
+++ b/src/learning_gem5/part2/simple_cache.hh
@@ -86,7 +86,7 @@
 
         /**
          * Get a list of the non-overlapping address ranges the owner is
-         * responsible for. All slave ports must override this function
+         * responsible for. All response ports must override this function
          * and return a populated list with at least one item.
          *
          * @return a list of ranges responded to
@@ -101,14 +101,14 @@
 
       protected:
         /**
-         * Receive an atomic request packet from the master port.
+         * Receive an atomic request packet from the request port.
          * No need to implement in this simple cache.
          */
         Tick recvAtomic(PacketPtr pkt) override
         { panic("recvAtomic unimpl."); }
 
         /**
-         * Receive a functional request packet from the master port.
+         * Receive a functional request packet from the request port.
          * Performs a "debug" access updating/reading the data in place.
          *
          * @param packet the requestor sent.
@@ -116,7 +116,7 @@
         void recvFunctional(PacketPtr pkt) override;
 
         /**
-         * Receive a timing request from the master port.
+         * Receive a timing request from the request port.
          *
          * @param the packet that the requestor sent
          * @return whether this object can consume to packet. If false, we
@@ -126,9 +126,9 @@
         bool recvTimingReq(PacketPtr pkt) override;
 
         /**
-         * Called by the master port if sendTimingResp was called on this
-         * slave port (causing recvTimingResp to be called on the master
-         * port) and was unsuccesful.
+         * Called by the request port if sendTimingResp was called on this
+         * response port (causing recvTimingResp to be called on the request
+         * port) and was unsuccessful.
          */
         void recvRespRetry() override;
     };
@@ -165,19 +165,19 @@
 
       protected:
         /**
-         * Receive a timing response from the slave port.
+         * Receive a timing response from the response port.
          */
         bool recvTimingResp(PacketPtr pkt) override;
 
         /**
-         * Called by the slave port if sendTimingReq was called on this
-         * master port (causing recvTimingReq to be called on the slave
+         * Called by the response port if sendTimingReq was called on this
+         * request port (causing recvTimingReq to be called on the response
-         * port) and was unsuccesful.
+         * port) and was unsuccessful.
          */
         void recvReqRetry() override;
 
         /**
-         * Called to receive an address range change from the peer slave
+         * Called to receive an address range change from the peer response
          * port. The default implementation ignores the change and does
          * nothing. Override this function in a derived class if the owner
          * needs to be aware of the address ranges, e.g. in an
diff --git a/src/mem/AddrMapper.py b/src/mem/AddrMapper.py
index 52d7ef8..60ef3be 100644
--- a/src/mem/AddrMapper.py
+++ b/src/mem/AddrMapper.py
@@ -37,10 +37,10 @@
 from m5.SimObject import SimObject
 
 # An address mapper changes the packet addresses in going from the
-# slave port side of the mapper to the master port side. When the
-# slave port is queried for the address ranges, it also performs the
+# response port side of the mapper to the request port side. When the
+# response port is queried for the address ranges, it also performs the
 # necessary range updates. Note that snoop requests that travel from
-# the master port (i.e. the memory side) to the slave port are
+# the request port (i.e. the memory side) to the response port are
 # currently not modified.
 class AddrMapper(SimObject):
     type = 'AddrMapper'
@@ -48,9 +48,14 @@
     abstract = True
 
     # one port in each direction
-    master = RequestPort("Master port")
-    slave = ResponsePort("Slave port")
-
+    mem_side_port = RequestPort("This port sends requests and "
+                                "receives responses")
+    master   = DeprecatedParam(mem_side_port,
+                               '`master` is now called `mem_side_port`')
+    cpu_side_port = ResponsePort("This port receives requests and "
+                                 "sends responses")
+    slave    = DeprecatedParam(cpu_side_port,
+                               '`slave` is now called `cpu_side_port`')
 
 # Range address mapper that maps a set of original ranges to a set of
 # remapped ranges, where a specific range is of the same size
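As the comment above notes, only traffic entering on the response-port side
is remapped. A minimal sketch of interposing a RangeAddrMapper with the
renamed ports (ranges and neighbouring objects are illustrative only):

    mapper = RangeAddrMapper(
        original_ranges = [AddrRange(0x80000000, size='1GB')],
        remapped_ranges = [AddrRange(0x00000000, size='1GB')])
    dma_device.dma = mapper.cpu_side_port         # requests enter here
    mapper.mem_side_port = membus.cpu_side_ports  # remapped requests exit here
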
diff --git a/src/mem/Bridge.py b/src/mem/Bridge.py
index a89e7f9..b4eb1d0 100644
--- a/src/mem/Bridge.py
+++ b/src/mem/Bridge.py
@@ -42,8 +42,16 @@
 class Bridge(ClockedObject):
     type = 'Bridge'
     cxx_header = "mem/bridge.hh"
-    slave = ResponsePort('Slave port')
-    master = RequestPort('Master port')
+
+    mem_side_port = RequestPort("This port sends requests and "
+                                "receives responses")
+    master   = DeprecatedParam(mem_side_port,
+                              '`master` is now called `mem_side_port`')
+    cpu_side_port = ResponsePort("This port receives requests and "
+                                 "sends responses")
+    slave    = DeprecatedParam(cpu_side_port,
+                               '`slave` is now called `cpu_side_port`')
+
     req_size = Param.Unsigned(16, "The number of requests to buffer")
     resp_size = Param.Unsigned(16, "The number of responses to buffer")
     delay = Param.Latency('0ns', "The latency of this bridge")
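In full-system configs the bridge typically sits between the memory bus and
the I/O bus; with the renamed ports the wiring reads as follows (bus names
and the address range are illustrative):

    system.bridge = Bridge(delay='50ns',
                           ranges=[AddrRange(0xC0000000, size='512MB')])
    system.bridge.cpu_side_port = system.membus.mem_side_ports
    system.bridge.mem_side_port = system.iobus.cpu_side_ports
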
diff --git a/src/mem/CommMonitor.py b/src/mem/CommMonitor.py
index 0fd884d..851e5a3 100644
--- a/src/mem/CommMonitor.py
+++ b/src/mem/CommMonitor.py
@@ -47,8 +47,14 @@
     system = Param.System(Parent.any, "System that the monitor belongs to.")
 
     # one port in each direction
-    master = RequestPort("Master port")
-    slave = ResponsePort("Slave port")
+    mem_side_port = RequestPort("This port sends requests and "
+                                "receives responses")
+    master   = DeprecatedParam(mem_side_port,
+                              '`master` is now called `mem_side_port`')
+    cpu_side_port = ResponsePort("This port receives requests and "
+                                 "sends responses")
+    slave    = DeprecatedParam(cpu_side_port,
+                              '`slave` is now called `cpu_side_port`')
 
     # control the sample period window length of this monitor
     sample_period = Param.Clock("1ms", "Sample period for histograms")
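Because the monitor is a transparent pass-through, it is added by splitting
an existing connection across its two ports; a sketch with hypothetical
neighbours:

    system.monitor = CommMonitor()
    system.cpu.dcache_port = system.monitor.cpu_side_port
    system.monitor.mem_side_port = system.membus.cpu_side_ports
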
diff --git a/src/mem/DRAMSim2.py b/src/mem/DRAMSim2.py
index bf5143f..d5147b1 100644
--- a/src/mem/DRAMSim2.py
+++ b/src/mem/DRAMSim2.py
@@ -42,7 +42,7 @@
     cxx_header = "mem/dramsim2.hh"
 
     # A single port for now
-    port = SlavePort("Slave port")
+    port = ResponsePort("This port sends responses and receives requests")
 
     deviceConfigFile = Param.String("ini/DDR3_micron_32M_8B_x8_sg15.ini",
                                     "Device configuration file")
diff --git a/src/mem/MemChecker.py b/src/mem/MemChecker.py
index 714ea79..69dae52 100644
--- a/src/mem/MemChecker.py
+++ b/src/mem/MemChecker.py
@@ -46,10 +46,14 @@
     cxx_header = "mem/mem_checker_monitor.hh"
 
     # one port in each direction
-    master = RequestPort("Master port")
-    slave = ResponsePort("Slave port")
-    cpu_side = ResponsePort("Alias for slave")
-    mem_side = RequestPort("Alias for master")
+    mem_side_port = RequestPort("This port sends requests and receives "
+                                "responses")
+    master   = DeprecatedParam(mem_side_port, "`master` is now called "
+                               "`mem_side_port`")
+    cpu_side_port = ResponsePort("This port receives requests and sends "
+                                 "responses")
+    slave    = DeprecatedParam(cpu_side_port, "`slave` is now called "
+                               "`cpu_side_port`")
     warn_only = Param.Bool(False, "Warn about violations only")
     memchecker = Param.MemChecker("Instance shared with other monitors")
 
diff --git a/src/mem/MemCtrl.py b/src/mem/MemCtrl.py
index e0f3424..6736bb0 100644
--- a/src/mem/MemCtrl.py
+++ b/src/mem/MemCtrl.py
@@ -56,7 +56,7 @@
 
     # single-ported on the system interface side, instantiate with a
     # bus in front of the controller for multiple ports
-    port = SlavePort("Slave port")
+    port = ResponsePort("This port responds to memory requests")
 
     # Interface to volatile, DRAM media
     dram = Param.DRAMInterface(NULL, "DRAM interface")
diff --git a/src/mem/MemDelay.py b/src/mem/MemDelay.py
index 7ffb608..4b322af 100644
--- a/src/mem/MemDelay.py
+++ b/src/mem/MemDelay.py
@@ -41,8 +41,14 @@
     cxx_header = 'mem/mem_delay.hh'
     abstract = True
 
-    master = RequestPort("Master port")
-    slave = ResponsePort("Slave port")
+    mem_side_port = RequestPort("This port sends requests and "
+                                "receives responses")
+    master   = DeprecatedParam(mem_side_port,
+                               '`master` is now called `mem_side_port`')
+    cpu_side_port = ResponsePort("This port receives requests and "
+                                 "sends responses")
+    slave    = DeprecatedParam(cpu_side_port,
+                               '`slave` is now called `cpu_side_port`')
 
 class SimpleMemDelay(MemDelay):
     type = 'SimpleMemDelay'
diff --git a/src/mem/SerialLink.py b/src/mem/SerialLink.py
index 2174bc7..7cde69f 100644
--- a/src/mem/SerialLink.py
+++ b/src/mem/SerialLink.py
@@ -46,8 +46,14 @@
 class SerialLink(ClockedObject):
     type = 'SerialLink'
     cxx_header = "mem/serial_link.hh"
-    slave = ResponsePort('Slave port')
-    master = RequestPort('Master port')
+    mem_side_port = RequestPort("This port sends requests and "
+                                "receives responses")
+    master   = DeprecatedParam(mem_side_port,
+                               '`master` is now called `mem_side_port`')
+    cpu_side_port = ResponsePort("This port receives requests and "
+                                 "sends responses")
+    slave    = DeprecatedParam(cpu_side_port,
+                               '`slave` is now called `cpu_side_port`')
     req_size = Param.Unsigned(16, "The number of requests to buffer")
     resp_size = Param.Unsigned(16, "The number of responses to buffer")
     delay = Param.Latency('0ns', "The latency of this serial_link")
diff --git a/src/mem/SimpleMemory.py b/src/mem/SimpleMemory.py
index 8f3c1e5..6e4b915 100644
--- a/src/mem/SimpleMemory.py
+++ b/src/mem/SimpleMemory.py
@@ -42,7 +42,7 @@
 class SimpleMemory(AbstractMemory):
     type = 'SimpleMemory'
     cxx_header = "mem/simple_mem.hh"
-    port = SlavePort("Slave ports")
+    port = ResponsePort("This port sends responses and receives requests")
     latency = Param.Latency('30ns', "Request to response latency")
     latency_var = Param.Latency('0ns', "Request to response latency variance")
     # The memory bandwidth limit default is set to 12.8GB/s which is
diff --git a/src/mem/XBar.py b/src/mem/XBar.py
index af13587..c162584 100644
--- a/src/mem/XBar.py
+++ b/src/mem/XBar.py
@@ -48,8 +48,14 @@
     abstract = True
     cxx_header = "mem/xbar.hh"
 
-    slave = VectorSlavePort("Vector port for connecting masters")
-    master = VectorMasterPort("Vector port for connecting slaves")
+    cpu_side_ports = VectorResponsePort("Vector port for connecting "
+                                        "mem side ports")
+    slave    = DeprecatedParam(cpu_side_ports,
+                               '`slave` is now called `cpu_side_ports`')
+    mem_side_ports = VectorRequestPort("Vector port for connecting "
+                                       "cpu side ports")
+    master   = DeprecatedParam(mem_side_ports,
+                               '`master` is now called `mem_side_ports`')
 
     # Latencies governing the time taken for the variuos paths a
     # packet has through the crossbar. Note that the crossbar itself
@@ -68,7 +74,7 @@
     forward_latency = Param.Cycles("Forward latency")
     response_latency = Param.Cycles("Response latency")
 
-    # The XBar uses one Layer per master. Each Layer forwards a packet
+    # The XBar uses one Layer per requestor. Each Layer forwards a packet
     # to its destination and is occupied for header_latency + size /
     # width cycles
     header_latency = Param.Cycles(1, "Header latency")
@@ -77,8 +83,8 @@
     width = Param.Unsigned("Datapath width per port (bytes)")
 
     # The default port can be left unconnected, or be used to connect
-    # a default slave port
-    default = RequestPort("Port for connecting an optional default slave")
+    # a default response port
+    default = RequestPort("Port for connecting an optional default responder")
 
     # The default port can be used unconditionally, or based on
     # address range, in which case it may overlap with other
@@ -134,7 +140,7 @@
     # Sanity check on max capacity to track, adjust if needed.
     max_capacity = Param.MemorySize('8MB', "Maximum capacity of snoop filter")
 
-# We use a coherent crossbar to connect multiple masters to the L2
+# We use a coherent crossbar to connect multiple requestors to the L2
 # caches. Normally this crossbar would be part of the cache itself.
 class L2XBar(CoherentXBar):
     # 256-bit crossbar by default
@@ -159,7 +165,7 @@
 
 # One of the key coherent crossbar instances is the system
 # interconnect, tying together the CPU clusters, GPUs, and any I/O
-# coherent masters, and DRAM controllers.
+# coherent requestors, and DRAM controllers.
 class SystemXBar(CoherentXBar):
     # 128-bit crossbar by default
     width = 16
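The crossbar rename is the most visible part of this change in user scripts:
requestors now attach to `cpu_side_ports` and responders to
`mem_side_ports`. A minimal system sketch using the new names (se.py-style
object names, assumed for illustration):

    system.membus = SystemXBar()
    system.cpu.icache_port = system.membus.cpu_side_ports
    system.cpu.dcache_port = system.membus.cpu_side_ports
    system.system_port = system.membus.cpu_side_ports

    system.mem_ctrl = MemCtrl()
    system.mem_ctrl.dram = DDR3_1600_8x8(range=system.mem_ranges[0])
    system.mem_ctrl.port = system.membus.mem_side_ports
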
diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc
index f1e9dba..a5730c7 100644
--- a/src/mem/abstract_mem.cc
+++ b/src/mem/abstract_mem.cc
@@ -145,54 +145,54 @@
 
     System *sys = mem.system();
     assert(sys);
-    const auto max_masters = sys->maxMasters();
+    const auto max_requestors = sys->maxRequestors();
 
     bytesRead
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        bytesRead.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        bytesRead.subname(i, sys->getRequestorName(i));
     }
 
     bytesInstRead
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        bytesInstRead.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        bytesInstRead.subname(i, sys->getRequestorName(i));
     }
 
     bytesWritten
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        bytesWritten.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        bytesWritten.subname(i, sys->getRequestorName(i));
     }
 
     numReads
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        numReads.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        numReads.subname(i, sys->getRequestorName(i));
     }
 
     numWrites
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        numWrites.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        numWrites.subname(i, sys->getRequestorName(i));
     }
 
     numOther
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        numOther.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        numOther.subname(i, sys->getRequestorName(i));
     }
 
     bwRead
@@ -200,8 +200,8 @@
         .prereq(bytesRead)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        bwRead.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        bwRead.subname(i, sys->getRequestorName(i));
     }
 
     bwInstRead
@@ -209,8 +209,8 @@
         .prereq(bytesInstRead)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        bwInstRead.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        bwInstRead.subname(i, sys->getRequestorName(i));
     }
 
     bwWrite
@@ -218,8 +218,8 @@
         .prereq(bytesWritten)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        bwWrite.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        bwWrite.subname(i, sys->getRequestorName(i));
     }
 
     bwTotal
@@ -227,8 +227,8 @@
         .prereq(bwTotal)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        bwTotal.subname(i, sys->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        bwTotal.subname(i, sys->getRequestorName(i));
     }
 
     bwRead = bytesRead / simSeconds;
@@ -324,10 +324,10 @@
                         i->contextId, paddr);
                 ContextID owner_cid = i->contextId;
                 assert(owner_cid != InvalidContextID);
-                ContextID requester_cid = req->hasContextId() ?
+                ContextID requestor_cid = req->hasContextId() ?
                                            req->contextId() :
                                            InvalidContextID;
-                if (owner_cid != requester_cid) {
+                if (owner_cid != requestor_cid) {
                     ThreadContext* ctx = system()->threads[owner_cid];
                     TheISA::globalClearExclusive(ctx);
                 }
@@ -350,14 +350,15 @@
     if (size == 1 || size == 2 || size == 4 || size == 8) {
         ByteOrder byte_order = sys->getGuestByteOrder();
         DPRINTF(MemoryAccess,"%s from %s of size %i on address %#x data "
-                "%#x %c\n", label, sys->getMasterName(pkt->req->masterId()),
+                "%#x %c\n", label, sys->getRequestorName(pkt->req->
+                requestorId()),
                 size, pkt->getAddr(), pkt->getUintX(byte_order),
                 pkt->req->isUncacheable() ? 'U' : 'C');
         return;
     }
 #endif
     DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
-            label, sys->getMasterName(pkt->req->masterId()),
+            label, sys->getRequestorName(pkt->req->requestorId()),
             size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
     DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
 }
@@ -424,7 +425,7 @@
 
             assert(!pkt->req->isInstFetch());
             TRACE_PACKET("Read/Write");
-            stats.numOther[pkt->req->masterId()]++;
+            stats.numOther[pkt->req->requestorId()]++;
         }
     } else if (pkt->isRead()) {
         assert(!pkt->isWrite());
@@ -438,10 +439,10 @@
             pkt->setData(host_addr);
         }
         TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
-        stats.numReads[pkt->req->masterId()]++;
-        stats.bytesRead[pkt->req->masterId()] += pkt->getSize();
+        stats.numReads[pkt->req->requestorId()]++;
+        stats.bytesRead[pkt->req->requestorId()] += pkt->getSize();
         if (pkt->req->isInstFetch())
-            stats.bytesInstRead[pkt->req->masterId()] += pkt->getSize();
+            stats.bytesInstRead[pkt->req->requestorId()] += pkt->getSize();
     } else if (pkt->isInvalidate() || pkt->isClean()) {
         assert(!pkt->isWrite());
         // in a fastmem system invalidating and/or cleaning packets
@@ -457,8 +458,8 @@
             }
             assert(!pkt->req->isInstFetch());
             TRACE_PACKET("Write");
-            stats.numWrites[pkt->req->masterId()]++;
-            stats.bytesWritten[pkt->req->masterId()] += pkt->getSize();
+            stats.numWrites[pkt->req->requestorId()]++;
+            stats.bytesWritten[pkt->req->requestorId()] += pkt->getSize();
         }
     } else {
         panic("Unexpected packet %s", pkt->print());
diff --git a/src/mem/abstract_mem.hh b/src/mem/abstract_mem.hh
index 616fd0e..fe41ddc 100644
--- a/src/mem/abstract_mem.hh
+++ b/src/mem/abstract_mem.hh
@@ -159,7 +159,7 @@
     }
 
     /** Pointer to the System object.
-     * This is used for getting the number of masters in the system which is
+     * This is used to get the number of requestors in the system which is
      * needed when registering stats
      */
     System *_system;
diff --git a/src/mem/addr_mapper.cc b/src/mem/addr_mapper.cc
index 4facd57..08a5cac 100644
--- a/src/mem/addr_mapper.cc
+++ b/src/mem/addr_mapper.cc
@@ -39,25 +39,25 @@
 
 AddrMapper::AddrMapper(const AddrMapperParams* p)
     : SimObject(p),
-      masterPort(name() + "-master", *this),
-      slavePort(name() + "-slave", *this)
+      memSidePort(name() + "-mem_side_port", *this),
+      cpuSidePort(name() + "-cpu_side_port", *this)
 {
 }
 
 void
 AddrMapper::init()
 {
-    if (!slavePort.isConnected() || !masterPort.isConnected())
+    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
         fatal("Address mapper is not connected on both sides.\n");
 }
 
 Port &
 AddrMapper::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "master") {
-        return masterPort;
-    } else if (if_name == "slave") {
-        return slavePort;
+    if (if_name == "mem_side_port") {
+        return memSidePort;
+    } else if (if_name == "cpu_side_port") {
+        return cpuSidePort;
     } else {
         return SimObject::getPort(if_name, idx);
     }
@@ -68,7 +68,7 @@
 {
     Addr orig_addr = pkt->getAddr();
     pkt->setAddr(remapAddr(orig_addr));
-    masterPort.sendFunctional(pkt);
+    memSidePort.sendFunctional(pkt);
     pkt->setAddr(orig_addr);
 }
 
@@ -77,7 +77,7 @@
 {
     Addr orig_addr = pkt->getAddr();
     pkt->setAddr(remapAddr(orig_addr));
-    slavePort.sendFunctionalSnoop(pkt);
+    cpuSidePort.sendFunctionalSnoop(pkt);
     pkt->setAddr(orig_addr);
 }
 
@@ -86,7 +86,7 @@
 {
     Addr orig_addr = pkt->getAddr();
     pkt->setAddr(remapAddr(orig_addr));
-    Tick ret_tick =  masterPort.sendAtomic(pkt);
+    Tick ret_tick =  memSidePort.sendAtomic(pkt);
     pkt->setAddr(orig_addr);
     return ret_tick;
 }
@@ -96,7 +96,7 @@
 {
     Addr orig_addr = pkt->getAddr();
     pkt->setAddr(remapAddr(orig_addr));
-    Tick ret_tick = slavePort.sendAtomicSnoop(pkt);
+    Tick ret_tick = cpuSidePort.sendAtomicSnoop(pkt);
     pkt->setAddr(orig_addr);
     return ret_tick;
 }
@@ -115,7 +115,7 @@
     pkt->setAddr(remapAddr(orig_addr));
 
     // Attempt to send the packet
-    bool successful = masterPort.sendTimingReq(pkt);
+    bool successful = memSidePort.sendTimingReq(pkt);
 
     // If not successful, restore the address and sender state
     if (!successful) {
@@ -147,7 +147,7 @@
     pkt->setAddr(receivedState->origAddr);
 
     // Attempt to send the packet
-    bool successful = slavePort.sendTimingResp(pkt);
+    bool successful = cpuSidePort.sendTimingResp(pkt);
 
     // If packet successfully sent, delete the sender state, otherwise
     // restore state
@@ -165,19 +165,19 @@
 void
 AddrMapper::recvTimingSnoopReq(PacketPtr pkt)
 {
-    slavePort.sendTimingSnoopReq(pkt);
+    cpuSidePort.sendTimingSnoopReq(pkt);
 }
 
 bool
 AddrMapper::recvTimingSnoopResp(PacketPtr pkt)
 {
-    return masterPort.sendTimingSnoopResp(pkt);
+    return memSidePort.sendTimingSnoopResp(pkt);
 }
 
 bool
 AddrMapper::isSnooping() const
 {
-    if (slavePort.isSnooping())
+    if (cpuSidePort.isSnooping())
         fatal("AddrMapper doesn't support remapping of snooping requests\n");
     return false;
 }
@@ -185,19 +185,19 @@
 void
 AddrMapper::recvReqRetry()
 {
-    slavePort.sendRetryReq();
+    cpuSidePort.sendRetryReq();
 }
 
 void
 AddrMapper::recvRespRetry()
 {
-    masterPort.sendRetryResp();
+    memSidePort.sendRetryResp();
 }
 
 void
 AddrMapper::recvRangeChange()
 {
-    slavePort.sendRangeChange();
+    cpuSidePort.sendRangeChange();
 }
 
 RangeAddrMapper::RangeAddrMapper(const RangeAddrMapperParams* p) :
diff --git a/src/mem/addr_mapper.hh b/src/mem/addr_mapper.hh
index 5e680a8..39db25a 100644
--- a/src/mem/addr_mapper.hh
+++ b/src/mem/addr_mapper.hh
@@ -45,10 +45,10 @@
 
 /**
  * An address mapper changes the packet addresses in going from the
- * slave port side of the mapper to the master port side. When the
- * slave port is queried for the address ranges, it also performs the
+ * response port side of the mapper to the request port side. When the
+ * response port is queried for the address ranges, it also performs the
  * necessary range updates. Note that snoop requests that travel from
- * the master port (i.e. the memory side) to the slave port are
+ * the request port (i.e. the memory side) to the response port are
  * currently not modified.
  */
 
@@ -98,12 +98,12 @@
 
     };
 
-    class MapperMasterPort : public RequestPort
+    class MapperRequestPort : public RequestPort
     {
 
       public:
 
-        MapperMasterPort(const std::string& _name, AddrMapper& _mapper)
+        MapperRequestPort(const std::string& _name, AddrMapper& _mapper)
             : RequestPort(_name, &_mapper), mapper(_mapper)
         { }
 
@@ -150,15 +150,15 @@
 
     };
 
-    /** Instance of master port, facing the memory side */
-    MapperMasterPort masterPort;
+    /** Instance of request port, facing the memory side */
+    MapperRequestPort memSidePort;
 
-    class MapperSlavePort : public ResponsePort
+    class MapperResponsePort : public ResponsePort
     {
 
       public:
 
-        MapperSlavePort(const std::string& _name, AddrMapper& _mapper)
+        MapperResponsePort(const std::string& _name, AddrMapper& _mapper)
             : ResponsePort(_name, &_mapper), mapper(_mapper)
         { }
 
@@ -200,8 +200,8 @@
 
     };
 
-    /** Instance of slave port, i.e. on the CPU side */
-    MapperSlavePort slavePort;
+    /** Instance of response port, i.e. on the CPU side */
+    MapperResponsePort cpuSidePort;
 
     void recvFunctional(PacketPtr pkt);
 
diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc
index 3cf61f5..ae1b8ee 100644
--- a/src/mem/bridge.cc
+++ b/src/mem/bridge.cc
@@ -40,8 +40,8 @@
 
 /**
  * @file
- * Implementation of a memory-mapped bridge that connects a master
- * and a slave through a request and response queue.
+ * Implementation of a memory-mapped bridge that connects a requestor
+ * and a responder through a request and response queue.
  */
 
 #include "mem/bridge.hh"
@@ -50,23 +50,25 @@
 #include "debug/Bridge.hh"
 #include "params/Bridge.hh"
 
-Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name,
+Bridge::BridgeResponsePort::BridgeResponsePort(const std::string& _name,
                                          Bridge& _bridge,
-                                         BridgeMasterPort& _masterPort,
+                                         BridgeRequestPort& _memSidePort,
                                          Cycles _delay, int _resp_limit,
                                          std::vector<AddrRange> _ranges)
-    : ResponsePort(_name, &_bridge), bridge(_bridge), masterPort(_masterPort),
-      delay(_delay), ranges(_ranges.begin(), _ranges.end()),
+    : ResponsePort(_name, &_bridge), bridge(_bridge),
+      memSidePort(_memSidePort), delay(_delay),
+      ranges(_ranges.begin(), _ranges.end()),
       outstandingResponses(0), retryReq(false), respQueueLimit(_resp_limit),
       sendEvent([this]{ trySendTiming(); }, _name)
 {
 }
 
-Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name,
+Bridge::BridgeRequestPort::BridgeRequestPort(const std::string& _name,
                                            Bridge& _bridge,
-                                           BridgeSlavePort& _slavePort,
+                                           BridgeResponsePort& _cpuSidePort,
                                            Cycles _delay, int _req_limit)
-    : RequestPort(_name, &_bridge), bridge(_bridge), slavePort(_slavePort),
+    : RequestPort(_name, &_bridge), bridge(_bridge),
+      cpuSidePort(_cpuSidePort),
       delay(_delay), reqQueueLimit(_req_limit),
       sendEvent([this]{ trySendTiming(); }, _name)
 {
@@ -74,9 +76,9 @@
 
 Bridge::Bridge(Params *p)
     : ClockedObject(p),
-      slavePort(p->name + ".slave", *this, masterPort,
+      cpuSidePort(p->name + ".cpu_side_port", *this, memSidePort,
                 ticksToCycles(p->delay), p->resp_size, p->ranges),
-      masterPort(p->name + ".master", *this, slavePort,
+      memSidePort(p->name + ".mem_side_port", *this, cpuSidePort,
                  ticksToCycles(p->delay), p->req_size)
 {
 }
@@ -84,10 +86,10 @@
 Port &
 Bridge::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "master")
-        return masterPort;
-    else if (if_name == "slave")
-        return slavePort;
+    if (if_name == "mem_side_port")
+        return memSidePort;
+    else if (if_name == "cpu_side_port")
+        return cpuSidePort;
     else
         // pass it along to our super class
         return ClockedObject::getPort(if_name, idx);
@@ -97,29 +99,29 @@
 Bridge::init()
 {
     // make sure both sides are connected and have the same block size
-    if (!slavePort.isConnected() || !masterPort.isConnected())
+    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
         fatal("Both ports of a bridge must be connected.\n");
 
-    // notify the master side  of our address ranges
-    slavePort.sendRangeChange();
+    // notify the requestor side of our address ranges
+    cpuSidePort.sendRangeChange();
 }
 
 bool
-Bridge::BridgeSlavePort::respQueueFull() const
+Bridge::BridgeResponsePort::respQueueFull() const
 {
     return outstandingResponses == respQueueLimit;
 }
 
 bool
-Bridge::BridgeMasterPort::reqQueueFull() const
+Bridge::BridgeRequestPort::reqQueueFull() const
 {
     return transmitList.size() == reqQueueLimit;
 }
 
 bool
-Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt)
+Bridge::BridgeRequestPort::recvTimingResp(PacketPtr pkt)
 {
-    // all checks are done when the request is accepted on the slave
+    // all checks are done when the request is accepted on the response
     // side, so we are guaranteed to have space for the response
     DPRINTF(Bridge, "recvTimingResp: %s addr 0x%x\n",
             pkt->cmdString(), pkt->getAddr());
@@ -132,14 +134,14 @@
     Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
     pkt->headerDelay = pkt->payloadDelay = 0;
 
-    slavePort.schedTimingResp(pkt, bridge.clockEdge(delay) +
+    cpuSidePort.schedTimingResp(pkt, bridge.clockEdge(delay) +
                               receive_delay);
 
     return true;
 }
 
 bool
-Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
+Bridge::BridgeResponsePort::recvTimingReq(PacketPtr pkt)
 {
     DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n",
             pkt->cmdString(), pkt->getAddr());
@@ -157,7 +159,7 @@
             transmitList.size(), outstandingResponses);
 
     // if the request queue is full then there is no hope
-    if (masterPort.reqQueueFull()) {
+    if (memSidePort.reqQueueFull()) {
         DPRINTF(Bridge, "Request queue full\n");
         retryReq = true;
     } else {
@@ -186,20 +188,20 @@
             Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
             pkt->headerDelay = pkt->payloadDelay = 0;
 
-            masterPort.schedTimingReq(pkt, bridge.clockEdge(delay) +
+            memSidePort.schedTimingReq(pkt, bridge.clockEdge(delay) +
                                       receive_delay);
         }
     }
 
     // remember that we are now stalling a packet and that we have to
-    // tell the sending master to retry once space becomes available,
+    // tell the sending requestor to retry once space becomes available,
     // we make no distinction whether the stalling is due to the
     // request queue or response queue being full
     return !retryReq;
 }
 
 void
-Bridge::BridgeSlavePort::retryStalledReq()
+Bridge::BridgeResponsePort::retryStalledReq()
 {
     if (retryReq) {
         DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
@@ -209,7 +211,7 @@
 }
 
 void
-Bridge::BridgeMasterPort::schedTimingReq(PacketPtr pkt, Tick when)
+Bridge::BridgeRequestPort::schedTimingReq(PacketPtr pkt, Tick when)
 {
     // If we're about to put this packet at the head of the queue, we
     // need to schedule an event to do the transmit.  Otherwise there
@@ -226,7 +228,7 @@
 
 
 void
-Bridge::BridgeSlavePort::schedTimingResp(PacketPtr pkt, Tick when)
+Bridge::BridgeResponsePort::schedTimingResp(PacketPtr pkt, Tick when)
 {
     // If we're about to put this packet at the head of the queue, we
     // need to schedule an event to do the transmit.  Otherwise there
@@ -240,7 +242,7 @@
 }
 
 void
-Bridge::BridgeMasterPort::trySendTiming()
+Bridge::BridgeRequestPort::trySendTiming()
 {
     assert(!transmitList.empty());
 
@@ -270,7 +272,7 @@
         // then send a retry at this point, also note that if the
         // request we stalled was waiting for the response queue
         // rather than the request queue we might stall it again
-        slavePort.retryStalledReq();
+        cpuSidePort.retryStalledReq();
     }
 
     // if the send failed, then we try again once we receive a retry,
@@ -278,7 +280,7 @@
 }
 
 void
-Bridge::BridgeSlavePort::trySendTiming()
+Bridge::BridgeResponsePort::trySendTiming()
 {
     assert(!transmitList.empty());
 
@@ -310,7 +312,7 @@
         // if there is space in the request queue and we were stalling
         // a request, it will definitely be possible to accept it now
         // since there is guaranteed space in the response queue
-        if (!masterPort.reqQueueFull() && retryReq) {
+        if (!memSidePort.reqQueueFull() && retryReq) {
             DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
             retryReq = false;
             sendRetryReq();
@@ -322,28 +324,28 @@
 }
 
 void
-Bridge::BridgeMasterPort::recvReqRetry()
+Bridge::BridgeRequestPort::recvReqRetry()
 {
     trySendTiming();
 }
 
 void
-Bridge::BridgeSlavePort::recvRespRetry()
+Bridge::BridgeResponsePort::recvRespRetry()
 {
     trySendTiming();
 }
 
 Tick
-Bridge::BridgeSlavePort::recvAtomic(PacketPtr pkt)
+Bridge::BridgeResponsePort::recvAtomic(PacketPtr pkt)
 {
     panic_if(pkt->cacheResponding(), "Should not see packets where cache "
              "is responding");
 
-    return delay * bridge.clockPeriod() + masterPort.sendAtomic(pkt);
+    return delay * bridge.clockPeriod() + memSidePort.sendAtomic(pkt);
 }
 
 void
-Bridge::BridgeSlavePort::recvFunctional(PacketPtr pkt)
+Bridge::BridgeResponsePort::recvFunctional(PacketPtr pkt)
 {
     pkt->pushLabel(name());
 
@@ -355,19 +357,19 @@
         }
     }
 
-    // also check the master port's request queue
-    if (masterPort.trySatisfyFunctional(pkt)) {
+    // also check the request port's request queue
+    if (memSidePort.trySatisfyFunctional(pkt)) {
         return;
     }
 
     pkt->popLabel();
 
     // fall through if pkt still not satisfied
-    masterPort.sendFunctional(pkt);
+    memSidePort.sendFunctional(pkt);
 }
 
 bool
-Bridge::BridgeMasterPort::trySatisfyFunctional(PacketPtr pkt)
+Bridge::BridgeRequestPort::trySatisfyFunctional(PacketPtr pkt)
 {
     bool found = false;
     auto i = transmitList.begin();
@@ -384,7 +386,7 @@
 }
 
 AddrRangeList
-Bridge::BridgeSlavePort::getAddrRanges() const
+Bridge::BridgeResponsePort::getAddrRanges() const
 {
     return ranges;
 }
diff --git a/src/mem/bridge.hh b/src/mem/bridge.hh
index dca863d..2b03e13 100644
--- a/src/mem/bridge.hh
+++ b/src/mem/bridge.hh
@@ -40,8 +40,8 @@
 
 /**
  * @file
- * Declaration of a memory-mapped bridge that connects a master
- * and a slave through a request and response queue.
+ * Declaration of a memory-mapped bridge that connects a requestor
+ * and a responder through a request and response queue.
  */
 
 #ifndef __MEM_BRIDGE_HH__
@@ -56,11 +56,11 @@
 
 /**
  * A bridge is used to interface two different crossbars (or in general a
- * memory-mapped master and slave), with buffering for requests and
+ * memory-mapped requestor and responder), with buffering for requests and
  * responses. The bridge has a fixed delay for packets passing through
  * it and responds to a fixed set of address ranges.
  *
- * The bridge comprises a slave port and a master port, that buffer
+ * The bridge comprises a response port and a request port, that buffer
  * outgoing responses and requests respectively. Buffer space is
  * reserved when a request arrives, also reserving response space
  * before forwarding the request. If there is no space present, then
@@ -87,16 +87,16 @@
         { }
     };
 
-    // Forward declaration to allow the slave port to have a pointer
-    class BridgeMasterPort;
+    // Forward declaration to allow the response port to have a pointer
+    class BridgeRequestPort;
 
     /**
      * The port on the side that receives requests and sends
-     * responses. The slave port has a set of address ranges that it
-     * is responsible for. The slave port also has a buffer for the
+     * responses. The response port has a set of address ranges that it
+     * is responsible for. The response port also has a buffer for the
      * responses not yet sent.
      */
-    class BridgeSlavePort : public ResponsePort
+    class BridgeResponsePort : public ResponsePort
     {
 
       private:
@@ -105,9 +105,9 @@
         Bridge& bridge;
 
         /**
-         * Master port on the other side of the bridge.
+         * Request port on the other side of the bridge.
          */
-        BridgeMasterPort& masterPort;
+        BridgeRequestPort& memSidePort;
 
         /** Minimum request delay though this bridge. */
         const Cycles delay;
@@ -158,17 +158,18 @@
       public:
 
         /**
-         * Constructor for the BridgeSlavePort.
+         * Constructor for the BridgeResponsePort.
          *
          * @param _name the port name including the owner
          * @param _bridge the structural owner
-         * @param _masterPort the master port on the other side of the bridge
+         * @param _memSidePort the request port on the other
+         *                       side of the bridge
          * @param _delay the delay in cycles from receiving to sending
          * @param _resp_limit the size of the response queue
          * @param _ranges a number of address ranges to forward
          */
-        BridgeSlavePort(const std::string& _name, Bridge& _bridge,
-                        BridgeMasterPort& _masterPort, Cycles _delay,
+        BridgeResponsePort(const std::string& _name, Bridge& _bridge,
+                        BridgeRequestPort& _memSidePort, Cycles _delay,
                         int _resp_limit, std::vector<AddrRange> _ranges);
 
         /**
@@ -213,10 +214,10 @@
 
     /**
      * Port on the side that forwards requests and receives
-     * responses. The master port has a buffer for the requests not
+     * responses. The request port has a buffer for the requests not
      * yet sent.
      */
-    class BridgeMasterPort : public RequestPort
+    class BridgeRequestPort : public RequestPort
     {
 
       private:
@@ -225,9 +226,9 @@
         Bridge& bridge;
 
         /**
-         * The slave port on the other side of the bridge.
+         * The response port on the other side of the bridge.
          */
-        BridgeSlavePort& slavePort;
+        BridgeResponsePort& cpuSidePort;
 
         /** Minimum delay though this bridge. */
         const Cycles delay;
@@ -256,16 +257,17 @@
       public:
 
         /**
-         * Constructor for the BridgeMasterPort.
+         * Constructor for the BridgeRequestPort.
          *
          * @param _name the port name including the owner
          * @param _bridge the structural owner
-         * @param _slavePort the slave port on the other side of the bridge
+         * @param _cpuSidePort the response port on the other side of
+         * the bridge
          * @param _delay the delay in cycles from receiving to sending
          * @param _req_limit the size of the request queue
          */
-        BridgeMasterPort(const std::string& _name, Bridge& _bridge,
-                         BridgeSlavePort& _slavePort, Cycles _delay,
+        BridgeRequestPort(const std::string& _name, Bridge& _bridge,
+                         BridgeResponsePort& _cpuSidePort, Cycles _delay,
                          int _req_limit);
 
         /**
@@ -305,11 +307,11 @@
         void recvReqRetry();
     };
 
-    /** Slave port of the bridge. */
-    BridgeSlavePort slavePort;
+    /** Response port of the bridge. */
+    BridgeResponsePort cpuSidePort;
 
-    /** Master port of the bridge. */
-    BridgeMasterPort masterPort;
+    /** Request port of the bridge. */
+    BridgeRequestPort memSidePort;
 
   public:
 
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index a1fd151..c420714 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -63,10 +63,10 @@
 
 using namespace std;
 
-BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
+BaseCache::CacheResponsePort::CacheResponsePort(const std::string &_name,
                                           BaseCache *_cache,
                                           const std::string &_label)
-    : QueuedSlavePort(_name, _cache, queue),
+    : QueuedResponsePort(_name, _cache, queue),
       queue(*_cache, *this, true, _label),
       blocked(false), mustSendRetry(false),
       sendRetryEvent([this]{ processSendRetry(); }, _name)
@@ -75,8 +75,8 @@
 
 BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
     : ClockedObject(p),
-      cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
-      memSidePort(p->name + ".mem_side", this, "MemSidePort"),
+      cpuSidePort (p->name + ".cpu_side_port", this, "CpuSidePort"),
+      memSidePort(p->name + ".mem_side_port", this, "MemSidePort"),
       mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
       writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
       tags(p->tags),
@@ -114,7 +114,7 @@
     // buffer before committing to an MSHR
 
     // forward snoops is overridden in init() once we can query
-    // whether the connected master is actually snooping or not
+    // whether the connected requestor is actually snooping or not
 
     tempBlock = new TempCacheBlk(blkSize);
 
@@ -129,7 +129,7 @@
 }
 
 void
-BaseCache::CacheSlavePort::setBlocked()
+BaseCache::CacheResponsePort::setBlocked()
 {
     assert(!blocked);
     DPRINTF(CachePort, "Port is blocking new requests\n");
@@ -144,7 +144,7 @@
 }
 
 void
-BaseCache::CacheSlavePort::clearBlocked()
+BaseCache::CacheResponsePort::clearBlocked()
 {
     assert(blocked);
     DPRINTF(CachePort, "Port is accepting new requests\n");
@@ -156,7 +156,7 @@
 }
 
 void
-BaseCache::CacheSlavePort::processSendRetry()
+BaseCache::CacheResponsePort::processSendRetry()
 {
     DPRINTF(CachePort, "Port is sending retry\n");
 
@@ -270,8 +270,8 @@
                 DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
                         pkt->print());
 
-                assert(pkt->req->masterId() < system->maxMasters());
-                stats.cmdStats(pkt).mshr_hits[pkt->req->masterId()]++;
+                assert(pkt->req->requestorId() < system->maxRequestors());
+                stats.cmdStats(pkt).mshr_hits[pkt->req->requestorId()]++;
 
                 // We use forward_time here because it is the same
                 // considering new targets. We have multiple
@@ -294,8 +294,8 @@
         }
     } else {
         // no MSHR
-        assert(pkt->req->masterId() < system->maxMasters());
-        stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;
+        assert(pkt->req->requestorId() < system->maxRequestors());
+        stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
 
         if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
             // We use forward_time here because there is an
@@ -441,13 +441,13 @@
     const QueueEntry::Target *initial_tgt = mshr->getTarget();
     const Tick miss_latency = curTick() - initial_tgt->recvTime;
     if (pkt->req->isUncacheable()) {
-        assert(pkt->req->masterId() < system->maxMasters());
+        assert(pkt->req->requestorId() < system->maxRequestors());
         stats.cmdStats(initial_tgt->pkt)
-            .mshr_uncacheable_lat[pkt->req->masterId()] += miss_latency;
+            .mshr_uncacheable_lat[pkt->req->requestorId()] += miss_latency;
     } else {
-        assert(pkt->req->masterId() < system->maxMasters());
+        assert(pkt->req->requestorId() < system->maxRequestors());
         stats.cmdStats(initial_tgt->pkt)
-            .mshr_miss_latency[pkt->req->masterId()] += miss_latency;
+            .mshr_miss_latency[pkt->req->requestorId()] += miss_latency;
     }
 
     PacketList writebacks;
@@ -774,8 +774,8 @@
                 !writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
                 // Update statistic on number of prefetches issued
                 // (hwpf_mshr_misses)
-                assert(pkt->req->masterId() < system->maxMasters());
-                stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;
+                assert(pkt->req->requestorId() < system->maxRequestors());
+                stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
 
                 // allocate an MSHR and return it, note
                 // that we send the packet straight away, so do not
@@ -910,7 +910,7 @@
     // satisfying a string of Read and ReadEx requests from
     // upper-level caches, a Read will mark the block as shared but we
     // can satisfy a following ReadEx anyway since we can rely on the
-    // Read requester(s) to have buffered the ReadEx snoop and to
+    // Read requestor(s) to have buffered the ReadEx snoop and to
     // invalidate their blocks after receiving them.
     // assert(!pkt->needsWritable() || blk->isWritable());
     assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
@@ -1489,10 +1489,10 @@
                   "Writeback from read-only cache");
     assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
 
-    stats.writebacks[Request::wbMasterId]++;
+    stats.writebacks[Request::wbRequestorId]++;
 
     RequestPtr req = std::make_shared<Request>(
-        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+        regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
 
     if (blk->isSecure())
         req->setFlags(Request::SECURE);
@@ -1534,7 +1534,7 @@
 BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
 {
     RequestPtr req = std::make_shared<Request>(
-        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+        regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
 
     if (blk->isSecure()) {
         req->setFlags(Request::SECURE);
@@ -1607,7 +1607,7 @@
         assert(blk.isValid());
 
         RequestPtr request = std::make_shared<Request>(
-            regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
+            regenerateBlkAddr(&blk), blkSize, 0, Request::funcRequestorId);
 
         request->taskId(blk.task_id);
         if (blk.isSecure()) {
@@ -1869,121 +1869,121 @@
 
     Stats::Group::regStats();
     System *system = cache.system;
-    const auto max_masters = system->maxMasters();
+    const auto max_requestors = system->maxRequestors();
 
     hits
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        hits.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        hits.subname(i, system->getRequestorName(i));
     }
 
     // Miss statistics
     misses
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        misses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        misses.subname(i, system->getRequestorName(i));
     }
 
     // Miss latency statistics
     missLatency
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        missLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        missLatency.subname(i, system->getRequestorName(i));
     }
 
     // access formulas
     accesses.flags(total | nozero | nonan);
     accesses = hits + misses;
-    for (int i = 0; i < max_masters; i++) {
-        accesses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        accesses.subname(i, system->getRequestorName(i));
     }
 
     // miss rate formulas
     missRate.flags(total | nozero | nonan);
     missRate = misses / accesses;
-    for (int i = 0; i < max_masters; i++) {
-        missRate.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        missRate.subname(i, system->getRequestorName(i));
     }
 
     // miss latency formulas
     avgMissLatency.flags(total | nozero | nonan);
     avgMissLatency = missLatency / misses;
-    for (int i = 0; i < max_masters; i++) {
-        avgMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        avgMissLatency.subname(i, system->getRequestorName(i));
     }
 
     // MSHR statistics
     // MSHR hit statistics
     mshr_hits
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        mshr_hits.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_hits.subname(i, system->getRequestorName(i));
     }
 
     // MSHR miss statistics
     mshr_misses
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        mshr_misses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_misses.subname(i, system->getRequestorName(i));
     }
 
     // MSHR miss latency statistics
     mshr_miss_latency
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        mshr_miss_latency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_miss_latency.subname(i, system->getRequestorName(i));
     }
 
     // MSHR uncacheable statistics
     mshr_uncacheable
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        mshr_uncacheable.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_uncacheable.subname(i, system->getRequestorName(i));
     }
 
     // MSHR miss latency statistics
     mshr_uncacheable_lat
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        mshr_uncacheable_lat.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshr_uncacheable_lat.subname(i, system->getRequestorName(i));
     }
 
     // MSHR miss rate formulas
     mshrMissRate.flags(total | nozero | nonan);
     mshrMissRate = mshr_misses / accesses;
 
-    for (int i = 0; i < max_masters; i++) {
-        mshrMissRate.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        mshrMissRate.subname(i, system->getRequestorName(i));
     }
 
     // mshrMiss latency formulas
     avgMshrMissLatency.flags(total | nozero | nonan);
     avgMshrMissLatency = mshr_miss_latency / mshr_misses;
-    for (int i = 0; i < max_masters; i++) {
-        avgMshrMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        avgMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
     // mshrUncacheable latency formulas
     avgMshrUncacheableLatency.flags(total | nozero | nonan);
     avgMshrUncacheableLatency = mshr_uncacheable_lat / mshr_uncacheable;
-    for (int i = 0; i < max_masters; i++) {
-        avgMshrUncacheableLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        avgMshrUncacheableLatency.subname(i, system->getRequestorName(i));
     }
 }
 
@@ -2064,7 +2064,7 @@
     Stats::Group::regStats();
 
     System *system = cache.system;
-    const auto max_masters = system->maxMasters();
+    const auto max_requestors = system->maxRequestors();
 
     for (auto &cs : cmd)
         cs->regStatsFromParent();
@@ -2084,74 +2084,74 @@
 
     demandHits.flags(total | nozero | nonan);
     demandHits = SUM_DEMAND(hits);
-    for (int i = 0; i < max_masters; i++) {
-        demandHits.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandHits.subname(i, system->getRequestorName(i));
     }
 
     overallHits.flags(total | nozero | nonan);
     overallHits = demandHits + SUM_NON_DEMAND(hits);
-    for (int i = 0; i < max_masters; i++) {
-        overallHits.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallHits.subname(i, system->getRequestorName(i));
     }
 
     demandMisses.flags(total | nozero | nonan);
     demandMisses = SUM_DEMAND(misses);
-    for (int i = 0; i < max_masters; i++) {
-        demandMisses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMisses.subname(i, system->getRequestorName(i));
     }
 
     overallMisses.flags(total | nozero | nonan);
     overallMisses = demandMisses + SUM_NON_DEMAND(misses);
-    for (int i = 0; i < max_masters; i++) {
-        overallMisses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMisses.subname(i, system->getRequestorName(i));
     }
 
     demandMissLatency.flags(total | nozero | nonan);
     demandMissLatency = SUM_DEMAND(missLatency);
-    for (int i = 0; i < max_masters; i++) {
-        demandMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMissLatency.subname(i, system->getRequestorName(i));
     }
 
     overallMissLatency.flags(total | nozero | nonan);
     overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
-    for (int i = 0; i < max_masters; i++) {
-        overallMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMissLatency.subname(i, system->getRequestorName(i));
     }
 
     demandAccesses.flags(total | nozero | nonan);
     demandAccesses = demandHits + demandMisses;
-    for (int i = 0; i < max_masters; i++) {
-        demandAccesses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandAccesses.subname(i, system->getRequestorName(i));
     }
 
     overallAccesses.flags(total | nozero | nonan);
     overallAccesses = overallHits + overallMisses;
-    for (int i = 0; i < max_masters; i++) {
-        overallAccesses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallAccesses.subname(i, system->getRequestorName(i));
     }
 
     demandMissRate.flags(total | nozero | nonan);
     demandMissRate = demandMisses / demandAccesses;
-    for (int i = 0; i < max_masters; i++) {
-        demandMissRate.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMissRate.subname(i, system->getRequestorName(i));
     }
 
     overallMissRate.flags(total | nozero | nonan);
     overallMissRate = overallMisses / overallAccesses;
-    for (int i = 0; i < max_masters; i++) {
-        overallMissRate.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMissRate.subname(i, system->getRequestorName(i));
     }
 
     demandAvgMissLatency.flags(total | nozero | nonan);
     demandAvgMissLatency = demandMissLatency / demandMisses;
-    for (int i = 0; i < max_masters; i++) {
-        demandAvgMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandAvgMissLatency.subname(i, system->getRequestorName(i));
     }
 
     overallAvgMissLatency.flags(total | nozero | nonan);
     overallAvgMissLatency = overallMissLatency / overallMisses;
-    for (int i = 0; i < max_masters; i++) {
-        overallAvgMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallAvgMissLatency.subname(i, system->getRequestorName(i));
     }
 
     blocked_cycles.init(NUM_BLOCKED_CAUSES);
@@ -2176,55 +2176,55 @@
     unusedPrefetches.flags(nozero);
 
     writebacks
-        .init(max_masters)
+        .init(max_requestors)
         .flags(total | nozero | nonan)
         ;
-    for (int i = 0; i < max_masters; i++) {
-        writebacks.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        writebacks.subname(i, system->getRequestorName(i));
     }
 
     demandMshrHits.flags(total | nozero | nonan);
     demandMshrHits = SUM_DEMAND(mshr_hits);
-    for (int i = 0; i < max_masters; i++) {
-        demandMshrHits.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMshrHits.subname(i, system->getRequestorName(i));
     }
 
     overallMshrHits.flags(total | nozero | nonan);
     overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
-    for (int i = 0; i < max_masters; i++) {
-        overallMshrHits.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrHits.subname(i, system->getRequestorName(i));
     }
 
     demandMshrMisses.flags(total | nozero | nonan);
     demandMshrMisses = SUM_DEMAND(mshr_misses);
-    for (int i = 0; i < max_masters; i++) {
-        demandMshrMisses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMshrMisses.subname(i, system->getRequestorName(i));
     }
 
     overallMshrMisses.flags(total | nozero | nonan);
     overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
-    for (int i = 0; i < max_masters; i++) {
-        overallMshrMisses.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrMisses.subname(i, system->getRequestorName(i));
     }
 
     demandMshrMissLatency.flags(total | nozero | nonan);
     demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
-    for (int i = 0; i < max_masters; i++) {
-        demandMshrMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
     overallMshrMissLatency.flags(total | nozero | nonan);
     overallMshrMissLatency =
         demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
-    for (int i = 0; i < max_masters; i++) {
-        overallMshrMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
     overallMshrUncacheable.flags(total | nozero | nonan);
     overallMshrUncacheable =
         SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
-    for (int i = 0; i < max_masters; i++) {
-        overallMshrUncacheable.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrUncacheable.subname(i, system->getRequestorName(i));
     }
 
 
@@ -2232,39 +2232,39 @@
     overallMshrUncacheableLatency =
         SUM_DEMAND(mshr_uncacheable_lat) +
         SUM_NON_DEMAND(mshr_uncacheable_lat);
-    for (int i = 0; i < max_masters; i++) {
-        overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrUncacheableLatency.subname(i, system->getRequestorName(i));
     }
 
     demandMshrMissRate.flags(total | nozero | nonan);
     demandMshrMissRate = demandMshrMisses / demandAccesses;
-    for (int i = 0; i < max_masters; i++) {
-        demandMshrMissRate.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandMshrMissRate.subname(i, system->getRequestorName(i));
     }
 
     overallMshrMissRate.flags(total | nozero | nonan);
     overallMshrMissRate = overallMshrMisses / overallAccesses;
-    for (int i = 0; i < max_masters; i++) {
-        overallMshrMissRate.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallMshrMissRate.subname(i, system->getRequestorName(i));
     }
 
     demandAvgMshrMissLatency.flags(total | nozero | nonan);
     demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
-    for (int i = 0; i < max_masters; i++) {
-        demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        demandAvgMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
     overallAvgMshrMissLatency.flags(total | nozero | nonan);
     overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
-    for (int i = 0; i < max_masters; i++) {
-        overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallAvgMshrMissLatency.subname(i, system->getRequestorName(i));
     }
 
     overallAvgMshrUncacheableLatency.flags(total | nozero | nonan);
     overallAvgMshrUncacheableLatency =
         overallMshrUncacheableLatency / overallMshrUncacheable;
-    for (int i = 0; i < max_masters; i++) {
-        overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
+    for (int i = 0; i < max_requestors; i++) {
+        overallAvgMshrUncacheableLatency.subname(i, system->getRequestorName(i));
     }
 
     dataExpansions.flags(nozero | nonan);
@@ -2291,7 +2291,7 @@
 
     assert(pkt->isResponse());
 
-    // Express snoop responses from master to slave, e.g., from L1 to L2
+    // Express snoop responses from requestor to responder, e.g., from L1 to L2
     cache->recvTimingSnoopResp(pkt);
     return true;
 }
@@ -2365,7 +2365,7 @@
 BaseCache::
 CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
                          const std::string &_label)
-    : CacheSlavePort(_name, _cache, _label), cache(_cache)
+    : CacheResponsePort(_name, _cache, _label), cache(_cache)
 {
 }
 
@@ -2452,7 +2452,7 @@
 BaseCache::MemSidePort::MemSidePort(const std::string &_name,
                                     BaseCache *_cache,
                                     const std::string &_label)
-    : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
+    : CacheRequestPort(_name, _cache, _reqQueue, _snoopRespQueue),
       _reqQueue(*_cache, *this, _snoopRespQueue, _label),
       _snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
 {
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index c129661..9986484 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -111,7 +111,7 @@
   protected:
 
     /**
-     * A cache master port is used for the memory-side port of the
+     * A cache request port is used for the memory-side port of the
      * cache, and in addition to the basic timing port that only sends
      * response packets through a transmit list, it also offers the
      * ability to schedule and send request packets (requests &
@@ -119,7 +119,7 @@
      * and the sendDeferredPacket of the timing port is modified to
      * consider both the transmit list and the requests from the MSHR.
      */
-    class CacheMasterPort : public QueuedMasterPort
+    class CacheRequestPort : public QueuedRequestPort
     {
 
       public:
@@ -136,10 +136,10 @@
 
       protected:
 
-        CacheMasterPort(const std::string &_name, BaseCache *_cache,
+        CacheRequestPort(const std::string &_name, BaseCache *_cache,
                         ReqPacketQueue &_reqQueue,
                         SnoopRespPacketQueue &_snoopRespQueue) :
-            QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
+            QueuedRequestPort(_name, _cache, _reqQueue, _snoopRespQueue)
         { }
 
         /**
@@ -202,10 +202,10 @@
 
 
     /**
-     * The memory-side port extends the base cache master port with
+     * The memory-side port extends the base cache request port with
      * access functions for functional, atomic and timing snoops.
      */
-    class MemSidePort : public CacheMasterPort
+    class MemSidePort : public CacheRequestPort
     {
       private:
 
@@ -234,14 +234,14 @@
     };
 
     /**
-     * A cache slave port is used for the CPU-side port of the cache,
+     * A cache response port is used for the CPU-side port of the cache,
      * and it is basically a simple timing port that uses a transmit
-     * list for responses to the CPU (or connected master). In
+     * list for responses to the CPU (or connected requestor). In
      * addition, it has the functionality to block the port for
      * incoming requests. If blocked, the port will issue a retry once
      * unblocked.
      */
-    class CacheSlavePort : public QueuedSlavePort
+    class CacheResponsePort : public QueuedResponsePort
     {
 
       public:
@@ -256,7 +256,7 @@
 
       protected:
 
-        CacheSlavePort(const std::string &_name, BaseCache *_cache,
+        CacheResponsePort(const std::string &_name, BaseCache *_cache,
                        const std::string &_label);
 
         /** A normal packet queue used to store responses. */
@@ -275,10 +275,10 @@
     };
 
     /**
-     * The CPU-side port extends the base cache slave port with access
+     * The CPU-side port extends the base cache response port with access
      * functions for functional, atomic and timing requests.
      */
-    class CpuSidePort : public CacheSlavePort
+    class CpuSidePort : public CacheResponsePort
     {
       private:
 
@@ -1154,7 +1154,7 @@
 
     /**
      * Marks the access path of the cache as blocked for the given cause. This
-     * also sets the blocked flag in the slave interface.
+     * also sets the blocked flag in the response interface.
      * @param cause The reason for the cache blocking.
      */
     void setBlocked(BlockedCause cause)
@@ -1219,8 +1219,8 @@
 
     void incMissCount(PacketPtr pkt)
     {
-        assert(pkt->req->masterId() < system->maxMasters());
-        stats.cmdStats(pkt).misses[pkt->req->masterId()]++;
+        assert(pkt->req->requestorId() < system->maxRequestors());
+        stats.cmdStats(pkt).misses[pkt->req->requestorId()]++;
         pkt->req->incAccessDepth();
         if (missCount) {
             --missCount;
@@ -1230,8 +1230,8 @@
     }
     void incHitCount(PacketPtr pkt)
     {
-        assert(pkt->req->masterId() < system->maxMasters());
-        stats.cmdStats(pkt).hits[pkt->req->masterId()]++;
+        assert(pkt->req->requestorId() < system->maxRequestors());
+        stats.cmdStats(pkt).hits[pkt->req->requestorId()]++;
     }
 
     /**
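The CacheSlavePort/CacheMasterPort to CacheResponsePort/CacheRequestPort rename above is internal to the C++ class hierarchy; the Python-visible port names on the classic cache (cpu_side, mem_side) are unchanged. A minimal config sketch, assuming a standard gem5 build (the object names l1d and l2bus and the cache parameters are illustrative only):

    from m5.objects import *

    cpu = TimingSimpleCPU()
    l2bus = L2XBar()

    # cpu_side is backed by CpuSidePort (a CacheResponsePort), mem_side by
    # MemSidePort (a CacheRequestPort); only the C++ base classes changed.
    l1d = Cache(size='32kB', assoc=4, tag_latency=2, data_latency=2,
                response_latency=2, mshrs=8, tgts_per_mshr=4)
    l1d.cpu_side = cpu.dcache_port
    l1d.mem_side = l2bus.cpu_side_ports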
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 6fb6f11..b4f4238 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -96,7 +96,7 @@
             } else if (blk->isWritable() && !pending_downgrade &&
                        !pkt->hasSharers() &&
                        pkt->cmd != MemCmd::ReadCleanReq) {
-                // we can give the requester a writable copy on a read
+                // we can give the requestor a writable copy on a read
                 // request if:
                 // - we have a writable copy at this level (& below)
                 // - we don't have a pending snoop from below
@@ -326,7 +326,7 @@
         // should have flushed and have no valid block
         assert(!blk || !blk->isValid());
 
-        stats.cmdStats(pkt).mshr_uncacheable[pkt->req->masterId()]++;
+        stats.cmdStats(pkt).mshr_uncacheable[pkt->req->requestorId()]++;
 
         if (pkt->isWrite()) {
             allocateWriteBuffer(pkt, forward_time);
@@ -371,9 +371,9 @@
         if (!mshr) {
             // copy the request and create a new SoftPFReq packet
             RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
-                                                       pkt->req->getSize(),
-                                                       pkt->req->getFlags(),
-                                                       pkt->req->masterId());
+                                                    pkt->req->getSize(),
+                                                    pkt->req->getFlags(),
+                                                    pkt->req->requestorId());
             pf = new Packet(req, pkt->cmd);
             pf->allocate();
             assert(pf->matchAddr(pkt));
@@ -774,9 +774,9 @@
 
                 assert(!tgt_pkt->req->isUncacheable());
 
-                assert(tgt_pkt->req->masterId() < system->maxMasters());
+                assert(tgt_pkt->req->requestorId() < system->maxRequestors());
                 stats.cmdStats(tgt_pkt)
-                    .missLatency[tgt_pkt->req->masterId()] +=
+                    .missLatency[tgt_pkt->req->requestorId()] +=
                     completion_time - target.recvTime;
             } else if (pkt->cmd == MemCmd::UpgradeFailResp) {
                 // failed StoreCond upgrade
@@ -912,7 +912,7 @@
 
     // Creating a zero sized write, a message to the snoop filter
     RequestPtr req = std::make_shared<Request>(
-        regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+        regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
 
     if (blk->isSecure())
         req->setFlags(Request::SECURE);
@@ -1006,7 +1006,7 @@
     if (forwardSnoops) {
         // first propagate snoop upward to see if anyone above us wants to
         // handle it.  save & restore packet src since it will get
-        // rewritten to be relative to cpu-side bus (if any)
+        // rewritten to be relative to CPU-side bus (if any)
         if (is_timing) {
             // copy the packet so that we can clear any flags before
             // forwarding it upwards, we also allocate data (passing
@@ -1026,7 +1026,7 @@
 
             // If this request is a prefetch or clean evict and an upper level
             // signals block present, make sure to propagate the block
-            // presence to the requester.
+            // presence to the requestor.
             if (snoopPkt.isBlockCached()) {
                 pkt->setBlockCached();
             }
@@ -1044,7 +1044,7 @@
             cpuSidePort.sendAtomicSnoop(pkt);
             if (!already_responded && pkt->cacheResponding()) {
                 // cache-to-cache response from some upper cache:
-                // forward response to original requester
+                // forward response to original requestor
                 assert(pkt->isResponse());
             }
         }
diff --git a/src/mem/cache/cache_blk.cc b/src/mem/cache/cache_blk.cc
index c4730ca..4d7e408 100644
--- a/src/mem/cache/cache_blk.cc
+++ b/src/mem/cache/cache_blk.cc
@@ -44,7 +44,7 @@
 
 void
 CacheBlk::insert(const Addr tag, const bool is_secure,
-                 const int src_master_ID, const uint32_t task_ID)
+                 const int src_requestor_ID, const uint32_t task_ID)
 {
     // Make sure that the block has been properly invalidated
     assert(status == 0);
@@ -53,7 +53,7 @@
     this->tag = tag;
 
     // Set source requestor ID
-    srcMasterId = src_master_ID;
+    srcRequestorId = src_requestor_ID;
 
     // Set task ID
     task_id = task_ID;
diff --git a/src/mem/cache/cache_blk.hh b/src/mem/cache/cache_blk.hh
index 427d7bb..99f8545 100644
--- a/src/mem/cache/cache_blk.hh
+++ b/src/mem/cache/cache_blk.hh
@@ -114,7 +114,7 @@
     unsigned refCount;
 
     /** holds the source requestor ID for this block. */
-    int srcMasterId;
+    int srcRequestorId;
 
     /**
      * Tick on which the block was inserted in the cache. Its value is only
@@ -215,7 +215,7 @@
         status = 0;
         whenReady = MaxTick;
         refCount = 0;
-        srcMasterId = Request::invldMasterId;
+        srcRequestorId = Request::invldRequestorId;
         lockList.clear();
     }
 
@@ -296,11 +296,11 @@
      *
      * @param tag Block address tag.
      * @param is_secure Whether the block is in secure space or not.
-     * @param src_master_ID The source requestor ID.
+     * @param src_requestor_ID The source requestor ID.
      * @param task_ID The new task ID.
      */
     virtual void insert(const Addr tag, const bool is_secure,
-                        const int src_master_ID, const uint32_t task_ID);
+                        const int src_requestor_ID, const uint32_t task_ID);
 
     /**
      * Track the fact that a local locked was issued to the
@@ -469,7 +469,8 @@
     }
 
     void insert(const Addr addr, const bool is_secure,
-                const int src_master_ID=0, const uint32_t task_ID=0) override
+                const int src_requestor_ID=0, const uint32_t task_ID=0)
+                override
     {
         // Make sure that the block has been properly invalidated
         assert(status == 0);
diff --git a/src/mem/cache/noncoherent_cache.cc b/src/mem/cache/noncoherent_cache.cc
index 01c7072..5ca1da0 100644
--- a/src/mem/cache/noncoherent_cache.cc
+++ b/src/mem/cache/noncoherent_cache.cc
@@ -228,7 +228,7 @@
 NoncoherentCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
 {
     panic_if(!from_cpu_side, "Non-coherent cache received functional snoop"
-             " request\n");
+            " request\n");
 
     BaseCache::functionalAccess(pkt, from_cpu_side);
 }
@@ -269,8 +269,8 @@
             completion_time += clockEdge(responseLatency) +
                 (transfer_offset ? pkt->payloadDelay : 0);
 
-            assert(tgt_pkt->req->masterId() < system->maxMasters());
-            stats.cmdStats(tgt_pkt).missLatency[tgt_pkt->req->masterId()] +=
+            assert(tgt_pkt->req->requestorId() < system->maxRequestors());
+            stats.cmdStats(tgt_pkt).missLatency[tgt_pkt->req->requestorId()] +=
                 completion_time - target.recvTime;
 
             tgt_pkt->makeTimingResponse();
diff --git a/src/mem/cache/prefetch/Prefetcher.py b/src/mem/cache/prefetch/Prefetcher.py
index c163028..758803f 100644
--- a/src/mem/cache/prefetch/Prefetcher.py
+++ b/src/mem/cache/prefetch/Prefetcher.py
@@ -163,7 +163,7 @@
     confidence_threshold = Param.Percent(50,
         "Prefetch generation confidence threshold")
 
-    use_master_id = Param.Bool(True, "Use master id based history")
+    use_requestor_id = Param.Bool(True, "Use requestor id based history")
 
     degree = Param.Int(4, "Number of prefetches to generate")
 
diff --git a/src/mem/cache/prefetch/base.cc b/src/mem/cache/prefetch/base.cc
index 4e484e5..a35be33 100644
--- a/src/mem/cache/prefetch/base.cc
+++ b/src/mem/cache/prefetch/base.cc
@@ -57,7 +57,7 @@
 
 Base::PrefetchInfo::PrefetchInfo(PacketPtr pkt, Addr addr, bool miss)
   : address(addr), pc(pkt->req->hasPC() ? pkt->req->getPC() : 0),
-    masterId(pkt->req->masterId()), validPC(pkt->req->hasPC()),
+    requestorId(pkt->req->requestorId()), validPC(pkt->req->hasPC()),
     secure(pkt->isSecure()), size(pkt->req->getSize()), write(pkt->isWrite()),
     paddress(pkt->req->getPaddr()), cacheMiss(miss)
 {
@@ -72,9 +72,10 @@
 }
 
 Base::PrefetchInfo::PrefetchInfo(PrefetchInfo const &pfi, Addr addr)
-  : address(addr), pc(pfi.pc), masterId(pfi.masterId), validPC(pfi.validPC),
-    secure(pfi.secure), size(pfi.size), write(pfi.write),
-    paddress(pfi.paddress), cacheMiss(pfi.cacheMiss), data(nullptr)
+  : address(addr), pc(pfi.pc), requestorId(pfi.requestorId),
+    validPC(pfi.validPC), secure(pfi.secure), size(pfi.size),
+    write(pfi.write), paddress(pfi.paddress), cacheMiss(pfi.cacheMiss),
+    data(nullptr)
 {
 }
 
@@ -92,7 +93,8 @@
     : ClockedObject(p), listeners(), cache(nullptr), blkSize(p->block_size),
       lBlkSize(floorLog2(blkSize)), onMiss(p->on_miss), onRead(p->on_read),
       onWrite(p->on_write), onData(p->on_data), onInst(p->on_inst),
-      masterId(p->sys->getMasterId(this)), pageBytes(p->sys->getPageBytes()),
+      requestorId(p->sys->getRequestorId(this)),
+      pageBytes(p->sys->getPageBytes()),
       prefetchOnAccess(p->prefetch_on_access),
       useVirtualAddresses(p->use_virtual_addresses),
       prefetchStats(this), issuedPrefetches(0),
diff --git a/src/mem/cache/prefetch/base.hh b/src/mem/cache/prefetch/base.hh
index 4afacf0..cb52b57 100644
--- a/src/mem/cache/prefetch/base.hh
+++ b/src/mem/cache/prefetch/base.hh
@@ -93,7 +93,7 @@
         /** The program counter that generated this address. */
         Addr pc;
         /** The requestor ID that generated this address. */
-        MasterID masterId;
+        RequestorID requestorId;
         /** Validity bit for the PC of this address. */
         bool validPC;
         /** Whether this address targets the secure memory space. */
@@ -151,9 +151,9 @@
          * Gets the requestor ID that generated this address
          * @return the requestor ID that generated this address
          */
-        MasterID getMasterId() const
+        RequestorID getRequestorId() const
         {
-            return masterId;
+            return requestorId;
         }
 
         /**
@@ -280,7 +280,7 @@
     const bool onInst;
 
     /** Request id for prefetches */
-    const MasterID masterId;
+    const RequestorID requestorId;
 
     const Addr pageBytes;
 
diff --git a/src/mem/cache/prefetch/queued.cc b/src/mem/cache/prefetch/queued.cc
index 6b89c2b..90491a7 100644
--- a/src/mem/cache/prefetch/queued.cc
+++ b/src/mem/cache/prefetch/queued.cc
@@ -51,10 +51,12 @@
 
 void
 Queued::DeferredPacket::createPkt(Addr paddr, unsigned blk_size,
-                                            MasterID mid, bool tag_prefetch,
+                                            RequestorID requestor_id,
+                                            bool tag_prefetch,
                                             Tick t) {
     /* Create a prefetch memory request */
-    RequestPtr req = std::make_shared<Request>(paddr, blk_size, 0, mid);
+    RequestPtr req = std::make_shared<Request>(paddr, blk_size,
+                                                0, requestor_id);
 
     if (pfInfo.isSecure()) {
         req->setFlags(Request::SECURE);
@@ -277,7 +279,7 @@
         } else {
             Tick pf_time = curTick() + clockPeriod() * latency;
             it->createPkt(it->translationRequest->getPaddr(), blkSize,
-                    masterId, tagPrefetch, pf_time);
+                    requestorId, tagPrefetch, pf_time);
             addToQueue(pfq, *it);
         }
     } else {
@@ -328,7 +330,7 @@
                                         PacketPtr pkt)
 {
     RequestPtr translation_req = std::make_shared<Request>(
-            addr, blkSize, pkt->req->getFlags(), masterId, pfi.getPC(),
+            addr, blkSize, pkt->req->getFlags(), requestorId, pfi.getPC(),
             pkt->req->contextId());
     translation_req->setFlags(Request::PREFETCH);
     return translation_req;
@@ -417,7 +419,8 @@
     DeferredPacket dpp(this, new_pfi, 0, priority);
     if (has_target_pa) {
         Tick pf_time = curTick() + clockPeriod() * latency;
-        dpp.createPkt(target_paddr, blkSize, masterId, tagPrefetch, pf_time);
+        dpp.createPkt(target_paddr, blkSize, requestorId, tagPrefetch,
+                      pf_time);
         DPRINTF(HWPrefetch, "Prefetch queued. "
                 "addr:%#x priority: %3d tick:%lld.\n",
                 new_pfi.getAddr(), priority, pf_time);
diff --git a/src/mem/cache/prefetch/queued.hh b/src/mem/cache/prefetch/queued.hh
index 96cf311..0627c5c 100644
--- a/src/mem/cache/prefetch/queued.hh
+++ b/src/mem/cache/prefetch/queued.hh
@@ -101,12 +101,13 @@
          * Create the associated memory packet
          * @param paddr physical address of this packet
          * @param blk_size block size used by the prefetcher
-         * @param mid Requester ID of the access that generated this prefetch
+         * @param requestor_id Requestor ID of the access that generated
+         *        this prefetch
          * @param tag_prefetch flag to indicate if the packet needs to be
          *        tagged
          * @param t time when the prefetch becomes ready
          */
-        void createPkt(Addr paddr, unsigned blk_size, MasterID mid,
+        void createPkt(Addr paddr, unsigned blk_size, RequestorID requestor_id,
                        bool tag_prefetch, Tick t);
 
         /**
diff --git a/src/mem/cache/prefetch/stride.cc b/src/mem/cache/prefetch/stride.cc
index 36773c6..9b58943 100644
--- a/src/mem/cache/prefetch/stride.cc
+++ b/src/mem/cache/prefetch/stride.cc
@@ -77,7 +77,7 @@
   : Queued(p),
     initConfidence(p->confidence_counter_bits, p->initial_confidence),
     threshConf(p->confidence_threshold/100.0),
-    useMasterId(p->use_master_id),
+    useRequestorId(p->use_requestor_id),
     degree(p->degree),
     pcTableInfo(p->table_assoc, p->table_entries, p->table_indexing_policy,
         p->table_replacement_policy)
@@ -124,10 +124,10 @@
     Addr pf_addr = pfi.getAddr();
     Addr pc = pfi.getPC();
     bool is_secure = pfi.isSecure();
-    MasterID master_id = useMasterId ? pfi.getMasterId() : 0;
+    RequestorID requestor_id = useRequestorId ? pfi.getRequestorId() : 0;
 
     // Get corresponding pc table
-    PCTable* pcTable = findTable(master_id);
+    PCTable* pcTable = findTable(requestor_id);
 
     // Search for entry in the pc table
     StrideEntry *entry = pcTable->findEntry(pc, is_secure);
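Because use_master_id was only a Python parameter name, config scripts just need the keyword updated; the value is read into useRequestorId above and selects the per-requestor PC table. A hypothetical sketch (the cache object and parameter values are illustrative only):

    from m5.objects import *

    dcache = Cache(size='64kB', assoc=8, tag_latency=2, data_latency=2,
                   response_latency=2, mshrs=16, tgts_per_mshr=8)
    # previously: StridePrefetcher(degree=4, use_master_id=True)
    dcache.prefetcher = StridePrefetcher(degree=4, use_requestor_id=True)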
diff --git a/src/mem/cache/prefetch/stride.hh b/src/mem/cache/prefetch/stride.hh
index 72e335b..13215c3 100644
--- a/src/mem/cache/prefetch/stride.hh
+++ b/src/mem/cache/prefetch/stride.hh
@@ -94,7 +94,7 @@
     /** Confidence threshold for prefetch generation. */
     const double threshConf;
 
-    const bool useMasterId;
+    const bool useRequestorId;
 
     const int degree;
 
diff --git a/src/mem/cache/tags/base.cc b/src/mem/cache/tags/base.cc
index faad7be..32c6d29 100644
--- a/src/mem/cache/tags/base.cc
+++ b/src/mem/cache/tags/base.cc
@@ -105,12 +105,12 @@
     // to insert the new one
 
     // Deal with what we are bringing in
-    MasterID master_id = pkt->req->masterId();
-    assert(master_id < system->maxMasters());
-    stats.occupancies[master_id]++;
+    RequestorID requestor_id = pkt->req->requestorId();
+    assert(requestor_id < system->maxRequestors());
+    stats.occupancies[requestor_id]++;
 
-    // Insert block with tag, src master id and task id
-    blk->insert(extractTag(pkt->getAddr()), pkt->isSecure(), master_id,
+    // Insert block with tag, src requestor id and task id
+    blk->insert(extractTag(pkt->getAddr()), pkt->isSecure(), requestor_id,
                 pkt->req->taskId());
 
     // Check if cache warm up is done
@@ -240,16 +240,16 @@
     avgRefs = totalRefs / sampledRefs;
 
     occupancies
-        .init(system->maxMasters())
+        .init(system->maxRequestors())
         .flags(nozero | nonan)
         ;
-    for (int i = 0; i < system->maxMasters(); i++) {
-        occupancies.subname(i, system->getMasterName(i));
+    for (int i = 0; i < system->maxRequestors(); i++) {
+        occupancies.subname(i, system->getRequestorName(i));
     }
 
     avgOccs.flags(nozero | total);
-    for (int i = 0; i < system->maxMasters(); i++) {
-        avgOccs.subname(i, system->getMasterName(i));
+    for (int i = 0; i < system->maxRequestors(); i++) {
+        avgOccs.subname(i, system->getRequestorName(i));
     }
 
     avgOccs = occupancies / Stats::constant(tags.numBlocks);
diff --git a/src/mem/cache/tags/base.hh b/src/mem/cache/tags/base.hh
index 5f02462..5e0af20 100644
--- a/src/mem/cache/tags/base.hh
+++ b/src/mem/cache/tags/base.hh
@@ -253,7 +253,7 @@
         assert(blk);
         assert(blk->isValid());
 
-        stats.occupancies[blk->srcMasterId]--;
+        stats.occupancies[blk->srcRequestorId]--;
         stats.totalRefs += blk->refCount;
         stats.sampledRefs++;
 
diff --git a/src/mem/cache/tags/sector_blk.cc b/src/mem/cache/tags/sector_blk.cc
index cea3d65..e914cef 100644
--- a/src/mem/cache/tags/sector_blk.cc
+++ b/src/mem/cache/tags/sector_blk.cc
@@ -92,7 +92,7 @@
 
 void
 SectorSubBlk::insert(const Addr tag, const bool is_secure,
-                     const int src_master_ID, const uint32_t task_ID)
+                     const int src_requestor_ID, const uint32_t task_ID)
 {
     // Make sure it is not overwriting another sector
     panic_if((_sectorBlk && _sectorBlk->isValid()) &&
@@ -100,7 +100,7 @@
               (_sectorBlk->isSecure() != is_secure)),
               "Overwriting valid sector!");
 
-    CacheBlk::insert(tag, is_secure, src_master_ID, task_ID);
+    CacheBlk::insert(tag, is_secure, src_requestor_ID, task_ID);
 
     // Set sector tag
     _sectorBlk->setTag(tag);
diff --git a/src/mem/cache/tags/sector_blk.hh b/src/mem/cache/tags/sector_blk.hh
index 0a9087e..5538aa1 100644
--- a/src/mem/cache/tags/sector_blk.hh
+++ b/src/mem/cache/tags/sector_blk.hh
@@ -122,11 +122,11 @@
      *
      * @param tag Block address tag.
      * @param is_secure Whether the block is in secure space or not.
-     * @param src_master_ID The source requestor ID.
+     * @param src_requestor_ID The source requestor ID.
      * @param task_ID The new task ID.
      */
-    void insert(const Addr tag, const bool is_secure, const int src_master_ID,
-                const uint32_t task_ID) override;
+    void insert(const Addr tag, const bool is_secure,
+                const int src_requestor_ID, const uint32_t task_ID) override;
 
     /**
      * Pretty-print sector offset and other CacheBlk information.
diff --git a/src/mem/coherent_xbar.cc b/src/mem/coherent_xbar.cc
index c5778fb..037bd32 100644
--- a/src/mem/coherent_xbar.cc
+++ b/src/mem/coherent_xbar.cc
@@ -63,27 +63,27 @@
       snoopTraffic(this, "snoopTraffic", "Total snoop traffic (bytes)"),
       snoopFanout(this, "snoop_fanout", "Request fanout histogram")
 {
-    // create the ports based on the size of the master and slave
-    // vector ports, and the presence of the default port, the ports
-    // are enumerated starting from zero
-    for (int i = 0; i < p->port_master_connection_count; ++i) {
-        std::string portName = csprintf("%s.master[%d]", name(), i);
-        RequestPort* bp = new CoherentXBarMasterPort(portName, *this, i);
-        masterPorts.push_back(bp);
+    // create the ports based on the sizes of the memory-side and
+    // CPU-side vector ports, and the presence of the default port;
+    // the ports are enumerated starting from zero
+    for (int i = 0; i < p->port_mem_side_ports_connection_count; ++i) {
+        std::string portName = csprintf("%s.mem_side_port[%d]", name(), i);
+        RequestPort* bp = new CoherentXBarRequestPort(portName, *this, i);
+        memSidePorts.push_back(bp);
         reqLayers.push_back(new ReqLayer(*bp, *this,
                                          csprintf("reqLayer%d", i)));
         snoopLayers.push_back(
                 new SnoopRespLayer(*bp, *this, csprintf("snoopLayer%d", i)));
     }
 
-    // see if we have a default slave device connected and if so add
-    // our corresponding master port
+    // see if we have a default responder device connected and if so add
+    // our corresponding memory-side port
     if (p->port_default_connection_count) {
-        defaultPortID = masterPorts.size();
+        defaultPortID = memSidePorts.size();
         std::string portName = name() + ".default";
-        RequestPort* bp = new CoherentXBarMasterPort(portName, *this,
+        RequestPort* bp = new CoherentXBarRequestPort(portName, *this,
                                                     defaultPortID);
-        masterPorts.push_back(bp);
+        memSidePorts.push_back(bp);
         reqLayers.push_back(new ReqLayer(*bp, *this, csprintf("reqLayer%d",
                                          defaultPortID)));
         snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
@@ -91,11 +91,12 @@
                                                           defaultPortID)));
     }
 
-    // create the slave ports, once again starting at zero
-    for (int i = 0; i < p->port_slave_connection_count; ++i) {
-        std::string portName = csprintf("%s.slave[%d]", name(), i);
-        QueuedSlavePort* bp = new CoherentXBarSlavePort(portName, *this, i);
-        slavePorts.push_back(bp);
+    // create the CPU-side ports, once again starting at zero
+    for (int i = 0; i < p->port_cpu_side_ports_connection_count; ++i) {
+        std::string portName = csprintf("%s.cpu_side_port[%d]", name(), i);
+        QueuedResponsePort* bp = new CoherentXBarResponsePort(portName,
+                                                            *this, i);
+        cpuSidePorts.push_back(bp);
         respLayers.push_back(new RespLayer(*bp, *this,
                                            csprintf("respLayer%d", i)));
         snoopRespPorts.push_back(new SnoopRespPort(*bp, *this));
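With the vector ports renamed, config scripts attach requestors to cpu_side_ports and responders to mem_side_ports; the constructor above creates one CoherentXBarResponsePort or CoherentXBarRequestPort per connection. A minimal sketch, assuming a standard gem5 build (object names are illustrative only):

    from m5.objects import *

    cpu = TimingSimpleCPU()
    membus = SystemXBar()

    # requestors (CPU ports, cache mem_side ports, ...) connect here
    cpu.icache_port = membus.cpu_side_ports
    cpu.dcache_port = membus.cpu_side_ports

    # responders (memories, bridges, ...) connect here
    mem = SimpleMemory(range=AddrRange('512MB'))
    mem.port = membus.mem_side_ports
    # an optional default responder can be attached via membus.default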
@@ -119,12 +120,13 @@
 {
     BaseXBar::init();
 
-    // iterate over our slave ports and determine which of our
-    // neighbouring master ports are snooping and add them as snoopers
-    for (const auto& p: slavePorts) {
-        // check if the connected master port is snooping
+    // iterate over our CPU-side ports and determine which of our
+    // neighbouring memory-side ports are snooping and add them as snoopers
+    for (const auto& p: cpuSidePorts) {
+        // check if the connected memory-side port is snooping
         if (p->isSnooping()) {
-            DPRINTF(AddrRanges, "Adding snooping master %s\n", p->getPeer());
+            DPRINTF(AddrRanges, "Adding snooping requestor %s\n",
+                    p->getPeer());
             snoopPorts.push_back(p);
         }
     }
@@ -132,17 +134,17 @@
     if (snoopPorts.empty())
         warn("CoherentXBar %s has no snooping ports attached!\n", name());
 
-    // inform the snoop filter about the slave ports so it can create
+    // inform the snoop filter about the CPU-side ports so it can create
     // its own internal representation
     if (snoopFilter)
-        snoopFilter->setSlavePorts(slavePorts);
+        snoopFilter->setCPUSidePorts(cpuSidePorts);
 }
 
 bool
-CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
+CoherentXBar::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id)
 {
     // determine the source port based on the id
-    ResponsePort *src_port = slavePorts[slave_port_id];
+    ResponsePort *src_port = cpuSidePorts[cpu_side_port_id];
 
     // remember if the packet is an express snoop
     bool is_express_snoop = pkt->isExpressSnoop();
@@ -152,11 +154,12 @@
     assert(is_express_snoop == cache_responding);
 
     // determine the destination based on the destination address range
-    PortID master_port_id = findPort(pkt->getAddrRange());
+    PortID mem_side_port_id = findPort(pkt->getAddrRange());
 
     // test if the crossbar should be considered occupied for the current
     // port, and exclude express snoops from the check
-    if (!is_express_snoop && !reqLayers[master_port_id]->tryTiming(src_port)) {
+    if (!is_express_snoop &&
+        !reqLayers[mem_side_port_id]->tryTiming(src_port)) {
         DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                 src_port->name(), pkt->print());
         return false;
@@ -197,12 +200,12 @@
             // before snooping we need to make sure that the memory
             // below is not busy and the cache clean request can be
             // forwarded to it
-            if (!masterPorts[master_port_id]->tryTiming(pkt)) {
+            if (!memSidePorts[mem_side_port_id]->tryTiming(pkt)) {
                 DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
                         src_port->name(), pkt->print());
 
                 // update the layer state and schedule an idle event
-                reqLayers[master_port_id]->failedTiming(src_port,
+                reqLayers[mem_side_port_id]->failedTiming(src_port,
                                                         clockEdge(Cycles(1)));
                 return false;
             }
@@ -231,10 +234,10 @@
                 if (!sf_res.first.empty())
                     pkt->setBlockCached();
             } else {
-                forwardTiming(pkt, slave_port_id, sf_res.first);
+                forwardTiming(pkt, cpu_side_port_id, sf_res.first);
             }
         } else {
-            forwardTiming(pkt, slave_port_id);
+            forwardTiming(pkt, cpu_side_port_id);
         }
 
         // add the snoop delay to our header delay, and then reset it
@@ -282,7 +285,7 @@
             }
 
             // since it is a normal request, attempt to send the packet
-            success = masterPorts[master_port_id]->sendTimingReq(pkt);
+            success = memSidePorts[mem_side_port_id]->sendTimingReq(pkt);
         } else {
             // no need to forward, turn this packet around and respond
             // directly
@@ -311,7 +314,7 @@
                 src_port->name(), pkt->print());
 
         // update the layer state and schedule an idle event
-        reqLayers[master_port_id]->failedTiming(src_port,
+        reqLayers[mem_side_port_id]->failedTiming(src_port,
                                                 clockEdge(Cycles(1)));
     } else {
         // express snoops currently bypass the crossbar state entirely
@@ -333,7 +336,7 @@
             // remember where to route the normal response to
             if (expect_response || expect_snoop_resp) {
                 assert(routeTo.find(pkt->req) == routeTo.end());
-                routeTo[pkt->req] = slave_port_id;
+                routeTo[pkt->req] = cpu_side_port_id;
 
                 panic_if(routeTo.size() > maxRoutingTableSizeCheck,
                          "%s: Routing table exceeds %d packets\n",
@@ -341,12 +344,12 @@
             }
 
             // update the layer state and schedule an idle event
-            reqLayers[master_port_id]->succeededTiming(packetFinishTime);
+            reqLayers[mem_side_port_id]->succeededTiming(packetFinishTime);
         }
 
         // stats updates only consider packets that were successfully sent
-        pktCount[slave_port_id][master_port_id]++;
-        pktSize[slave_port_id][master_port_id] += pkt_size;
+        pktCount[cpu_side_port_id][mem_side_port_id]++;
+        pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
         transDist[pkt_cmd]++;
 
         if (is_express_snoop) {
@@ -361,7 +364,7 @@
 
     // normally we respond to the packet we just received if we need to
     PacketPtr rsp_pkt = pkt;
-    PortID rsp_port_id = slave_port_id;
+    PortID rsp_port_id = cpu_side_port_id;
 
     // If this is the destination of the cache clean operation the
     // crossbar is responsible for responding. This crossbar will
@@ -401,7 +404,7 @@
             outstandingCMO.emplace(pkt->id, deferred_rsp);
             if (!pkt->isWrite()) {
                 assert(routeTo.find(pkt->req) == routeTo.end());
-                routeTo[pkt->req] = slave_port_id;
+                routeTo[pkt->req] = cpu_side_port_id;
 
                 panic_if(routeTo.size() > maxRoutingTableSizeCheck,
                          "%s: Routing table exceeds %d packets\n",
@@ -419,7 +422,7 @@
 
         if (snoopFilter && !system->bypassCaches()) {
             // let the snoop filter inspect the response and update its state
-            snoopFilter->updateResponse(rsp_pkt, *slavePorts[rsp_port_id]);
+            snoopFilter->updateResponse(rsp_pkt, *cpuSidePorts[rsp_port_id]);
         }
 
         // we send the response after the current packet, even if the
@@ -429,28 +432,28 @@
         Tick response_time = clockEdge() + pkt->headerDelay;
         rsp_pkt->headerDelay = 0;
 
-        slavePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time);
+        cpuSidePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time);
     }
 
     return success;
 }
 
 bool
-CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
+CoherentXBar::recvTimingResp(PacketPtr pkt, PortID mem_side_port_id)
 {
     // determine the source port based on the id
-    RequestPort *src_port = masterPorts[master_port_id];
+    RequestPort *src_port = memSidePorts[mem_side_port_id];
 
     // determine the destination
     const auto route_lookup = routeTo.find(pkt->req);
     assert(route_lookup != routeTo.end());
-    const PortID slave_port_id = route_lookup->second;
-    assert(slave_port_id != InvalidPortID);
-    assert(slave_port_id < respLayers.size());
+    const PortID cpu_side_port_id = route_lookup->second;
+    assert(cpu_side_port_id != InvalidPortID);
+    assert(cpu_side_port_id < respLayers.size());
 
     // test if the crossbar should be considered occupied for the
     // current port
-    if (!respLayers[slave_port_id]->tryTiming(src_port)) {
+    if (!respLayers[cpu_side_port_id]->tryTiming(src_port)) {
         DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
                 src_port->name(), pkt->print());
         return false;
@@ -475,33 +478,34 @@
 
     if (snoopFilter && !system->bypassCaches()) {
         // let the snoop filter inspect the response and update its state
-        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
+        snoopFilter->updateResponse(pkt, *cpuSidePorts[cpu_side_port_id]);
     }
 
-    // send the packet through the destination slave port and pay for
+    // send the packet through the destination CPU-side port and pay for
     // any outstanding header delay
     Tick latency = pkt->headerDelay;
     pkt->headerDelay = 0;
-    slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency);
+    cpuSidePorts[cpu_side_port_id]->schedTimingResp(pkt,
+                                                    curTick() + latency);
 
     // remove the request from the routing table
     routeTo.erase(route_lookup);
 
-    respLayers[slave_port_id]->succeededTiming(packetFinishTime);
+    respLayers[cpu_side_port_id]->succeededTiming(packetFinishTime);
 
     // stats updates
-    pktCount[slave_port_id][master_port_id]++;
-    pktSize[slave_port_id][master_port_id] += pkt_size;
+    pktCount[cpu_side_port_id][mem_side_port_id]++;
+    pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
     transDist[pkt_cmd]++;
 
     return true;
 }
 
 void
-CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id)
+CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID mem_side_port_id)
 {
     DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
-            masterPorts[master_port_id]->name(), pkt->print());
+            memSidePorts[mem_side_port_id]->name(), pkt->print());
 
     // update stats here as we know the forwarding will succeed
     unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
@@ -530,8 +534,8 @@
         // of the snoop filter
         pkt->headerDelay += sf_res.second * clockPeriod();
         DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
-                __func__, masterPorts[master_port_id]->name(), pkt->print(),
-                sf_res.first.size(), sf_res.second);
+                __func__, memSidePorts[mem_side_port_id]->name(),
+                pkt->print(), sf_res.first.size(), sf_res.second);
 
         // forward to all snoopers
         forwardTiming(pkt, InvalidPortID, sf_res.first);
@@ -546,22 +550,22 @@
     // if we can expect a response, remember how to route it
     if (!cache_responding && pkt->cacheResponding()) {
         assert(routeTo.find(pkt->req) == routeTo.end());
-        routeTo[pkt->req] = master_port_id;
+        routeTo[pkt->req] = mem_side_port_id;
     }
 
-    // a snoop request came from a connected slave device (one of
-    // our master ports), and if it is not coming from the slave
+    // a snoop request came from a connected responder device (one of
+    // our memory-side ports), and if it is not coming from the responder
     // device responsible for the address range something is
     // wrong, hence there is nothing further to do as the packet
     // would be going back to where it came from
-    assert(findPort(pkt->getAddrRange()) == master_port_id);
+    assert(findPort(pkt->getAddrRange()) == mem_side_port_id);
 }
 
 bool
-CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id)
+CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID cpu_side_port_id)
 {
     // determine the source port based on the id
-    ResponsePort* src_port = slavePorts[slave_port_id];
+    ResponsePort* src_port = cpuSidePorts[cpu_side_port_id];
 
     // get the destination
     const auto route_lookup = routeTo.find(pkt->req);
@@ -588,8 +592,8 @@
             return false;
         }
     } else {
-        // get the master port that mirrors this slave port internally
-        RequestPort* snoop_port = snoopRespPorts[slave_port_id];
+        // get the memory-side port that mirrors this CPU-side port internally
+        RequestPort* snoop_port = snoopRespPorts[cpu_side_port_id];
         assert(dest_port_id < respLayers.size());
         if (!respLayers[dest_port_id]->tryTiming(snoop_port)) {
             DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
@@ -629,44 +633,48 @@
 
         if (snoopFilter) {
             // update the probe filter so that it can properly track the line
-            snoopFilter->updateSnoopForward(pkt, *slavePorts[slave_port_id],
-                                            *masterPorts[dest_port_id]);
+            snoopFilter->updateSnoopForward(pkt,
+                            *cpuSidePorts[cpu_side_port_id],
+                            *memSidePorts[dest_port_id]);
         }
 
         bool success M5_VAR_USED =
-            masterPorts[dest_port_id]->sendTimingSnoopResp(pkt);
-        pktCount[slave_port_id][dest_port_id]++;
-        pktSize[slave_port_id][dest_port_id] += pkt_size;
+            memSidePorts[dest_port_id]->sendTimingSnoopResp(pkt);
+        pktCount[cpu_side_port_id][dest_port_id]++;
+        pktSize[cpu_side_port_id][dest_port_id] += pkt_size;
         assert(success);
 
         snoopLayers[dest_port_id]->succeededTiming(packetFinishTime);
     } else {
-        // we got a snoop response on one of our slave ports,
-        // i.e. from a coherent master connected to the crossbar, and
+        // we got a snoop response on one of our CPU-side ports,
+        // i.e. from a coherent requestor connected to the crossbar, and
         // since we created the snoop request as part of recvTiming,
         // this should now be a normal response again
         outstandingSnoop.erase(pkt->req);
 
-        // this is a snoop response from a coherent master, hence it
+        // this is a snoop response from a coherent requestor, hence it
         // should never go back to where the snoop response came from,
         // but instead to where the original request came from
-        assert(slave_port_id != dest_port_id);
+        assert(cpu_side_port_id != dest_port_id);
 
         if (snoopFilter) {
-            // update the probe filter so that it can properly track the line
-            snoopFilter->updateSnoopResponse(pkt, *slavePorts[slave_port_id],
-                                    *slavePorts[dest_port_id]);
+            // update the probe filter so that it can properly track
+            // the line
+            snoopFilter->updateSnoopResponse(pkt,
+                        *cpuSidePorts[cpu_side_port_id],
+                        *cpuSidePorts[dest_port_id]);
         }
 
         DPRINTF(CoherentXBar, "%s: src %s packet %s FWD RESP\n", __func__,
                 src_port->name(), pkt->print());
 
-        // as a normal response, it should go back to a master through
-        // one of our slave ports, we also pay for any outstanding
+        // as a normal response, it should go back to a requestor through
+        // one of our CPU-side ports, we also pay for any outstanding
         // header latency
         Tick latency = pkt->headerDelay;
         pkt->headerDelay = 0;
-        slavePorts[dest_port_id]->schedTimingResp(pkt, curTick() + latency);
+        cpuSidePorts[dest_port_id]->schedTimingResp(pkt,
+                                    curTick() + latency);
 
         respLayers[dest_port_id]->succeededTiming(packetFinishTime);
     }
@@ -684,8 +692,8 @@
 
 
 void
-CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
-                           const std::vector<QueuedSlavePort*>& dests)
+CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_cpu_side_port_id,
+                           const std::vector<QueuedResponsePort*>& dests)
 {
     DPRINTF(CoherentXBar, "%s for %s\n", __func__, pkt->print());
 
@@ -695,12 +703,12 @@
     unsigned fanout = 0;
 
     for (const auto& p: dests) {
-        // we could have gotten this request from a snooping master
-        // (corresponding to our own slave port that is also in
+        // we could have gotten this request from a snooping requestor
+        // (corresponding to our own CPU-side port that is also in
         // snoopPorts) and should not send it back to where it came
         // from
-        if (exclude_slave_port_id == InvalidPortID ||
-            p->getId() != exclude_slave_port_id) {
+        if (exclude_cpu_side_port_id == InvalidPortID ||
+            p->getId() != exclude_cpu_side_port_id) {
             // cache is not allowed to refuse snoop
             p->sendTimingSnoopReq(pkt);
             fanout++;
@@ -712,20 +720,20 @@
 }
 
 void
-CoherentXBar::recvReqRetry(PortID master_port_id)
+CoherentXBar::recvReqRetry(PortID mem_side_port_id)
 {
     // responses and snoop responses never block on forwarding them,
     // so the retry will always be coming from a port to which we
     // tried to forward a request
-    reqLayers[master_port_id]->recvRetry();
+    reqLayers[mem_side_port_id]->recvRetry();
 }
 
 Tick
-CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
+CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
                                  MemBackdoorPtr *backdoor)
 {
     DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
-            slavePorts[slave_port_id]->name(), pkt->print());
+            cpuSidePorts[cpu_side_port_id]->name(), pkt->print());
 
     unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
     unsigned int pkt_cmd = pkt->cmdToIndex();
@@ -747,11 +755,12 @@
         if (snoopFilter) {
             // check with the snoop filter where to forward this packet
             auto sf_res =
-                snoopFilter->lookupRequest(pkt, *slavePorts[slave_port_id]);
+                snoopFilter->lookupRequest(pkt,
+                *cpuSidePorts[cpu_side_port_id]);
             snoop_response_latency += sf_res.second * clockPeriod();
             DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
-                    __func__, slavePorts[slave_port_id]->name(), pkt->print(),
-                    sf_res.first.size(), sf_res.second);
+                    __func__, cpuSidePorts[cpu_side_port_id]->name(),
+                    pkt->print(), sf_res.first.size(), sf_res.second);
 
             // let the snoop filter know about the success of the send
             // operation, and do it even before sending it onwards to
@@ -768,11 +777,11 @@
                 if (!sf_res.first.empty())
                     pkt->setBlockCached();
             } else {
-                snoop_result = forwardAtomic(pkt, slave_port_id, InvalidPortID,
-                                             sf_res.first);
+                snoop_result = forwardAtomic(pkt, cpu_side_port_id,
+                                            InvalidPortID, sf_res.first);
             }
         } else {
-            snoop_result = forwardAtomic(pkt, slave_port_id);
+            snoop_result = forwardAtomic(pkt, cpu_side_port_id);
         }
         snoop_response_cmd = snoop_result.first;
         snoop_response_latency += snoop_result.second;
@@ -785,7 +794,7 @@
 
     // even if we had a snoop response, we must continue and also
     // perform the actual request at the destination
-    PortID master_port_id = findPort(pkt->getAddrRange());
+    PortID mem_side_port_id = findPort(pkt->getAddrRange());
 
     if (sink_packet) {
         DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
@@ -800,10 +809,10 @@
             }
 
             // forward the request to the appropriate destination
-            auto master = masterPorts[master_port_id];
+            auto mem_side_port = memSidePorts[mem_side_port_id];
             response_latency = backdoor ?
-                master->sendAtomicBackdoor(pkt, *backdoor) :
-                master->sendAtomic(pkt);
+                mem_side_port->sendAtomicBackdoor(pkt, *backdoor) :
+                mem_side_port->sendAtomic(pkt);
         } else {
             // if it does not need a response we sink the packet above
             assert(pkt->needsResponse());
@@ -813,14 +822,14 @@
     }
 
     // stats updates for the request
-    pktCount[slave_port_id][master_port_id]++;
-    pktSize[slave_port_id][master_port_id] += pkt_size;
+    pktCount[cpu_side_port_id][mem_side_port_id]++;
+    pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
     transDist[pkt_cmd]++;
 
 
     // if lower levels have replied, tell the snoop filter
     if (!system->bypassCaches() && snoopFilter && pkt->isResponse()) {
-        snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
+        snoopFilter->updateResponse(pkt, *cpuSidePorts[cpu_side_port_id]);
     }
 
     // if we got a response from a snooper, restore it here
@@ -861,8 +870,8 @@
         pkt_cmd = pkt->cmdToIndex();
 
         // stats updates
-        pktCount[slave_port_id][master_port_id]++;
-        pktSize[slave_port_id][master_port_id] += pkt_size;
+        pktCount[cpu_side_port_id][mem_side_port_id]++;
+        pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
         transDist[pkt_cmd]++;
     }
 
@@ -872,10 +881,10 @@
 }
 
 Tick
-CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id)
+CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID mem_side_port_id)
 {
     DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
-            masterPorts[master_port_id]->name(), pkt->print());
+            memSidePorts[mem_side_port_id]->name(), pkt->print());
 
     // add the request snoop data
     unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
@@ -889,9 +898,9 @@
         auto sf_res = snoopFilter->lookupSnoop(pkt);
         snoop_response_latency += sf_res.second * clockPeriod();
         DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
-                __func__, masterPorts[master_port_id]->name(), pkt->print(),
-                sf_res.first.size(), sf_res.second);
-        snoop_result = forwardAtomic(pkt, InvalidPortID, master_port_id,
+                __func__, memSidePorts[mem_side_port_id]->name(),
+                pkt->print(), sf_res.first.size(), sf_res.second);
+        snoop_result = forwardAtomic(pkt, InvalidPortID, mem_side_port_id,
                                      sf_res.first);
     } else {
         snoop_result = forwardAtomic(pkt, InvalidPortID);
@@ -913,9 +922,9 @@
 }
 
 std::pair<MemCmd, Tick>
-CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id,
-                           PortID source_master_port_id,
-                           const std::vector<QueuedSlavePort*>& dests)
+CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_cpu_side_port_id,
+                           PortID source_mem_side_port_id,
+                           const std::vector<QueuedResponsePort*>& dests)
 {
     // the packet may be changed on snoops, record the original
     // command to enable us to restore it between snoops so that
@@ -930,12 +939,12 @@
     unsigned fanout = 0;
 
     for (const auto& p: dests) {
-        // we could have gotten this request from a snooping master
-        // (corresponding to our own slave port that is also in
+        // we could have gotten this request from a snooping requestor
+        // (corresponding to our own CPU-side port that is also in
         // snoopPorts) and should not send it back to where it came
         // from
-        if (exclude_slave_port_id != InvalidPortID &&
-            p->getId() == exclude_slave_port_id)
+        if (exclude_cpu_side_port_id != InvalidPortID &&
+            p->getId() == exclude_cpu_side_port_id)
             continue;
 
         Tick latency = p->sendAtomicSnoop(pkt);
@@ -959,16 +968,16 @@
         if (snoopFilter) {
             // Handle responses by the snoopers and differentiate between
             // responses to requests from above and snoops from below
-            if (source_master_port_id != InvalidPortID) {
+            if (source_mem_side_port_id != InvalidPortID) {
                 // Getting a response for a snoop from below
-                assert(exclude_slave_port_id == InvalidPortID);
+                assert(exclude_cpu_side_port_id == InvalidPortID);
                 snoopFilter->updateSnoopForward(pkt, *p,
-                             *masterPorts[source_master_port_id]);
+                             *memSidePorts[source_mem_side_port_id]);
             } else {
                 // Getting a response for a request from above
-                assert(source_master_port_id == InvalidPortID);
+                assert(source_mem_side_port_id == InvalidPortID);
                 snoopFilter->updateSnoopResponse(pkt, *p,
-                             *slavePorts[exclude_slave_port_id]);
+                             *cpuSidePorts[exclude_cpu_side_port_id]);
             }
         }
         // restore original packet state for remaining snoopers
@@ -984,24 +993,25 @@
 }
 
 void
-CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
+CoherentXBar::recvFunctional(PacketPtr pkt, PortID cpu_side_port_id)
 {
     if (!pkt->isPrint()) {
         // don't do DPRINTFs on PrintReq as it clutters up the output
         DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
-                slavePorts[slave_port_id]->name(), pkt->print());
+                cpuSidePorts[cpu_side_port_id]->name(), pkt->print());
     }
 
     if (!system->bypassCaches()) {
         // forward to all snoopers but the source
-        forwardFunctional(pkt, slave_port_id);
+        forwardFunctional(pkt, cpu_side_port_id);
     }
 
     // there is no need to continue if the snooping has found what we
     // were looking for and the packet is already a response
     if (!pkt->isResponse()) {
-        // since our slave ports are queued ports we need to check them as well
-        for (const auto& p : slavePorts) {
+        // since our CPU-side ports are queued ports we need to check
+        // them as well
+        for (const auto& p : cpuSidePorts) {
             // if we find a response that has the data, then the
             // downstream caches/memories may be out of date, so simply stop
             // here
@@ -1014,20 +1024,20 @@
 
         PortID dest_id = findPort(pkt->getAddrRange());
 
-        masterPorts[dest_id]->sendFunctional(pkt);
+        memSidePorts[dest_id]->sendFunctional(pkt);
     }
 }
 
 void
-CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id)
+CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID mem_side_port_id)
 {
     if (!pkt->isPrint()) {
         // don't do DPRINTFs on PrintReq as it clutters up the output
         DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
-                masterPorts[master_port_id]->name(), pkt->print());
+                memSidePorts[mem_side_port_id]->name(), pkt->print());
     }
 
-    for (const auto& p : slavePorts) {
+    for (const auto& p : cpuSidePorts) {
         if (p->trySatisfyFunctional(pkt)) {
             if (pkt->needsResponse())
                 pkt->makeResponse();
@@ -1040,18 +1050,18 @@
 }
 
 void
-CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id)
+CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_cpu_side_port_id)
 {
     // snoops should only happen if the system isn't bypassing caches
     assert(!system->bypassCaches());
 
     for (const auto& p: snoopPorts) {
-        // we could have gotten this request from a snooping master
-        // (corresponding to our own slave port that is also in
+        // we could have gotten this request from a snooping requestor
+        // (corresponding to our own CPU-side port that is also in
         // snoopPorts) and should not send it back to where it came
         // from
-        if (exclude_slave_port_id == InvalidPortID ||
-            p->getId() != exclude_slave_port_id)
+        if (exclude_cpu_side_port_id == InvalidPortID ||
+            p->getId() != exclude_cpu_side_port_id)
             p->sendFunctionalSnoop(pkt);
 
         // if we get a response we are done
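
The snoop fan-out in forwardTiming, forwardAtomic and forwardFunctional above is one and the same broadcast-with-exclusion pattern: deliver the packet to every snoop port except the CPU-side port it arrived on, and broadcast to all of them when the caller passes InvalidPortID (as recvAtomicSnoop does). A minimal standalone sketch of that pattern; the PortID, InvalidPortID and SnoopPort definitions below are simplified stand-ins, not the gem5 classes:

#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Simplified stand-ins for gem5's PortID / InvalidPortID.
using PortID = uint16_t;
constexpr PortID InvalidPortID = std::numeric_limits<PortID>::max();

struct SnoopPort {
    PortID id;
    PortID getId() const { return id; }
    void sendSnoop(const char *what) const {
        std::cout << "snoop '" << what << "' -> port " << id << '\n';
    }
};

// Forward a snoop to every destination except the CPU-side port it came
// from; InvalidPortID means "exclude nothing", i.e. broadcast to all.
unsigned
forwardToSnoopers(const std::vector<SnoopPort> &dests,
                  PortID exclude_cpu_side_port_id, const char *what)
{
    unsigned fanout = 0;
    for (const auto &p : dests) {
        if (exclude_cpu_side_port_id == InvalidPortID ||
            p.getId() != exclude_cpu_side_port_id) {
            p.sendSnoop(what);
            ++fanout;
        }
    }
    return fanout;
}

int
main()
{
    std::vector<SnoopPort> ports{{0}, {1}, {2}};
    // A request that arrived on CPU-side port 1 is snooped to ports 0 and 2.
    forwardToSnoopers(ports, 1, "ReadReq");
    // A snoop from below excludes nothing and reaches all three ports.
    forwardToSnoopers(ports, InvalidPortID, "SnoopReq");
}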
diff --git a/src/mem/coherent_xbar.hh b/src/mem/coherent_xbar.hh
index 300fc0c..81e2dc4 100644
--- a/src/mem/coherent_xbar.hh
+++ b/src/mem/coherent_xbar.hh
@@ -55,7 +55,7 @@
 
 /**
  * A coherent crossbar connects a number of (potentially) snooping
- * masters and slaves, and routes the request and response packets
+ * requestors and responders, and routes the request and response packets
  * based on the address, and also forwards all requests to the
  * snoopers and deals with the snoop responses.
  *
@@ -78,11 +78,11 @@
     std::vector<SnoopRespLayer*> snoopLayers;
 
     /**
-     * Declaration of the coherent crossbar slave port type, one will
-     * be instantiated for each of the master ports connecting to the
+     * Declaration of the coherent crossbar CPU-side port type, one will
+     * be instantiated for each of the mem_side_ports connecting to the
      * crossbar.
      */
-    class CoherentXBarSlavePort : public QueuedSlavePort
+    class CoherentXBarResponsePort : public QueuedResponsePort
     {
 
       private:
@@ -95,9 +95,9 @@
 
       public:
 
-        CoherentXBarSlavePort(const std::string &_name,
+        CoherentXBarResponsePort(const std::string &_name,
                              CoherentXBar &_xbar, PortID _id)
-            : QueuedSlavePort(_name, &_xbar, queue, _id), xbar(_xbar),
+            : QueuedResponsePort(_name, &_xbar, queue, _id), xbar(_xbar),
               queue(_xbar, *this)
         { }
 
@@ -142,11 +142,11 @@
     };
 
     /**
-     * Declaration of the coherent crossbar master port type, one will be
-     * instantiated for each of the slave interfaces connecting to the
+     * Declaration of the coherent crossbar memory-side port type, one will be
+     * instantiated for each of the CPU-side-port interfaces connecting to the
      * crossbar.
      */
-    class CoherentXBarMasterPort : public RequestPort
+    class CoherentXBarRequestPort : public RequestPort
     {
       private:
         /** A reference to the crossbar to which this port belongs. */
@@ -154,7 +154,7 @@
 
       public:
 
-        CoherentXBarMasterPort(const std::string &_name,
+        CoherentXBarRequestPort(const std::string &_name,
                               CoherentXBar &_xbar, PortID _id)
             : RequestPort(_name, &_xbar, _id), xbar(_xbar)
         { }
@@ -163,7 +163,7 @@
 
         /**
          * Determine if this port should be considered a snooper. For
-         * a coherent crossbar master port this is always true.
+         * a coherent crossbar memory-side port this is always true.
          *
          * @return a boolean that is true if this port is snooping
          */
@@ -200,8 +200,8 @@
 
     /**
      * Internal class to bridge between an incoming snoop response
-     * from a slave port and forwarding it through an outgoing slave
-     * port. It is effectively a dangling master port.
+     * from a CPU-side port and forwarding it through an outgoing
+     * CPU-side port. It is effectively a dangling memory-side port.
      */
     class SnoopRespPort : public RequestPort
     {
@@ -209,26 +209,27 @@
       private:
 
         /** The port which we mirror internally. */
-        QueuedSlavePort& slavePort;
+        QueuedResponsePort& cpuSidePort;
 
       public:
 
         /**
-         * Create a snoop response port that mirrors a given slave port.
+         * Create a snoop response port that mirrors a given CPU-side port.
          */
-        SnoopRespPort(QueuedSlavePort& slave_port, CoherentXBar& _xbar) :
-            RequestPort(slave_port.name() + ".snoopRespPort", &_xbar),
-            slavePort(slave_port) { }
+        SnoopRespPort(QueuedResponsePort& cpu_side_port,
+                      CoherentXBar& _xbar) :
+            RequestPort(cpu_side_port.name() + ".snoopRespPort", &_xbar),
+            cpuSidePort(cpu_side_port) { }
 
         /**
          * Override the sending of retries and pass them on through
-         * the mirrored slave port.
+         * the mirrored CPU-side port.
          */
         void
         sendRetryResp() override
         {
             // forward it as a snoop response retry
-            slavePort.sendRetrySnoopResp();
+            cpuSidePort.sendRetrySnoopResp();
         }
 
         void
@@ -247,7 +248,7 @@
 
     std::vector<SnoopRespPort*> snoopRespPorts;
 
-    std::vector<QueuedSlavePort*> snoopPorts;
+    std::vector<QueuedResponsePort*> snoopPorts;
 
     /**
      * Store the outstanding requests that we are expecting snoop
@@ -294,94 +295,95 @@
      */
     std::unique_ptr<Packet> pendingDelete;
 
-    bool recvTimingReq(PacketPtr pkt, PortID slave_port_id);
-    bool recvTimingResp(PacketPtr pkt, PortID master_port_id);
-    void recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id);
-    bool recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id);
-    void recvReqRetry(PortID master_port_id);
+    bool recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id);
+    bool recvTimingResp(PacketPtr pkt, PortID mem_side_port_id);
+    void recvTimingSnoopReq(PacketPtr pkt, PortID mem_side_port_id);
+    bool recvTimingSnoopResp(PacketPtr pkt, PortID cpu_side_port_id);
+    void recvReqRetry(PortID mem_side_port_id);
 
     /**
      * Forward a timing packet to our snoopers, potentially excluding
-     * one of the connected coherent masters to avoid sending a packet
+     * one of the connected coherent requestors to avoid sending a packet
      * back to where it came from.
      *
      * @param pkt Packet to forward
-     * @param exclude_slave_port_id Id of slave port to exclude
+     * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
      */
     void
-    forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id)
+    forwardTiming(PacketPtr pkt, PortID exclude_cpu_side_port_id)
     {
-        forwardTiming(pkt, exclude_slave_port_id, snoopPorts);
+        forwardTiming(pkt, exclude_cpu_side_port_id, snoopPorts);
     }
 
     /**
      * Forward a timing packet to a selected list of snoopers, potentially
-     * excluding one of the connected coherent masters to avoid sending a packet
-     * back to where it came from.
+     * excluding one of the connected coherent requestors to avoid sending
+     * a packet back to where it came from.
      *
      * @param pkt Packet to forward
-     * @param exclude_slave_port_id Id of slave port to exclude
+     * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
      * @param dests Vector of destination ports for the forwarded pkt
      */
-    void forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
-                       const std::vector<QueuedSlavePort*>& dests);
+    void forwardTiming(PacketPtr pkt, PortID exclude_cpu_side_port_id,
+                       const std::vector<QueuedResponsePort*>& dests);
 
-    Tick recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
+    Tick recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
                             MemBackdoorPtr *backdoor=nullptr);
-    Tick recvAtomicSnoop(PacketPtr pkt, PortID master_port_id);
+    Tick recvAtomicSnoop(PacketPtr pkt, PortID mem_side_port_id);
 
     /**
      * Forward an atomic packet to our snoopers, potentially excluding
-     * one of the connected coherent masters to avoid sending a packet
+     * one of the connected coherent requestors to avoid sending a packet
      * back to where it came from.
      *
      * @param pkt Packet to forward
-     * @param exclude_slave_port_id Id of slave port to exclude
+     * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
      *
      * @return a pair containing the snoop response and snoop latency
      */
     std::pair<MemCmd, Tick>
-    forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id)
+    forwardAtomic(PacketPtr pkt, PortID exclude_cpu_side_port_id)
     {
-        return forwardAtomic(pkt, exclude_slave_port_id, InvalidPortID,
+        return forwardAtomic(pkt, exclude_cpu_side_port_id, InvalidPortID,
                              snoopPorts);
     }
 
     /**
      * Forward an atomic packet to a selected list of snoopers, potentially
-     * excluding one of the connected coherent masters to avoid sending a packet
-     * back to where it came from.
+     * excluding one of the connected coherent requestors to avoid sending a
+     * packet back to where it came from.
      *
      * @param pkt Packet to forward
-     * @param exclude_slave_port_id Id of slave port to exclude
-     * @param source_master_port_id Id of the master port for snoops from below
+     * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
+     * @param source_mem_side_port_id Id of the memory-side port for
+     * snoops from below
      * @param dests Vector of destination ports for the forwarded pkt
      *
      * @return a pair containing the snoop response and snoop latency
      */
     std::pair<MemCmd, Tick> forwardAtomic(PacketPtr pkt,
-                                          PortID exclude_slave_port_id,
-                                          PortID source_master_port_id,
-                                          const std::vector<QueuedSlavePort*>&
+                                          PortID exclude_cpu_side_port_id,
+                                          PortID source_mem_side_port_id,
+                                          const std::vector<QueuedResponsePort*>&
                                           dests);
 
-    /** Function called by the port when the crossbar is recieving a Functional
+    /** Function called by the port when the crossbar is receiving a Functional
         transaction.*/
-    void recvFunctional(PacketPtr pkt, PortID slave_port_id);
+    void recvFunctional(PacketPtr pkt, PortID cpu_side_port_id);
 
-    /** Function called by the port when the crossbar is recieving a functional
+    /** Function called by the port when the crossbar is receiving a functional
         snoop transaction.*/
-    void recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id);
+    void recvFunctionalSnoop(PacketPtr pkt, PortID mem_side_port_id);
 
     /**
      * Forward a functional packet to our snoopers, potentially
-     * excluding one of the connected coherent masters to avoid
+     * excluding one of the connected coherent requestors to avoid
      * sending a packet back to where it came from.
      *
      * @param pkt Packet to forward
-     * @param exclude_slave_port_id Id of slave port to exclude
+     * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
      */
-    void forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id);
+    void forwardFunctional(PacketPtr pkt, PortID exclude_cpu_side_port_id);
 
     /**
      * Determine if the crossbar should sink the packet, as opposed to
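
SnoopRespPort above keeps its original role under the new names: a request port that exists only to mirror a given CPU-side port, so that a response retry arriving on the mirror is converted into a snoop-response retry on the mirrored port. A hedged sketch of that mirroring idea, using deliberately minimal hypothetical port classes rather than the real ResponsePort/RequestPort hierarchy:

#include <iostream>
#include <string>

// Hypothetical minimal response port: only the two members the sketch needs.
struct ResponsePort {
    std::string portName;
    const std::string &name() const { return portName; }
    void sendRetrySnoopResp() {
        std::cout << portName << ": retry snoop response\n";
    }
};

// A "dangling" request port whose only job is to mirror a CPU-side
// (response) port: retries sent to the mirror become snoop-response
// retries on the mirrored port.
class SnoopRespPort
{
    ResponsePort &cpuSidePort;
    std::string portName;

  public:
    explicit SnoopRespPort(ResponsePort &cpu_side_port)
        : cpuSidePort(cpu_side_port),
          portName(cpu_side_port.name() + ".snoopRespPort")
    {}

    void sendRetryResp() {
        // forward it as a snoop response retry
        cpuSidePort.sendRetrySnoopResp();
    }
};

int
main()
{
    ResponsePort cpu_side{"xbar.cpu_side_ports[0]"};
    SnoopRespPort mirror(cpu_side);
    mirror.sendRetryResp();
}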
diff --git a/src/mem/comm_monitor.cc b/src/mem/comm_monitor.cc
index e9a5d2c..14df955 100644
--- a/src/mem/comm_monitor.cc
+++ b/src/mem/comm_monitor.cc
@@ -45,8 +45,8 @@
 
 CommMonitor::CommMonitor(Params* params)
     : SimObject(params),
-      masterPort(name() + "-master", *this),
-      slavePort(name() + "-slave", *this),
+      memSidePort(name() + "-mem_side_port", *this),
+      cpuSidePort(name() + "-cpu_side_port", *this),
       samplePeriodicEvent([this]{ samplePeriodic(); }, name()),
       samplePeriodTicks(params->sample_period),
       samplePeriod(params->sample_period / SimClock::Float::s),
@@ -67,7 +67,7 @@
 CommMonitor::init()
 {
     // make sure both sides of the monitor are connected
-    if (!slavePort.isConnected() || !masterPort.isConnected())
+    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
         fatal("Communication monitor is not connected on both sides.\n");
 }
 
@@ -81,10 +81,10 @@
 Port &
 CommMonitor::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "master") {
-        return masterPort;
-    } else if (if_name == "slave") {
-        return slavePort;
+    if (if_name == "mem_side_port") {
+        return memSidePort;
+    } else if (if_name == "cpu_side_port") {
+        return cpuSidePort;
     } else {
         return SimObject::getPort(if_name, idx);
     }
@@ -93,13 +93,13 @@
 void
 CommMonitor::recvFunctional(PacketPtr pkt)
 {
-    masterPort.sendFunctional(pkt);
+    memSidePort.sendFunctional(pkt);
 }
 
 void
 CommMonitor::recvFunctionalSnoop(PacketPtr pkt)
 {
-    slavePort.sendFunctionalSnoop(pkt);
+    cpuSidePort.sendFunctionalSnoop(pkt);
 }
 
 CommMonitor::MonitorStats::MonitorStats(Stats::Group *parent,
@@ -344,7 +344,7 @@
     ProbePoints::PacketInfo req_pkt_info(pkt);
     ppPktReq->notify(req_pkt_info);
 
-    const Tick delay(masterPort.sendAtomic(pkt));
+    const Tick delay(memSidePort.sendAtomic(pkt));
 
     stats.updateReqStats(req_pkt_info, true, expects_response);
     if (expects_response)
@@ -360,7 +360,7 @@
 Tick
 CommMonitor::recvAtomicSnoop(PacketPtr pkt)
 {
-    return slavePort.sendAtomicSnoop(pkt);
+    return cpuSidePort.sendAtomicSnoop(pkt);
 }
 
 bool
@@ -385,7 +385,7 @@
     }
 
     // Attempt to send the packet
-    bool successful = masterPort.sendTimingReq(pkt);
+    bool successful = memSidePort.sendTimingReq(pkt);
 
     // If not successful, restore the sender state
     if (!successful && expects_response && !stats.disableLatencyHists) {
@@ -428,7 +428,7 @@
     }
 
     // Attempt to send the packet
-    bool successful = slavePort.sendTimingResp(pkt);
+    bool successful = cpuSidePort.sendTimingResp(pkt);
 
     if (!stats.disableLatencyHists) {
         // If packet successfully send, sample value of latency,
@@ -456,57 +456,57 @@
 void
 CommMonitor::recvTimingSnoopReq(PacketPtr pkt)
 {
-    slavePort.sendTimingSnoopReq(pkt);
+    cpuSidePort.sendTimingSnoopReq(pkt);
 }
 
 bool
 CommMonitor::recvTimingSnoopResp(PacketPtr pkt)
 {
-    return masterPort.sendTimingSnoopResp(pkt);
+    return memSidePort.sendTimingSnoopResp(pkt);
 }
 
 void
 CommMonitor::recvRetrySnoopResp()
 {
-    slavePort.sendRetrySnoopResp();
+    cpuSidePort.sendRetrySnoopResp();
 }
 
 bool
 CommMonitor::isSnooping() const
 {
-    // check if the connected master port is snooping
-    return slavePort.isSnooping();
+    // check if the connected request port is snooping
+    return cpuSidePort.isSnooping();
 }
 
 AddrRangeList
 CommMonitor::getAddrRanges() const
 {
-    // get the address ranges of the connected slave port
-    return masterPort.getAddrRanges();
+    // get the address ranges of the connected CPU-side port
+    return memSidePort.getAddrRanges();
 }
 
 void
 CommMonitor::recvReqRetry()
 {
-    slavePort.sendRetryReq();
+    cpuSidePort.sendRetryReq();
 }
 
 void
 CommMonitor::recvRespRetry()
 {
-    masterPort.sendRetryResp();
+    memSidePort.sendRetryResp();
 }
 
 bool
 CommMonitor::tryTiming(PacketPtr pkt)
 {
-    return masterPort.tryTiming(pkt);
+    return memSidePort.tryTiming(pkt);
 }
 
 void
 CommMonitor::recvRangeChange()
 {
-    slavePort.sendRangeChange();
+    cpuSidePort.sendRangeChange();
 }
 
 void
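
The CommMonitor itself is a symmetric pass-through: each recv* callback on one side becomes the matching send* call on the other, with the statistics sampling wrapped around the forwarded call. A stripped-down sketch of that shape, with hypothetical Packet and port types and no statistics:

#include <iostream>
#include <string>

struct Packet { std::string desc; };  // hypothetical stand-in

struct MemSidePort {
    bool sendTimingReq(const Packet &pkt) {
        std::cout << "forward request downstream: " << pkt.desc << '\n';
        return true;  // pretend the downstream device accepted the packet
    }
};

struct CpuSidePort {
    bool sendTimingResp(const Packet &pkt) {
        std::cout << "forward response upstream: " << pkt.desc << '\n';
        return true;
    }
};

// Requests received on the CPU-side port leave through the memory-side
// port; responses travel the opposite way.
class Monitor
{
    MemSidePort memSidePort;
    CpuSidePort cpuSidePort;

  public:
    bool recvTimingReq(const Packet &pkt) {
        return memSidePort.sendTimingReq(pkt);
    }
    bool recvTimingResp(const Packet &pkt) {
        return cpuSidePort.sendTimingResp(pkt);
    }
};

int
main()
{
    Monitor mon;
    mon.recvTimingReq({"ReadReq 0x1000"});
    mon.recvTimingResp({"ReadResp 0x1000"});
}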
diff --git a/src/mem/comm_monitor.hh b/src/mem/comm_monitor.hh
index 33cf4d8..ed69362 100644
--- a/src/mem/comm_monitor.hh
+++ b/src/mem/comm_monitor.hh
@@ -112,17 +112,17 @@
     };
 
     /**
-     * This is the master port of the communication monitor. All recv
+     * This is the request port of the communication monitor. All recv
      * functions call a function in CommMonitor, where the
-     * send function of the slave port is called. Besides this, these
+     * send function of the CPU-side port is called. Besides this, these
      * functions can also perform actions for capturing statistics.
      */
-    class MonitorMasterPort : public RequestPort
+    class MonitorRequestPort : public RequestPort
     {
 
       public:
 
-        MonitorMasterPort(const std::string& _name, CommMonitor& _mon)
+        MonitorRequestPort(const std::string& _name, CommMonitor& _mon)
             : RequestPort(_name, &_mon), mon(_mon)
         { }
 
@@ -174,21 +174,21 @@
 
     };
 
-    /** Instance of master port, facing the memory side */
-    MonitorMasterPort masterPort;
+    /** Instance of request port, facing the memory side */
+    MonitorRequestPort memSidePort;
 
     /**
-     * This is the slave port of the communication monitor. All recv
+     * This is the CPU-side port of the communication monitor. All recv
      * functions call a function in CommMonitor, where the
-     * send function of the master port is called. Besides this, these
+     * send function of the request port is called. Besides this, these
      * functions can also perform actions for capturing statistics.
      */
-    class MonitorSlavePort : public ResponsePort
+    class MonitorResponsePort : public ResponsePort
     {
 
       public:
 
-        MonitorSlavePort(const std::string& _name, CommMonitor& _mon)
+        MonitorResponsePort(const std::string& _name, CommMonitor& _mon)
             : ResponsePort(_name, &_mon), mon(_mon)
         { }
 
@@ -235,8 +235,8 @@
 
     };
 
-    /** Instance of slave port, i.e. on the CPU side */
-    MonitorSlavePort slavePort;
+    /** Instance of response port, i.e. on the CPU side */
+    MonitorResponsePort cpuSidePort;
 
     void recvFunctional(PacketPtr pkt);
 
diff --git a/src/mem/dramsim2.cc b/src/mem/dramsim2.cc
index 2307df0..b09138c 100644
--- a/src/mem/dramsim2.cc
+++ b/src/mem/dramsim2.cc
@@ -254,7 +254,7 @@
     // response
     access(pkt);
 
-    // turn packet around to go back to requester if response expected
+    // turn packet around to go back to requestor if response expected
     if (needsResponse) {
         // access already turned the packet into a response
         assert(pkt->isResponse());
@@ -352,7 +352,7 @@
 
 DRAMSim2::MemoryPort::MemoryPort(const std::string& _name,
                                  DRAMSim2& _memory)
-    : SlavePort(_name, &_memory), memory(_memory)
+    : ResponsePort(_name, &_memory), memory(_memory)
 { }
 
 AddrRangeList
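
The "turn packet around" comment describes the usual idiom in these memory models: after access() the same packet object is the response that goes back to the requestor, and a packet that needs no response is simply dropped. A small self-contained sketch of that flow, using a hypothetical Packet type rather than gem5's:

#include <cassert>
#include <iostream>

// Hypothetical minimal packet that can be turned into a response in place.
struct Packet {
    bool request = true;
    bool needsResp = true;

    bool needsResponse() const { return needsResp; }
    bool isResponse() const { return !request; }
    void makeResponse() { request = false; }
};

// After the memory access, either send the same packet back to the
// requestor as a response, or drop it when no response is expected.
void
finishAccess(Packet &pkt)
{
    if (pkt.needsResponse()) {
        pkt.makeResponse();      // access() does this in the real models
        assert(pkt.isResponse());
        std::cout << "response queued back to the requestor\n";
    } else {
        std::cout << "no response expected, packet dropped\n";
    }
}

int
main()
{
    Packet pkt;
    finishAccess(pkt);
}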
diff --git a/src/mem/dramsim2.hh b/src/mem/dramsim2.hh
index 9503d9b..3259e85 100644
--- a/src/mem/dramsim2.hh
+++ b/src/mem/dramsim2.hh
@@ -59,7 +59,7 @@
      * having unbounded storage that is implicitly created in the port
      * itself.
      */
-    class MemoryPort : public SlavePort
+    class MemoryPort : public ResponsePort
     {
 
       private:
diff --git a/src/mem/dramsim3.cc b/src/mem/dramsim3.cc
index fc2eaca..4b2f796 100644
--- a/src/mem/dramsim3.cc
+++ b/src/mem/dramsim3.cc
@@ -258,7 +258,7 @@
     // response
     access(pkt);
 
-    // turn packet around to go back to requester if response expected
+    // turn packet around to go back to requestor if response expected
     if (needsResponse) {
         // access already turned the packet into a response
         assert(pkt->isResponse());
@@ -352,7 +352,7 @@
 
 DRAMsim3::MemoryPort::MemoryPort(const std::string& _name,
                                  DRAMsim3& _memory)
-    : SlavePort(_name, &_memory), memory(_memory)
+    : ResponsePort(_name, &_memory), memory(_memory)
 { }
 
 AddrRangeList
diff --git a/src/mem/dramsim3.hh b/src/mem/dramsim3.hh
index 1b4a8a6..fc3cd1a 100644
--- a/src/mem/dramsim3.hh
+++ b/src/mem/dramsim3.hh
@@ -61,7 +61,7 @@
      * having unbounded storage that is implicitly created in the port
      * itself.
      */
-    class MemoryPort : public SlavePort
+    class MemoryPort : public ResponsePort
     {
 
       private:
diff --git a/src/mem/external_master.cc b/src/mem/external_master.cc
index 1a1ec40..81b7a52 100644
--- a/src/mem/external_master.cc
+++ b/src/mem/external_master.cc
@@ -53,7 +53,7 @@
     portName(params->name + ".port"),
     portType(params->port_type),
     portData(params->port_data),
-    masterId(params->system->getMasterId(this))
+    id(params->system->getRequestorId(this))
 {}
 
 Port &
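
getRequestorId replaces getMasterId as the call through which a device obtains the id it stamps on the requests it issues; the hunk above passes the owning object (this). The sketch below illustrates the idea of such an id registry, keyed by a plain name string for brevity rather than by the owning object as in the diff; the class and method bodies are illustrative only:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

using RequestorID = uint16_t;

// Illustrative registry: a given name always maps to the same id, and a
// previously unseen name is assigned the next free id.
class IdRegistry
{
    std::map<std::string, RequestorID> ids;

  public:
    RequestorID getRequestorId(const std::string &name) {
        auto it = ids.find(name);
        if (it == ids.end())
            it = ids.emplace(name, static_cast<RequestorID>(ids.size())).first;
        return it->second;
    }

    std::string getRequestorName(RequestorID id) const {
        for (const auto &kv : ids)
            if (kv.second == id)
                return kv.first;
        return "unknown";
    }
};

int
main()
{
    IdRegistry sys;
    RequestorID id = sys.getRequestorId("external_master.port");
    std::cout << id << " -> " << sys.getRequestorName(id) << '\n';
}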
diff --git a/src/mem/external_master.hh b/src/mem/external_master.hh
index d7aec11..a4d5b03 100644
--- a/src/mem/external_master.hh
+++ b/src/mem/external_master.hh
@@ -39,7 +39,7 @@
  * @file
  *
  * ExternalMaster is a memory object representing a binding from
- * a gem5 slave to a master port in a system external to gem5.
+ * a gem5 responder to a request port in a system external to gem5.
  *
  * During initialisation, a `handler' for the port type specified in the
  * port's port_type parameter is found from the registered port handlers
@@ -128,7 +128,7 @@
 
     void init() override;
 
-    const MasterID masterId;
+    const RequestorID id;
 };
 
 
diff --git a/src/mem/external_slave.hh b/src/mem/external_slave.hh
index 0a5e01a..c0f01f8 100644
--- a/src/mem/external_slave.hh
+++ b/src/mem/external_slave.hh
@@ -39,7 +39,7 @@
  * @file
  *
  * ExternalSlave is a memory object representing a binding from
- * a gem5 master to a slave port in a system external to gem5.
+ * a gem5 requestor to a response port in a system external to gem5.
  *
  * During initialisation, a `handler' for the port type specified in the
  * port's port_type parameter is found from the registered port handlers
diff --git a/src/mem/hmc_controller.cc b/src/mem/hmc_controller.cc
index e7b3f6a..20f6725 100644
--- a/src/mem/hmc_controller.cc
+++ b/src/mem/hmc_controller.cc
@@ -6,10 +6,10 @@
 
 HMCController::HMCController(const HMCControllerParams* p) :
     NoncoherentXBar(p),
-    n_master_ports(p->port_master_connection_count),
+    numMemSidePorts(p->port_mem_side_ports_connection_count),
     rr_counter(0)
 {
-    assert(p->port_slave_connection_count == 1);
+    assert(p->port_cpu_side_ports_connection_count == 1);
 }
 
 HMCController*
@@ -18,43 +18,43 @@
     return new HMCController(this);
 }
 
-// Since this module is a load distributor, all its master ports have the same
+// Since this module is a load distributor, all its request ports have the same
 //  range so we should keep only one of the ranges and ignore the others
-void HMCController::recvRangeChange(PortID master_port_id)
+void HMCController::recvRangeChange(PortID mem_side_port_id)
 {
-    if (master_port_id == 0)
+    if (mem_side_port_id == 0)
     {
        gotAllAddrRanges = true;
-       BaseXBar::recvRangeChange(master_port_id);
+       BaseXBar::recvRangeChange(mem_side_port_id);
     }
     else
-        gotAddrRanges[master_port_id] = true;
+        gotAddrRanges[mem_side_port_id] = true;
 }
 
 int HMCController::rotate_counter()
 {
     int current_value = rr_counter;
     rr_counter++;
-    if (rr_counter == n_master_ports)
+    if (rr_counter == numMemSidePorts)
         rr_counter = 0;
     return current_value;
 }
 
-bool HMCController::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
+bool HMCController::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id)
 {
     // determine the source port based on the id
-    ResponsePort *src_port = slavePorts[slave_port_id];
+    ResponsePort *src_port = cpuSidePorts[cpu_side_port_id];
 
     // we should never see express snoops on a non-coherent component
     assert(!pkt->isExpressSnoop());
 
     // For now, this is a simple round robin counter, for distribution the
     //  load among the serial links
-    PortID master_port_id = rotate_counter();
+    PortID mem_side_port_id = rotate_counter();
 
     // test if the layer should be considered occupied for the current
     // port
-    if (!reqLayers[master_port_id]->tryTiming(src_port)) {
+    if (!reqLayers[mem_side_port_id]->tryTiming(src_port)) {
         DPRINTF(HMCController, "recvTimingReq: src %s %s 0x%x BUSY\n",
                 src_port->name(), pkt->cmdString(), pkt->getAddr());
         return false;
@@ -86,7 +86,7 @@
         !pkt->cacheResponding();
 
     // since it is a normal request, attempt to send the packet
-    bool success = masterPorts[master_port_id]->sendTimingReq(pkt);
+    bool success = memSidePorts[mem_side_port_id]->sendTimingReq(pkt);
 
     if (!success)  {
         DPRINTF(HMCController, "recvTimingReq: src %s %s 0x%x RETRY\n",
@@ -96,7 +96,7 @@
         pkt->headerDelay = old_header_delay;
 
         // occupy until the header is sent
-        reqLayers[master_port_id]->failedTiming(src_port,
+        reqLayers[mem_side_port_id]->failedTiming(src_port,
                                                 clockEdge(Cycles(1)));
 
         return false;
@@ -105,14 +105,14 @@
     // remember where to route the response to
     if (expect_response) {
         assert(routeTo.find(pkt->req) == routeTo.end());
-        routeTo[pkt->req] = slave_port_id;
+        routeTo[pkt->req] = cpu_side_port_id;
     }
 
-    reqLayers[master_port_id]->succeededTiming(packetFinishTime);
+    reqLayers[mem_side_port_id]->succeededTiming(packetFinishTime);
 
     // stats updates
-    pktCount[slave_port_id][master_port_id]++;
-    pktSize[slave_port_id][master_port_id] += pkt_size;
+    pktCount[cpu_side_port_id][mem_side_port_id]++;
+    pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
     transDist[pkt_cmd]++;
 
     return true;
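
The distribution policy in HMCController is untouched by the rename: rotate_counter() is a plain modulo round-robin over numMemSidePorts, so successive requests walk the serial links in order. A minimal sketch of that counter in isolation:

#include <cassert>
#include <iostream>

// Round-robin selection over N memory-side ports (simplified wrapper;
// the member names mirror the ones used above).
class RoundRobin
{
    int numMemSidePorts;
    int rr_counter = 0;

  public:
    explicit RoundRobin(int n) : numMemSidePorts(n) { assert(n > 0); }

    // Return the current port id and advance, wrapping at the port count.
    int rotate_counter() {
        int current_value = rr_counter;
        rr_counter++;
        if (rr_counter == numMemSidePorts)
            rr_counter = 0;
        return current_value;
    }
};

int
main()
{
    RoundRobin rr(3);
    // Eight consecutive requests land on ports 0 1 2 0 1 2 0 1.
    for (int i = 0; i < 8; ++i)
        std::cout << rr.rotate_counter() << ' ';
    std::cout << '\n';
}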
diff --git a/src/mem/hmc_controller.hh b/src/mem/hmc_controller.hh
index 2ab3101..8206ee0 100644
--- a/src/mem/hmc_controller.hh
+++ b/src/mem/hmc_controller.hh
@@ -80,14 +80,14 @@
 
     // Receive range change only on one of the ports (because they all have
     //  the same range)
-    virtual void recvRangeChange(PortID master_port_id);
+    virtual void recvRangeChange(PortID mem_side_port_id);
 
-    // Receive a request and distribute it among slave ports
+    // Receive a request and distribute it among response ports
     //  Simply forwards the packet to the next serial link based on a
     //  Round-robin counter
-    virtual bool recvTimingReq(PacketPtr pkt, PortID slave_port_id);
+    virtual bool recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id);
 
-    int n_master_ports;
+    int numMemSidePorts;
 
     // The round-robin counter
     int rr_counter;
diff --git a/src/mem/mem_checker_monitor.cc b/src/mem/mem_checker_monitor.cc
index f5800cf..82ca83b 100644
--- a/src/mem/mem_checker_monitor.cc
+++ b/src/mem/mem_checker_monitor.cc
@@ -46,8 +46,8 @@
 
 MemCheckerMonitor::MemCheckerMonitor(Params* params)
     : SimObject(params),
-      masterPort(name() + "-master", *this),
-      slavePort(name() + "-slave", *this),
+      memSidePort(name() + "-memSidePort", *this),
+      cpuSidePort(name() + "-cpuSidePort", *this),
       warnOnly(params->warn_only),
       memchecker(params->memchecker)
 {}
@@ -65,17 +65,17 @@
 MemCheckerMonitor::init()
 {
     // make sure both sides of the monitor are connected
-    if (!slavePort.isConnected() || !masterPort.isConnected())
+    if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
         fatal("Communication monitor is not connected on both sides.\n");
 }
 
 Port &
 MemCheckerMonitor::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "master" || if_name == "mem_side") {
-        return masterPort;
-    } else if (if_name == "slave" || if_name == "cpu_side") {
-        return slavePort;
+    if (if_name == "request" || if_name == "mem_side_port") {
+        return memSidePort;
+    } else if (if_name == "response" || if_name == "cpu_side_port") {
+        return cpuSidePort;
     } else {
         return SimObject::getPort(if_name, idx);
     }
@@ -92,7 +92,7 @@
     // reads/writes to these location from other devices we do not see.
     memchecker->reset(addr, size);
 
-    masterPort.sendFunctional(pkt);
+    memSidePort.sendFunctional(pkt);
 
     DPRINTF(MemCheckerMonitor,
             "Forwarded functional access: addr = %#llx, size = %d\n",
@@ -108,7 +108,7 @@
     // See above.
     memchecker->reset(addr, size);
 
-    slavePort.sendFunctionalSnoop(pkt);
+    cpuSidePort.sendFunctionalSnoop(pkt);
 
     DPRINTF(MemCheckerMonitor,
             "Received functional snoop: addr = %#llx, size = %d\n",
@@ -164,7 +164,7 @@
     }
 
     // Attempt to send the packet
-    bool successful = masterPort.sendTimingReq(pkt);
+    bool successful = memSidePort.sendTimingReq(pkt);
 
     // If not successful, restore the sender state
     if (!successful && expects_response && (is_read || is_write)) {
@@ -180,9 +180,9 @@
             // At the time where we push the sender-state, we do not yet know
             // the serial the MemChecker class will assign to this request. We
             // cannot call startRead at the time we push the sender-state, as
-            // the masterPort may not be successful in executing sendTimingReq,
-            // and in case of a failure, we must not modify the state of the
-            // MemChecker.
+            // the memSidePort may not be successful in executing
+            // sendTimingReq, and in case of a failure, we must not
+            // modify the state of the MemChecker.
             //
             // Once we know that sendTimingReq was successful, we can set the
             // serial of the newly constructed sender-state. This is legal, as
@@ -256,7 +256,7 @@
     }
 
     // Attempt to send the packet
-    bool successful = slavePort.sendTimingResp(pkt);
+    bool successful = cpuSidePort.sendTimingResp(pkt);
 
     // If packet successfully send, complete transaction in MemChecker
     // instance, and delete sender state, otherwise restore state.
@@ -318,43 +318,43 @@
 void
 MemCheckerMonitor::recvTimingSnoopReq(PacketPtr pkt)
 {
-    slavePort.sendTimingSnoopReq(pkt);
+    cpuSidePort.sendTimingSnoopReq(pkt);
 }
 
 bool
 MemCheckerMonitor::recvTimingSnoopResp(PacketPtr pkt)
 {
-    return masterPort.sendTimingSnoopResp(pkt);
+    return memSidePort.sendTimingSnoopResp(pkt);
 }
 
 bool
 MemCheckerMonitor::isSnooping() const
 {
-    // check if the connected master port is snooping
-    return slavePort.isSnooping();
+    // check if the connected memory-side port is snooping
+    return cpuSidePort.isSnooping();
 }
 
 AddrRangeList
 MemCheckerMonitor::getAddrRanges() const
 {
-    // get the address ranges of the connected slave port
-    return masterPort.getAddrRanges();
+    // get the address ranges of the connected CPU-side port
+    return memSidePort.getAddrRanges();
 }
 
 void
 MemCheckerMonitor::recvReqRetry()
 {
-    slavePort.sendRetryReq();
+    cpuSidePort.sendRetryReq();
 }
 
 void
 MemCheckerMonitor::recvRespRetry()
 {
-    masterPort.sendRetryResp();
+    memSidePort.sendRetryResp();
 }
 
 void
 MemCheckerMonitor::recvRangeChange()
 {
-    slavePort.sendRangeChange();
+    cpuSidePort.sendRangeChange();
 }
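
The comment block in recvTimingReq above spells out why the monitor only commits MemChecker state once sendTimingReq has succeeded: the sender state is pushed optimistically and undone if the downstream port refuses the packet. A toy sketch of that push/try/undo pattern, with a hypothetical Packet carrying a simple sender-state stack:

#include <iostream>
#include <stack>
#include <string>

// Hypothetical packet with a sender-state stack, standing in for gem5's
// pushSenderState/popSenderState mechanism.
struct Packet {
    std::stack<std::string> senderState;
    void pushSenderState(const std::string &s) { senderState.push(s); }
    void popSenderState() { senderState.pop(); }
};

// Push bookkeeping state, attempt the send, and undo the bookkeeping if
// the downstream port did not accept the packet, so nothing outside this
// function is modified on failure.
bool
trySend(Packet &pkt, bool downstream_accepts)
{
    pkt.pushSenderState("monitor-state");
    bool successful = downstream_accepts;  // stand-in for sendTimingReq()
    if (!successful)
        pkt.popSenderState();
    return successful;
}

int
main()
{
    Packet accepted, refused;
    std::cout << "accepted: " << trySend(accepted, true) << '\n';
    std::cout << "accepted: " << trySend(refused, false) << '\n';
}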
diff --git a/src/mem/mem_checker_monitor.hh b/src/mem/mem_checker_monitor.hh
index c2fb80d..8e5dab1 100644
--- a/src/mem/mem_checker_monitor.hh
+++ b/src/mem/mem_checker_monitor.hh
@@ -83,17 +83,17 @@
     };
 
     /**
-     * This is the master port of the communication monitor. All recv
+     * This is the request port of the communication monitor. All recv
      * functions call a function in MemCheckerMonitor, where the
-     * send function of the slave port is called. Besides this, these
+     * send function of the response port is called. Besides this, these
      * functions can also perform actions for capturing statistics.
      */
-    class MonitorMasterPort : public RequestPort
+    class MonitorRequestPort : public RequestPort
     {
 
       public:
 
-        MonitorMasterPort(const std::string& _name, MemCheckerMonitor& _mon)
+        MonitorRequestPort(const std::string& _name, MemCheckerMonitor& _mon)
             : RequestPort(_name, &_mon), mon(_mon)
         { }
 
@@ -140,21 +140,21 @@
 
     };
 
-    /** Instance of master port, facing the memory side */
-    MonitorMasterPort masterPort;
+    /** Instance of request port, facing the memory side */
+    MonitorRequestPort memSidePort;
 
     /**
-     * This is the slave port of the communication monitor. All recv
+     * This is the response port of the communication monitor. All recv
      * functions call a function in MemCheckerMonitor, where the
-     * send function of the master port is called. Besides this, these
+     * send function of the request port is called. Besides this, these
      * functions can also perform actions for capturing statistics.
      */
-    class MonitorSlavePort : public ResponsePort
+    class MonitorResponsePort : public ResponsePort
     {
 
       public:
 
-        MonitorSlavePort(const std::string& _name, MemCheckerMonitor& _mon)
+        MonitorResponsePort(const std::string& _name, MemCheckerMonitor& _mon)
             : ResponsePort(_name, &_mon), mon(_mon)
         { }
 
@@ -196,8 +196,8 @@
 
     };
 
-    /** Instance of slave port, i.e. on the CPU side */
-    MonitorSlavePort slavePort;
+    /** Instance of response port, i.e. on the CPU side */
+    MonitorResponsePort cpuSidePort;
 
     void recvFunctional(PacketPtr pkt);
 
diff --git a/src/mem/mem_ctrl.cc b/src/mem/mem_ctrl.cc
index 66d3c2a..1c0d4b1 100644
--- a/src/mem/mem_ctrl.cc
+++ b/src/mem/mem_ctrl.cc
@@ -204,7 +204,7 @@
                         base_addr + pkt->getSize()) - addr;
         stats.readPktSize[ceilLog2(size)]++;
         stats.readBursts++;
-        stats.masterReadAccesses[pkt->masterId()]++;
+        stats.requestorReadAccesses[pkt->requestorId()]++;
 
         // First check write buffer to see if the data is already at
         // the controller
@@ -267,7 +267,7 @@
             readQueue[mem_pkt->qosValue()].push_back(mem_pkt);
 
             // log packet
-            logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
+            logRequest(MemCtrl::READ, pkt->requestorId(), pkt->qosValue(),
                        mem_pkt->addr, 1);
 
             // Update stats
@@ -314,7 +314,7 @@
                         base_addr + pkt->getSize()) - addr;
         stats.writePktSize[ceilLog2(size)]++;
         stats.writeBursts++;
-        stats.masterWriteAccesses[pkt->masterId()]++;
+        stats.requestorWriteAccesses[pkt->requestorId()]++;
 
         // see if we can merge with an existing item in the write
         // queue and keep track of whether we have merged or not
@@ -341,7 +341,7 @@
             isInWriteQueue.insert(burstAlign(addr, is_dram));
 
             // log packet
-            logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
+            logRequest(MemCtrl::WRITE, pkt->requestorId(), pkt->qosValue(),
                        mem_pkt->addr, 1);
 
             assert(totalWriteQueueSize == isInWriteQueue.size());
@@ -498,7 +498,7 @@
         if (mem_pkt->burstHelper->burstsServiced ==
             mem_pkt->burstHelper->burstCount) {
             // we have now serviced all children packets of a system packet
-            // so we can now respond to the requester
+            // so we can now respond to the requestor
             // @todo we probably want to have a different front end and back
             // end latency for split packets
             accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
@@ -635,7 +635,7 @@
               pkt->print());
     }
 
-    // turn packet around to go back to requester if response expected
+    // turn packet around to go back to requestor if response expected
     if (needsResponse) {
         // access already turned the packet into a response
         assert(pkt->isResponse());
@@ -842,13 +842,13 @@
     if (mem_pkt->isRead()) {
         ++readsThisTime;
         // Update latency stats
-        stats.masterReadTotalLat[mem_pkt->masterId()] +=
+        stats.requestorReadTotalLat[mem_pkt->requestorId()] +=
             mem_pkt->readyTime - mem_pkt->entryTime;
-        stats.masterReadBytes[mem_pkt->masterId()] += mem_pkt->size;
+        stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size;
     } else {
         ++writesThisTime;
-        stats.masterWriteBytes[mem_pkt->masterId()] += mem_pkt->size;
-        stats.masterWriteTotalLat[mem_pkt->masterId()] +=
+        stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size;
+        stats.requestorWriteTotalLat[mem_pkt->requestorId()] +=
             mem_pkt->readyTime - mem_pkt->entryTime;
     }
 }
@@ -1005,13 +1005,13 @@
             assert(mem_pkt->readyTime >= curTick());
 
             // log the response
-            logResponse(MemCtrl::READ, (*to_read)->masterId(),
+            logResponse(MemCtrl::READ, (*to_read)->requestorId(),
                         mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                         mem_pkt->readyTime - mem_pkt->entryTime);
 
 
             // Insert into response queue. It will be sent back to the
-            // requester at its readyTime
+            // requestor at its readyTime
             if (respQueue.empty()) {
                 assert(!respondEvent.scheduled());
                 schedule(respondEvent, mem_pkt->readyTime);
@@ -1090,7 +1090,7 @@
         isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram()));
 
         // log the response
-        logResponse(MemCtrl::WRITE, mem_pkt->masterId(),
+        logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
                     mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
                     mem_pkt->readyTime - mem_pkt->entryTime);
 
@@ -1220,24 +1220,24 @@
     ADD_STAT(totGap, "Total gap between requests"),
     ADD_STAT(avgGap, "Average gap between requests"),
 
-    ADD_STAT(masterReadBytes, "Per-master bytes read from memory"),
-    ADD_STAT(masterWriteBytes, "Per-master bytes write to memory"),
-    ADD_STAT(masterReadRate,
-             "Per-master bytes read from memory rate (Bytes/sec)"),
-    ADD_STAT(masterWriteRate,
-             "Per-master bytes write to memory rate (Bytes/sec)"),
-    ADD_STAT(masterReadAccesses,
-             "Per-master read serviced memory accesses"),
-    ADD_STAT(masterWriteAccesses,
-             "Per-master write serviced memory accesses"),
-    ADD_STAT(masterReadTotalLat,
-             "Per-master read total memory access latency"),
-    ADD_STAT(masterWriteTotalLat,
-             "Per-master write total memory access latency"),
-    ADD_STAT(masterReadAvgLat,
-             "Per-master read average memory access latency"),
-    ADD_STAT(masterWriteAvgLat,
-             "Per-master write average memory access latency")
+    ADD_STAT(requestorReadBytes, "Per-requestor bytes read from memory"),
+    ADD_STAT(requestorWriteBytes, "Per-requestor bytes written to memory"),
+    ADD_STAT(requestorReadRate,
+             "Per-requestor bytes read from memory rate (Bytes/sec)"),
+    ADD_STAT(requestorWriteRate,
+             "Per-requestor bytes write to memory rate (Bytes/sec)"),
+    ADD_STAT(requestorReadAccesses,
+             "Per-requestor read serviced memory accesses"),
+    ADD_STAT(requestorWriteAccesses,
+             "Per-requestor write serviced memory accesses"),
+    ADD_STAT(requestorReadTotalLat,
+             "Per-requestor read total memory access latency"),
+    ADD_STAT(requestorWriteTotalLat,
+             "Per-requestor write total memory access latency"),
+    ADD_STAT(requestorReadAvgLat,
+             "Per-requestor read average memory access latency"),
+    ADD_STAT(requestorWriteAvgLat,
+             "Per-requestor write average memory access latency")
 
 {
 }
@@ -1248,7 +1248,7 @@
     using namespace Stats;
 
     assert(ctrl.system());
-    const auto max_masters = ctrl.system()->maxMasters();
+    const auto max_requestors = ctrl.system()->maxRequestors();
 
     avgRdQLen.precision(2);
     avgWrQLen.precision(2);
@@ -1270,60 +1270,60 @@
     avgWrBWSys.precision(2);
     avgGap.precision(2);
 
-    // per-master bytes read and written to memory
-    masterReadBytes
-        .init(max_masters)
+    // per-requestor bytes read and written to memory
+    requestorReadBytes
+        .init(max_requestors)
         .flags(nozero | nonan);
 
-    masterWriteBytes
-        .init(max_masters)
+    requestorWriteBytes
+        .init(max_requestors)
         .flags(nozero | nonan);
 
-    // per-master bytes read and written to memory rate
-    masterReadRate
+    // per-requestor bytes read and written to memory rate
+    requestorReadRate
         .flags(nozero | nonan)
         .precision(12);
 
-    masterReadAccesses
-        .init(max_masters)
+    requestorReadAccesses
+        .init(max_requestors)
         .flags(nozero);
 
-    masterWriteAccesses
-        .init(max_masters)
+    requestorWriteAccesses
+        .init(max_requestors)
         .flags(nozero);
 
-    masterReadTotalLat
-        .init(max_masters)
+    requestorReadTotalLat
+        .init(max_requestors)
         .flags(nozero | nonan);
 
-    masterReadAvgLat
+    requestorReadAvgLat
         .flags(nonan)
         .precision(2);
 
-    masterWriteRate
+    requestorWriteRate
         .flags(nozero | nonan)
         .precision(12);
 
-    masterWriteTotalLat
-        .init(max_masters)
+    requestorWriteTotalLat
+        .init(max_requestors)
         .flags(nozero | nonan);
 
-    masterWriteAvgLat
+    requestorWriteAvgLat
         .flags(nonan)
         .precision(2);
 
-    for (int i = 0; i < max_masters; i++) {
-        const std::string master = ctrl.system()->getMasterName(i);
-        masterReadBytes.subname(i, master);
-        masterReadRate.subname(i, master);
-        masterWriteBytes.subname(i, master);
-        masterWriteRate.subname(i, master);
-        masterReadAccesses.subname(i, master);
-        masterWriteAccesses.subname(i, master);
-        masterReadTotalLat.subname(i, master);
-        masterReadAvgLat.subname(i, master);
-        masterWriteTotalLat.subname(i, master);
-        masterWriteAvgLat.subname(i, master);
+    for (int i = 0; i < max_requestors; i++) {
+        const std::string requestor = ctrl.system()->getRequestorName(i);
+        requestorReadBytes.subname(i, requestor);
+        requestorReadRate.subname(i, requestor);
+        requestorWriteBytes.subname(i, requestor);
+        requestorWriteRate.subname(i, requestor);
+        requestorReadAccesses.subname(i, requestor);
+        requestorWriteAccesses.subname(i, requestor);
+        requestorReadTotalLat.subname(i, requestor);
+        requestorReadAvgLat.subname(i, requestor);
+        requestorWriteTotalLat.subname(i, requestor);
+        requestorWriteAvgLat.subname(i, requestor);
     }
 
     // Formula stats
@@ -1332,10 +1332,10 @@
 
     avgGap = totGap / (readReqs + writeReqs);
 
-    masterReadRate = masterReadBytes / simSeconds;
-    masterWriteRate = masterWriteBytes / simSeconds;
-    masterReadAvgLat = masterReadTotalLat / masterReadAccesses;
-    masterWriteAvgLat = masterWriteTotalLat / masterWriteAccesses;
+    requestorReadRate = requestorReadBytes / simSeconds;
+    requestorWriteRate = requestorWriteBytes / simSeconds;
+    requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
+    requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
 }
 
 void
@@ -1421,7 +1421,7 @@
 }
 
 MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
-    : QueuedSlavePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
+    : QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
       ctrl(_ctrl)
 { }
 
diff --git a/src/mem/mem_ctrl.hh b/src/mem/mem_ctrl.hh
index 834cb5c..2e3cf8c 100644
--- a/src/mem/mem_ctrl.hh
+++ b/src/mem/mem_ctrl.hh
@@ -101,8 +101,8 @@
     /** This comes from the outside world */
     const PacketPtr pkt;
 
-    /** MasterID associated with the packet */
-    const MasterID _masterId;
+    /** RequestorID associated with the packet */
+    const RequestorID _requestorId;
 
     const bool read;
 
@@ -159,10 +159,10 @@
     inline uint8_t qosValue() const { return _qosValue; }
 
     /**
-     * Get the packet MasterID
+     * Get the packet RequestorID
      * (interface compatibility with Packet)
      */
-    inline MasterID masterId() const { return _masterId; }
+    inline RequestorID requestorId() const { return _requestorId; }
 
     /**
      * Get the packet size
@@ -197,7 +197,7 @@
                uint8_t _bank, uint32_t _row, uint16_t bank_id, Addr _addr,
                unsigned int _size)
         : entryTime(curTick()), readyTime(curTick()), pkt(_pkt),
-          _masterId(pkt->masterId()),
+          _requestorId(pkt->requestorId()),
           read(is_read), dram(is_dram), rank(_rank), bank(_bank), row(_row),
           bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
           _qosValue(_pkt->qosValue())
@@ -237,9 +237,9 @@
 {
   private:
 
-    // For now, make use of a queued slave port to avoid dealing with
+    // For now, make use of a queued response port to avoid dealing with
     // flow control for the responses being sent back
-    class MemoryPort : public QueuedSlavePort
+    class MemoryPort : public QueuedResponsePort
     {
 
         RespPacketQueue queue;
@@ -562,25 +562,25 @@
         Stats::Scalar totGap;
         Stats::Formula avgGap;
 
-        // per-master bytes read and written to memory
-        Stats::Vector masterReadBytes;
-        Stats::Vector masterWriteBytes;
+        // per-requestor bytes read and written to memory
+        Stats::Vector requestorReadBytes;
+        Stats::Vector requestorWriteBytes;
 
-        // per-master bytes read and written to memory rate
-        Stats::Formula masterReadRate;
-        Stats::Formula masterWriteRate;
+        // per-requestor bytes read and written to memory rate
+        Stats::Formula requestorReadRate;
+        Stats::Formula requestorWriteRate;
 
-        // per-master read and write serviced memory accesses
-        Stats::Vector masterReadAccesses;
-        Stats::Vector masterWriteAccesses;
+        // per-requestor read and write serviced memory accesses
+        Stats::Vector requestorReadAccesses;
+        Stats::Vector requestorWriteAccesses;
 
-        // per-master read and write total memory access latency
-        Stats::Vector masterReadTotalLat;
-        Stats::Vector masterWriteTotalLat;
+        // per-requestor read and write total memory access latency
+        Stats::Vector requestorReadTotalLat;
+        Stats::Vector requestorWriteTotalLat;
 
-        // per-master raed and write average memory access latency
-        Stats::Formula masterReadAvgLat;
-        Stats::Formula masterWriteAvgLat;
+        // per-requestor read and write average memory access latency
+        Stats::Formula requestorReadAvgLat;
+        Stats::Formula requestorWriteAvgLat;
     };
 
     CtrlStats stats;
diff --git a/src/mem/mem_delay.cc b/src/mem/mem_delay.cc
index 9b4e7b6..8120a99 100644
--- a/src/mem/mem_delay.cc
+++ b/src/mem/mem_delay.cc
@@ -42,18 +42,18 @@
 
 MemDelay::MemDelay(const MemDelayParams *p)
     : ClockedObject(p),
-      masterPort(name() + "-master", *this),
-      slavePort(name() + "-slave", *this),
-      reqQueue(*this, masterPort),
-      respQueue(*this, slavePort),
-      snoopRespQueue(*this, masterPort)
+      requestPort(name() + "-mem_side_port", *this),
+      responsePort(name() + "-cpu_side_port", *this),
+      reqQueue(*this, requestPort),
+      respQueue(*this, responsePort),
+      snoopRespQueue(*this, requestPort)
 {
 }
 
 void
 MemDelay::init()
 {
-    if (!slavePort.isConnected() || !masterPort.isConnected())
+    if (!responsePort.isConnected() || !requestPort.isConnected())
         fatal("Memory delay is not connected on both sides.\n");
 }
 
@@ -61,10 +61,10 @@
 Port &
 MemDelay::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "master") {
-        return masterPort;
-    } else if (if_name == "slave") {
-        return slavePort;
+    if (if_name == "mem_side_port") {
+        return requestPort;
+    } else if (if_name == "cpu_side_port") {
+        return responsePort;
     } else {
         return ClockedObject::getPort(if_name, idx);
     }
@@ -73,12 +73,12 @@
 bool
 MemDelay::trySatisfyFunctional(PacketPtr pkt)
 {
-    return slavePort.trySatisfyFunctional(pkt) ||
-        masterPort.trySatisfyFunctional(pkt);
+    return responsePort.trySatisfyFunctional(pkt) ||
+        requestPort.trySatisfyFunctional(pkt);
 }
 
 MemDelay::RequestPort::RequestPort(const std::string &_name, MemDelay &_parent)
-    : QueuedMasterPort(_name, &_parent,
+    : QueuedRequestPort(_name, &_parent,
                        _parent.reqQueue, _parent.snoopRespQueue),
       parent(_parent)
 {
@@ -94,7 +94,7 @@
 
     const Tick when = curTick() + parent.delayResp(pkt) + receive_delay;
 
-    parent.slavePort.schedTimingResp(pkt, when);
+    parent.responsePort.schedTimingResp(pkt, when);
 
     return true;
 }
@@ -105,7 +105,7 @@
     if (parent.trySatisfyFunctional(pkt)) {
         pkt->makeResponse();
     } else {
-        parent.slavePort.sendFunctionalSnoop(pkt);
+        parent.responsePort.sendFunctionalSnoop(pkt);
     }
 }
 
@@ -114,19 +114,19 @@
 {
     const Tick delay = parent.delaySnoopResp(pkt);
 
-    return delay + parent.slavePort.sendAtomicSnoop(pkt);
+    return delay + parent.responsePort.sendAtomicSnoop(pkt);
 }
 
 void
 MemDelay::RequestPort::recvTimingSnoopReq(PacketPtr pkt)
 {
-    parent.slavePort.sendTimingSnoopReq(pkt);
+    parent.responsePort.sendTimingSnoopReq(pkt);
 }
 
 
 MemDelay::ResponsePort::
 ResponsePort(const std::string &_name, MemDelay &_parent)
-    : QueuedSlavePort(_name, &_parent, _parent.respQueue),
+    : QueuedResponsePort(_name, &_parent, _parent.respQueue),
       parent(_parent)
 {
 }
@@ -136,7 +136,7 @@
 {
     const Tick delay = parent.delayReq(pkt) + parent.delayResp(pkt);
 
-    return delay + parent.masterPort.sendAtomic(pkt);
+    return delay + parent.requestPort.sendAtomic(pkt);
 }
 
 bool
@@ -150,7 +150,7 @@
 
     const Tick when = curTick() + parent.delayReq(pkt) + receive_delay;
 
-    parent.masterPort.schedTimingReq(pkt, when);
+    parent.requestPort.schedTimingReq(pkt, when);
 
     return true;
 }
@@ -161,7 +161,7 @@
     if (parent.trySatisfyFunctional(pkt)) {
         pkt->makeResponse();
     } else {
-        parent.masterPort.sendFunctional(pkt);
+        parent.requestPort.sendFunctional(pkt);
     }
 }
 
@@ -170,7 +170,7 @@
 {
     const Tick when = curTick() + parent.delaySnoopResp(pkt);
 
-    parent.masterPort.schedTimingSnoopResp(pkt, when);
+    parent.requestPort.schedTimingSnoopResp(pkt, when);
 
     return true;
 }
diff --git a/src/mem/mem_delay.hh b/src/mem/mem_delay.hh
index d337b3d..50929ea 100644
--- a/src/mem/mem_delay.hh
+++ b/src/mem/mem_delay.hh
@@ -71,7 +71,7 @@
     Port &getPort(const std::string &if_name,
                   PortID idx=InvalidPortID) override;
 
-    class RequestPort : public QueuedMasterPort
+    class RequestPort : public QueuedRequestPort
     {
       public:
         RequestPort(const std::string &_name, MemDelay &_parent);
@@ -86,18 +86,18 @@
         void recvTimingSnoopReq(PacketPtr pkt) override;
 
         void recvRangeChange() override {
-            parent.slavePort.sendRangeChange();
+            parent.responsePort.sendRangeChange();
         }
 
         bool isSnooping() const override {
-            return parent.slavePort.isSnooping();
+            return parent.responsePort.isSnooping();
         }
 
       private:
         MemDelay& parent;
     };
 
-    class ResponsePort : public QueuedSlavePort
+    class ResponsePort : public QueuedResponsePort
     {
       public:
         ResponsePort(const std::string &_name, MemDelay &_parent);
@@ -109,7 +109,7 @@
         bool recvTimingSnoopResp(PacketPtr pkt) override;
 
         AddrRangeList getAddrRanges() const override {
-            return parent.masterPort.getAddrRanges();
+            return parent.requestPort.getAddrRanges();
         }
 
         bool tryTiming(PacketPtr pkt) override { return true; }
@@ -122,8 +122,8 @@
 
     bool trySatisfyFunctional(PacketPtr pkt);
 
-    RequestPort masterPort;
-    ResponsePort slavePort;
+    RequestPort requestPort;
+    ResponsePort responsePort;
 
     ReqPacketQueue reqQueue;
     RespPacketQueue respQueue;
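The renaming is visible from Python configs as well: the getPort() strings above ("mem_side_port" and "cpu_side_port") are the names a script now uses to wire a MemDelay in, where it previously used "master" and "slave". A minimal sketch, assuming the SimpleMemDelay subclass and its read_req/write_req latency parameters (defined in the MemDelay Python model, which is not shown in this patch); the membus and mem_ctrl objects are placeholders for whatever the config already has:

    from m5.objects import SimpleMemDelay

    # Insert a fixed delay stage between the bus and the memory controller,
    # addressing the delay object's ports by their new names.
    system.mem_delay = SimpleMemDelay(read_req='20ns', write_req='20ns')
    system.mem_delay.cpu_side_port = system.membus.mem_side_ports  # was .slave
    system.mem_delay.mem_side_port = system.mem_ctrl.port          # was .master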
diff --git a/src/mem/mem_interface.cc b/src/mem/mem_interface.cc
index 307cf92..d65c5d9 100644
--- a/src/mem/mem_interface.cc
+++ b/src/mem/mem_interface.cc
@@ -2265,7 +2265,6 @@
     }
 }
 
-
 bool
 NVMInterface::burstReady(MemPacket* pkt) const {
     bool read_rdy =  pkt->isRead() && (ctrl->inReadBusState(true)) &&
diff --git a/src/mem/mem_interface.hh b/src/mem/mem_interface.hh
index b67cdb0..f150f77 100644
--- a/src/mem/mem_interface.hh
+++ b/src/mem/mem_interface.hh
@@ -83,7 +83,6 @@
     {
 
       public:
-
         static const uint32_t NO_ROW = -1;
 
         uint32_t openRow;
@@ -751,6 +750,7 @@
     const Tick wrToRdDlySameBG;
     const Tick rdToWrDlySameBG;
 
+
     Enums::PageManage pageMgmt;
     /**
      * Max column accesses (read and write) per row, before forefully
@@ -1098,7 +1098,6 @@
         Stats::Histogram pendingWrites;
         Stats::Histogram bytesPerBank;
     };
-
     NVMStats stats;
 
     void processWriteRespondEvent();
diff --git a/src/mem/mem_master.hh b/src/mem/mem_requestor.hh
similarity index 80%
rename from src/mem/mem_master.hh
rename to src/mem/mem_requestor.hh
index 468cd11..9a52d87 100644
--- a/src/mem/mem_master.hh
+++ b/src/mem/mem_requestor.hh
@@ -37,34 +37,34 @@
 
 /**
  * @file
- * MasterInfo declaration.
+ * RequestorInfo declaration.
  */
 
-#ifndef __MEM_MEM_MASTER_HH__
-#define __MEM_MEM_MASTER_HH__
+#ifndef __MEM_MEM_REQUESTOR_HH__
+#define __MEM_MEM_REQUESTOR_HH__
 
 #include "mem/request.hh"
 #include "sim/sim_object.hh"
 
 /**
- * The MasterInfo class contains data about a specific master.
+ * The RequestorInfo class contains data about a specific requestor.
  */
-struct MasterInfo
+struct RequestorInfo
 {
-    MasterInfo(const SimObject* _obj,
-               std::string master_name,
-               MasterID master_id)
-      : obj(_obj), masterName(master_name), masterId(master_id)
+    RequestorInfo(const SimObject* _obj,
+               std::string requestor_name,
+               RequestorID requestor_id)
+      : obj(_obj), req_name(requestor_name), id(requestor_id)
     {}
 
-    /** SimObject related to the Master */
+    /** SimObject related to the Requestor */
     const SimObject* obj;
 
-    /** Master Name */
-    std::string masterName;
+    /** Requestor Name */
+    std::string req_name;
 
-    /** Master ID */
-    MasterID masterId;
+    /** Requestor id */
+    RequestorID id;
 };
 
-#endif // __MEM_MEM_MASTER_HH__
+#endif // __MEM_MEM_REQUESTOR_HH__
diff --git a/src/mem/noncoherent_xbar.cc b/src/mem/noncoherent_xbar.cc
index 47be023..72d894f 100644
--- a/src/mem/noncoherent_xbar.cc
+++ b/src/mem/noncoherent_xbar.cc
@@ -53,34 +53,35 @@
 NoncoherentXBar::NoncoherentXBar(const NoncoherentXBarParams *p)
     : BaseXBar(p)
 {
-    // create the ports based on the size of the master and slave
-    // vector ports, and the presence of the default port, the ports
-    // are enumerated starting from zero
-    for (int i = 0; i < p->port_master_connection_count; ++i) {
-        std::string portName = csprintf("%s.master[%d]", name(), i);
-        RequestPort* bp = new NoncoherentXBarMasterPort(portName, *this, i);
-        masterPorts.push_back(bp);
+    // create the ports based on the size of the memory-side port and
+    // CPU-side port vector ports, and the presence of the default port,
+    // the ports are enumerated starting from zero
+    for (int i = 0; i < p->port_mem_side_ports_connection_count; ++i) {
+        std::string portName = csprintf("%s.mem_side_port[%d]", name(), i);
+        RequestPort* bp = new NoncoherentXBarRequestPort(portName, *this, i);
+        memSidePorts.push_back(bp);
         reqLayers.push_back(new ReqLayer(*bp, *this,
                                          csprintf("reqLayer%d", i)));
     }
 
-    // see if we have a default slave device connected and if so add
-    // our corresponding master port
+    // see if we have a default CPU-side-port device connected and if so add
+    // our corresponding memory-side port
     if (p->port_default_connection_count) {
-        defaultPortID = masterPorts.size();
+        defaultPortID = memSidePorts.size();
         std::string portName = name() + ".default";
-        RequestPort* bp = new NoncoherentXBarMasterPort(portName, *this,
+        RequestPort* bp = new NoncoherentXBarRequestPort(portName, *this,
                                                       defaultPortID);
-        masterPorts.push_back(bp);
+        memSidePorts.push_back(bp);
         reqLayers.push_back(new ReqLayer(*bp, *this, csprintf("reqLayer%d",
                                                               defaultPortID)));
     }
 
-    // create the slave ports, once again starting at zero
-    for (int i = 0; i < p->port_slave_connection_count; ++i) {
-        std::string portName = csprintf("%s.slave[%d]", name(), i);
-        QueuedSlavePort* bp = new NoncoherentXBarSlavePort(portName, *this, i);
-        slavePorts.push_back(bp);
+    // create the CPU-side ports, once again starting at zero
+    for (int i = 0; i < p->port_cpu_side_ports_connection_count; ++i) {
+        std::string portName = csprintf("%s.cpu_side_ports[%d]", name(), i);
+        QueuedResponsePort* bp = new NoncoherentXBarResponsePort(portName,
+                                                                *this, i);
+        cpuSidePorts.push_back(bp);
         respLayers.push_back(new RespLayer(*bp, *this,
                                            csprintf("respLayer%d", i)));
     }
@@ -95,20 +96,20 @@
 }
 
 bool
-NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
+NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id)
 {
     // determine the source port based on the id
-    ResponsePort *src_port = slavePorts[slave_port_id];
+    ResponsePort *src_port = cpuSidePorts[cpu_side_port_id];
 
     // we should never see express snoops on a non-coherent crossbar
     assert(!pkt->isExpressSnoop());
 
     // determine the destination based on the address
-    PortID master_port_id = findPort(pkt->getAddrRange());
+    PortID mem_side_port_id = findPort(pkt->getAddrRange());
 
     // test if the layer should be considered occupied for the current
     // port
-    if (!reqLayers[master_port_id]->tryTiming(src_port)) {
+    if (!reqLayers[mem_side_port_id]->tryTiming(src_port)) {
         DPRINTF(NoncoherentXBar, "recvTimingReq: src %s %s 0x%x BUSY\n",
                 src_port->name(), pkt->cmdString(), pkt->getAddr());
         return false;
@@ -140,7 +141,7 @@
         !pkt->cacheResponding();
 
     // since it is a normal request, attempt to send the packet
-    bool success = masterPorts[master_port_id]->sendTimingReq(pkt);
+    bool success = memSidePorts[mem_side_port_id]->sendTimingReq(pkt);
 
     if (!success)  {
         DPRINTF(NoncoherentXBar, "recvTimingReq: src %s %s 0x%x RETRY\n",
@@ -150,7 +151,7 @@
         pkt->headerDelay = old_header_delay;
 
         // occupy until the header is sent
-        reqLayers[master_port_id]->failedTiming(src_port,
+        reqLayers[mem_side_port_id]->failedTiming(src_port,
                                                 clockEdge(Cycles(1)));
 
         return false;
@@ -159,35 +160,35 @@
     // remember where to route the response to
     if (expect_response) {
         assert(routeTo.find(pkt->req) == routeTo.end());
-        routeTo[pkt->req] = slave_port_id;
+        routeTo[pkt->req] = cpu_side_port_id;
     }
 
-    reqLayers[master_port_id]->succeededTiming(packetFinishTime);
+    reqLayers[mem_side_port_id]->succeededTiming(packetFinishTime);
 
     // stats updates
-    pktCount[slave_port_id][master_port_id]++;
-    pktSize[slave_port_id][master_port_id] += pkt_size;
+    pktCount[cpu_side_port_id][mem_side_port_id]++;
+    pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
     transDist[pkt_cmd]++;
 
     return true;
 }
 
 bool
-NoncoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
+NoncoherentXBar::recvTimingResp(PacketPtr pkt, PortID mem_side_port_id)
 {
     // determine the source port based on the id
-    RequestPort *src_port = masterPorts[master_port_id];
+    RequestPort *src_port = memSidePorts[mem_side_port_id];
 
     // determine the destination
     const auto route_lookup = routeTo.find(pkt->req);
     assert(route_lookup != routeTo.end());
-    const PortID slave_port_id = route_lookup->second;
-    assert(slave_port_id != InvalidPortID);
-    assert(slave_port_id < respLayers.size());
+    const PortID cpu_side_port_id = route_lookup->second;
+    assert(cpu_side_port_id != InvalidPortID);
+    assert(cpu_side_port_id < respLayers.size());
 
     // test if the layer should be considered occupied for the current
     // port
-    if (!respLayers[slave_port_id]->tryTiming(src_port)) {
+    if (!respLayers[cpu_side_port_id]->tryTiming(src_port)) {
         DPRINTF(NoncoherentXBar, "recvTimingResp: src %s %s 0x%x BUSY\n",
                 src_port->name(), pkt->cmdString(), pkt->getAddr());
         return false;
@@ -210,57 +211,59 @@
     // determine how long to be crossbar layer is busy
     Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
 
-    // send the packet through the destination slave port, and pay for
+    // send the packet through the destination CPU-side port, and pay for
     // any outstanding latency
     Tick latency = pkt->headerDelay;
     pkt->headerDelay = 0;
-    slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency);
+    cpuSidePorts[cpu_side_port_id]->schedTimingResp(pkt,
+                                        curTick() + latency);
 
     // remove the request from the routing table
     routeTo.erase(route_lookup);
 
-    respLayers[slave_port_id]->succeededTiming(packetFinishTime);
+    respLayers[cpu_side_port_id]->succeededTiming(packetFinishTime);
 
     // stats updates
-    pktCount[slave_port_id][master_port_id]++;
-    pktSize[slave_port_id][master_port_id] += pkt_size;
+    pktCount[cpu_side_port_id][mem_side_port_id]++;
+    pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
     transDist[pkt_cmd]++;
 
     return true;
 }
 
 void
-NoncoherentXBar::recvReqRetry(PortID master_port_id)
+NoncoherentXBar::recvReqRetry(PortID mem_side_port_id)
 {
     // responses never block on forwarding them, so the retry will
     // always be coming from a port to which we tried to forward a
     // request
-    reqLayers[master_port_id]->recvRetry();
+    reqLayers[mem_side_port_id]->recvRetry();
 }
 
 Tick
-NoncoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
+NoncoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
                                     MemBackdoorPtr *backdoor)
 {
     DPRINTF(NoncoherentXBar, "recvAtomic: packet src %s addr 0x%x cmd %s\n",
-            slavePorts[slave_port_id]->name(), pkt->getAddr(),
+            cpuSidePorts[cpu_side_port_id]->name(), pkt->getAddr(),
             pkt->cmdString());
 
     unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
     unsigned int pkt_cmd = pkt->cmdToIndex();
 
     // determine the destination port
-    PortID master_port_id = findPort(pkt->getAddrRange());
+    PortID mem_side_port_id = findPort(pkt->getAddrRange());
 
     // stats updates for the request
-    pktCount[slave_port_id][master_port_id]++;
-    pktSize[slave_port_id][master_port_id] += pkt_size;
+    pktCount[cpu_side_port_id][mem_side_port_id]++;
+    pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
     transDist[pkt_cmd]++;
 
     // forward the request to the appropriate destination
-    auto master = masterPorts[master_port_id];
+    auto mem_side_port = memSidePorts[mem_side_port_id];
     Tick response_latency = backdoor ?
-        master->sendAtomicBackdoor(pkt, *backdoor) : master->sendAtomic(pkt);
+        mem_side_port->sendAtomicBackdoor(pkt, *backdoor) :
+        mem_side_port->sendAtomic(pkt);
 
     // add the response data
     if (pkt->isResponse()) {
@@ -268,8 +271,8 @@
         pkt_cmd = pkt->cmdToIndex();
 
         // stats updates
-        pktCount[slave_port_id][master_port_id]++;
-        pktSize[slave_port_id][master_port_id] += pkt_size;
+        pktCount[cpu_side_port_id][mem_side_port_id]++;
+        pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
         transDist[pkt_cmd]++;
     }
 
@@ -279,18 +282,18 @@
 }
 
 void
-NoncoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
+NoncoherentXBar::recvFunctional(PacketPtr pkt, PortID cpu_side_port_id)
 {
     if (!pkt->isPrint()) {
         // don't do DPRINTFs on PrintReq as it clutters up the output
         DPRINTF(NoncoherentXBar,
                 "recvFunctional: packet src %s addr 0x%x cmd %s\n",
-                slavePorts[slave_port_id]->name(), pkt->getAddr(),
+                cpuSidePorts[cpu_side_port_id]->name(), pkt->getAddr(),
                 pkt->cmdString());
     }
 
-    // since our slave ports are queued ports we need to check them as well
-    for (const auto& p : slavePorts) {
+    // since our CPU-side ports are queued ports we need to check them as well
+    for (const auto& p : cpuSidePorts) {
         // if we find a response that has the data, then the
         // downstream caches/memories may be out of date, so simply stop
         // here
@@ -305,7 +308,7 @@
     PortID dest_id = findPort(pkt->getAddrRange());
 
     // forward the request to the appropriate destination
-    masterPorts[dest_id]->sendFunctional(pkt);
+    memSidePorts[dest_id]->sendFunctional(pkt);
 }
 
 NoncoherentXBar*
diff --git a/src/mem/noncoherent_xbar.hh b/src/mem/noncoherent_xbar.hh
index 1d3541c..c2fd95a 100644
--- a/src/mem/noncoherent_xbar.hh
+++ b/src/mem/noncoherent_xbar.hh
@@ -50,13 +50,13 @@
 #include "params/NoncoherentXBar.hh"
 
 /**
- * A non-coherent crossbar connects a number of non-snooping masters
- * and slaves, and routes the request and response packets based on
- * the address. The request packets issued by the master connected to
+ * A non-coherent crossbar connects a number of non-snooping memory-side ports
+ * and CPU-side ports, and routes the request and response packets based on
+ * the address. The request packets issued by the memory-side port connected to
  * a non-coherent crossbar could still snoop in caches attached to a
  * coherent crossbar, as is the case with the I/O bus and memory bus
  * in most system configurations. No snoops will, however, reach any
- * master on the non-coherent crossbar itself.
+ * memory-side port on the non-coherent crossbar itself.
  *
  * The non-coherent crossbar can be used as a template for modelling
  * PCIe, and non-coherent AMBA and OCP buses, and is typically used
@@ -75,11 +75,11 @@
     std::vector<RespLayer*> respLayers;
 
     /**
-     * Declaration of the non-coherent crossbar slave port type, one
-     * will be instantiated for each of the master ports connecting to
+     * Declaration of the non-coherent crossbar CPU-side port type, one
+     * will be instantiated for each of the memory-side ports connecting to
      * the crossbar.
      */
-    class NoncoherentXBarSlavePort : public QueuedSlavePort
+    class NoncoherentXBarResponsePort : public QueuedResponsePort
     {
       private:
 
@@ -91,9 +91,9 @@
 
       public:
 
-        NoncoherentXBarSlavePort(const std::string &_name,
+        NoncoherentXBarResponsePort(const std::string &_name,
                                 NoncoherentXBar &_xbar, PortID _id)
-            : QueuedSlavePort(_name, &_xbar, queue, _id), xbar(_xbar),
+            : QueuedResponsePort(_name, &_xbar, queue, _id), xbar(_xbar),
               queue(_xbar, *this)
         { }
 
@@ -131,11 +131,11 @@
     };
 
     /**
-     * Declaration of the crossbar master port type, one will be
-     * instantiated for each of the slave ports connecting to the
+     * Declaration of the crossbar memory-side port type, one will be
+     * instantiated for each of the CPU-side ports connecting to the
      * crossbar.
      */
-    class NoncoherentXBarMasterPort : public RequestPort
+    class NoncoherentXBarRequestPort : public RequestPort
     {
       private:
 
@@ -144,7 +144,7 @@
 
       public:
 
-        NoncoherentXBarMasterPort(const std::string &_name,
+        NoncoherentXBarRequestPort(const std::string &_name,
                                  NoncoherentXBar &_xbar, PortID _id)
             : RequestPort(_name, &_xbar, _id), xbar(_xbar)
         { }
@@ -170,12 +170,12 @@
         }
     };
 
-    virtual bool recvTimingReq(PacketPtr pkt, PortID slave_port_id);
-    virtual bool recvTimingResp(PacketPtr pkt, PortID master_port_id);
-    void recvReqRetry(PortID master_port_id);
-    Tick recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
+    virtual bool recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id);
+    virtual bool recvTimingResp(PacketPtr pkt, PortID mem_side_port_id);
+    void recvReqRetry(PortID mem_side_port_id);
+    Tick recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
                             MemBackdoorPtr *backdoor=nullptr);
-    void recvFunctional(PacketPtr pkt, PortID slave_port_id);
+    void recvFunctional(PacketPtr pkt, PortID cpu_side_port_id);
 
   public:
 
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 4ded3b3..0f14816 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -250,7 +250,7 @@
 /**
  * A Packet is used to encapsulate a transfer between two objects in
  * the memory system (e.g., the L1 and L2 cache).  (In contrast, a
- * single Request travels all the way from the requester to the
+ * single Request travels all the way from the requestor to the
  * ultimate destination and back, possibly being conveyed by several
  * different Packets along the way.)
  */
@@ -737,7 +737,7 @@
     inline void qosValue(const uint8_t qos_value)
     { _qosValue = qos_value; }
 
-    inline MasterID masterId() const { return req->masterId(); }
+    inline RequestorID requestorId() const { return req->requestorId(); }
 
     // Network error conditions... encapsulate them as methods since
     // their encoding keeps changing (from result field to command
diff --git a/src/mem/packet_queue.cc b/src/mem/packet_queue.cc
index fe08d49..ecc6653 100644
--- a/src/mem/packet_queue.cc
+++ b/src/mem/packet_queue.cc
@@ -232,44 +232,45 @@
     }
 }
 
-ReqPacketQueue::ReqPacketQueue(EventManager& _em, RequestPort& _masterPort,
+ReqPacketQueue::ReqPacketQueue(EventManager& _em, RequestPort& _mem_side_port,
                                const std::string _label)
-    : PacketQueue(_em, _label, name(_masterPort, _label)),
-      masterPort(_masterPort)
+    : PacketQueue(_em, _label, name(_mem_side_port, _label)),
+      memSidePort(_mem_side_port)
 {
 }
 
 bool
 ReqPacketQueue::sendTiming(PacketPtr pkt)
 {
-    return masterPort.sendTimingReq(pkt);
+    return memSidePort.sendTimingReq(pkt);
 }
 
 SnoopRespPacketQueue::SnoopRespPacketQueue(EventManager& _em,
-                                           RequestPort& _masterPort,
+                                           RequestPort& _mem_side_port,
                                            bool force_order,
                                            const std::string _label)
-    : PacketQueue(_em, _label, name(_masterPort, _label), force_order),
-      masterPort(_masterPort)
+    : PacketQueue(_em, _label, name(_mem_side_port, _label), force_order),
+      memSidePort(_mem_side_port)
 {
 }
 
 bool
 SnoopRespPacketQueue::sendTiming(PacketPtr pkt)
 {
-    return masterPort.sendTimingSnoopResp(pkt);
+    return memSidePort.sendTimingSnoopResp(pkt);
 }
 
-RespPacketQueue::RespPacketQueue(EventManager& _em, ResponsePort& _slavePort,
+RespPacketQueue::RespPacketQueue(EventManager& _em,
+                                 ResponsePort& _cpu_side_port,
                                  bool force_order,
                                  const std::string _label)
-    : PacketQueue(_em, _label, name(_slavePort, _label), force_order),
-      slavePort(_slavePort)
+    : PacketQueue(_em, _label, name(_cpu_side_port, _label), force_order),
+      cpuSidePort(_cpu_side_port)
 {
 }
 
 bool
 RespPacketQueue::sendTiming(PacketPtr pkt)
 {
-    return slavePort.sendTimingResp(pkt);
+    return cpuSidePort.sendTimingResp(pkt);
 }
diff --git a/src/mem/packet_queue.hh b/src/mem/packet_queue.hh
index 1fc92a2..baf6152 100644
--- a/src/mem/packet_queue.hh
+++ b/src/mem/packet_queue.hh
@@ -57,7 +57,7 @@
 
 /**
  * A packet queue is a class that holds deferred packets and later
- * sends them using the associated slave port or master port.
+ * sends them using the associated CPU-side port or memory-side port.
  */
 class PacketQueue : public Drainable
 {
@@ -124,7 +124,7 @@
 
     /**
      * Send a packet using the appropriate method for the specific
-     * subclass (reuest, response or snoop response).
+     * subclass (request, response or snoop response).
      */
     virtual bool sendTiming(PacketPtr pkt) = 0;
 
@@ -224,32 +224,32 @@
 
   protected:
 
-    RequestPort& masterPort;
+    RequestPort& memSidePort;
 
     // Static definition so it can be called when constructing the parent
     // without us being completely initialized.
-    static const std::string name(const RequestPort& masterPort,
+    static const std::string name(const RequestPort& memSidePort,
                                   const std::string& label)
-    { return masterPort.name() + "-" + label; }
+    { return memSidePort.name() + "-" + label; }
 
   public:
 
     /**
      * Create a request packet queue, linked to an event manager, a
-     * master port, and a label that will be used for functional print
+     * memory-side port, and a label that will be used for functional print
      * request packets.
      *
      * @param _em Event manager used for scheduling this queue
-     * @param _masterPort Master port used to send the packets
+     * @param _mem_side_port Memory-side port used to send the packets
      * @param _label Label to push on the label stack for print request packets
      */
-    ReqPacketQueue(EventManager& _em, RequestPort& _masterPort,
+    ReqPacketQueue(EventManager& _em, RequestPort& _mem_side_port,
                    const std::string _label = "ReqPacketQueue");
 
     virtual ~ReqPacketQueue() { }
 
     const std::string name() const
-    { return name(masterPort, label); }
+    { return name(memSidePort, label); }
 
     bool sendTiming(PacketPtr pkt);
 
@@ -260,34 +260,34 @@
 
   protected:
 
-    RequestPort& masterPort;
+    RequestPort& memSidePort;
 
     // Static definition so it can be called when constructing the parent
     // without us being completely initialized.
-    static const std::string name(const RequestPort& masterPort,
+    static const std::string name(const RequestPort& memSidePort,
                                   const std::string& label)
-    { return masterPort.name() + "-" + label; }
+    { return memSidePort.name() + "-" + label; }
 
   public:
 
     /**
      * Create a snoop response packet queue, linked to an event
-     * manager, a master port, and a label that will be used for
+     * manager, a memory-side port, and a label that will be used for
      * functional print request packets.
      *
      * @param _em Event manager used for scheduling this queue
-     * @param _masterPort Master port used to send the packets
+     * @param _mem_side_port Memory-side port used to send the packets
      * @param force_order Force insertion order for packets with same address
      * @param _label Label to push on the label stack for print request packets
      */
-    SnoopRespPacketQueue(EventManager& _em, RequestPort& _masterPort,
+    SnoopRespPacketQueue(EventManager& _em, RequestPort& _mem_side_port,
                          bool force_order = false,
                          const std::string _label = "SnoopRespPacketQueue");
 
     virtual ~SnoopRespPacketQueue() { }
 
     const std::string name() const
-    { return name(masterPort, label); }
+    { return name(memSidePort, label); }
 
     bool sendTiming(PacketPtr pkt);
 
@@ -298,34 +298,34 @@
 
   protected:
 
-    ResponsePort& slavePort;
+    ResponsePort& cpuSidePort;
 
     // Static definition so it can be called when constructing the parent
     // without us being completely initialized.
-    static const std::string name(const ResponsePort& slavePort,
+    static const std::string name(const ResponsePort& cpuSidePort,
                                   const std::string& label)
-    { return slavePort.name() + "-" + label; }
+    { return cpuSidePort.name() + "-" + label; }
 
   public:
 
     /**
      * Create a response packet queue, linked to an event manager, a
-     * slave port, and a label that will be used for functional print
+     * CPU-side port, and a label that will be used for functional print
      * request packets.
      *
      * @param _em Event manager used for scheduling this queue
-     * @param _slavePort Slave port used to send the packets
+     * @param _cpu_side_port CPU-side port used to send the packets
      * @param force_order Force insertion order for packets with same address
      * @param _label Label to push on the label stack for print request packets
      */
-    RespPacketQueue(EventManager& _em, ResponsePort& _slavePort,
+    RespPacketQueue(EventManager& _em, ResponsePort& _cpu_side_port,
                     bool force_order = false,
                     const std::string _label = "RespPacketQueue");
 
     virtual ~RespPacketQueue() { }
 
     const std::string name() const
-    { return name(slavePort, label); }
+    { return name(cpuSidePort, label); }
 
     bool sendTiming(PacketPtr pkt);
 
diff --git a/src/mem/port.cc b/src/mem/port.cc
index b901571..e5d8308 100644
--- a/src/mem/port.cc
+++ b/src/mem/port.cc
@@ -154,7 +154,7 @@
 RequestPort::printAddr(Addr a)
 {
     auto req = std::make_shared<Request>(
-        a, 1, 0, Request::funcMasterId);
+        a, 1, 0, Request::funcRequestorId);
 
     Packet pkt(req, MemCmd::PrintReq);
     Packet::PrintReqState prs(std::cerr);
diff --git a/src/mem/port_proxy.cc b/src/mem/port_proxy.cc
index b5fce19..f12ba8b 100644
--- a/src/mem/port_proxy.cc
+++ b/src/mem/port_proxy.cc
@@ -47,7 +47,7 @@
          gen.next()) {
 
         auto req = std::make_shared<Request>(
-            gen.addr(), gen.size(), flags, Request::funcMasterId);
+            gen.addr(), gen.size(), flags, Request::funcRequestorId);
 
         Packet pkt(req, MemCmd::ReadReq);
         pkt.dataStatic(static_cast<uint8_t *>(p));
@@ -64,7 +64,7 @@
          gen.next()) {
 
         auto req = std::make_shared<Request>(
-            gen.addr(), gen.size(), flags, Request::funcMasterId);
+            gen.addr(), gen.size(), flags, Request::funcRequestorId);
 
         Packet pkt(req, MemCmd::WriteReq);
         pkt.dataStaticConst(static_cast<const uint8_t *>(p));
diff --git a/src/mem/probes/MemTraceProbe.py b/src/mem/probes/MemTraceProbe.py
index ba791eb..2225c36 100644
--- a/src/mem/probes/MemTraceProbe.py
+++ b/src/mem/probes/MemTraceProbe.py
@@ -50,5 +50,5 @@
     # packet trace output file, disabled by default
     trace_file = Param.String("", "Packet trace output file")
 
-    # System object to look up the name associated with a master ID
+    # System object to look up the name associated with a requestor ID
     system = Param.System(Parent.any, "System the probe belongs to")
diff --git a/src/mem/probes/mem_trace.cc b/src/mem/probes/mem_trace.cc
index a11b9e6..dbfb685 100644
--- a/src/mem/probes/mem_trace.cc
+++ b/src/mem/probes/mem_trace.cc
@@ -86,10 +86,10 @@
     header_msg.set_obj_id(name());
     header_msg.set_tick_freq(SimClock::Frequency);
 
-    for (int i = 0; i < system->maxMasters(); i++) {
+    for (int i = 0; i < system->maxRequestors(); i++) {
         auto id_string = header_msg.add_id_strings();
         id_string->set_key(i);
-        id_string->set_value(system->getMasterName(i));
+        id_string->set_value(system->getRequestorName(i));
     }
 
     traceStream->write(header_msg);
@@ -114,7 +114,7 @@
     pkt_msg.set_size(pkt_info.size);
     if (withPC && pkt_info.pc != 0)
         pkt_msg.set_pc(pkt_info.pc);
-    pkt_msg.set_pkt_id(pkt_info.master);
+    pkt_msg.set_pkt_id(pkt_info.id);
 
     traceStream->write(pkt_msg);
 }
diff --git a/src/mem/qos/QoSMemCtrl.py b/src/mem/qos/QoSMemCtrl.py
index f55105b..e4826d6 100644
--- a/src/mem/qos/QoSMemCtrl.py
+++ b/src/mem/qos/QoSMemCtrl.py
@@ -69,7 +69,7 @@
         "Memory Controller Requests same-QoS selection policy")
 
     # flag to select QoS syncronised scheduling
-    # (calls the scheduler on all masters at every packet arrival)
+    # (calls the scheduler on all requestors at every packet arrival)
     qos_syncro_scheduler = Param.Bool(False,
         "Enables QoS syncronized scheduling")
 
@@ -77,6 +77,8 @@
     qos_priority_escalation = Param.Bool(False,
         "Enables QoS priority escalation")
 
-    # Master ID to be mapped to service parameters in QoS schedulers
-    qos_masters = VectorParam.String(['']* 16,
-        "Master Names to be mapped to service parameters in QoS scheduler")
+    # Requestor ID to be mapped to service parameters in QoS schedulers
+    qos_requestors = VectorParam.String(['']* 16,
+        "Requestor Names to be mapped to service parameters in QoS scheduler")
+    qos_masters = DeprecatedParam(qos_requestors,
+        '`qos_masters` is now called `qos_requestors`')
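Configs that still assign the old parameter keep working through the DeprecatedParam alias, but new scripts should switch to the new name. A minimal sketch, assuming a QoS-aware memory controller is already instantiated; the requestor names are placeholders:

    # Map requestors to QoS service parameters under the new parameter name.
    system.mem_ctrl.qos_requestors = ['cpu0.data', 'cpu0.inst']
    # The old spelling still works but emits a deprecation warning:
    # system.mem_ctrl.qos_masters = ['cpu0.data', 'cpu0.inst']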
diff --git a/src/mem/qos/QoSPolicy.py b/src/mem/qos/QoSPolicy.py
index ed72043..0945cd7 100644
--- a/src/mem/qos/QoSPolicy.py
+++ b/src/mem/qos/QoSPolicy.py
@@ -51,36 +51,43 @@
     cxx_class = 'QoS::FixedPriorityPolicy'
 
     cxx_exports = [
-        PyBindMethod('initMasterName'),
-        PyBindMethod('initMasterObj'),
+        PyBindMethod('initRequestorName'),
+        PyBindMethod('initRequestorObj'),
     ]
 
-    _mpriorities = None
+    _requestor_priorities = None
 
-    def setMasterPriority(self, master, priority):
-        if not self._mpriorities:
-            self._mpriorities = []
+    def setRequestorPriority(self, request_port, priority):
+        if not self._requestor_priorities:
+            self._requestor_priorities = []
 
-        self._mpriorities.append([master, priority])
+        self._requestor_priorities.append([request_port, priority])
+
+    def setMasterPriority(self, request_port, priority):
+        warn('QosFixedPriority.setMasterPriority is deprecated in favor of '
+            'setRequestorPriority. See src/mem/qos/QoSPolicy.py for more '
+            'information')
+        self.setRequestorPriority(request_port, priority)
 
     def init(self):
-        if not self._mpriorities:
-            print("Error, use setMasterPriority to init masters/priorities\n");
+        if not self._requestor_priorities:
+            print("Error, "
+                 "use setRequestorPriority to init requestors/priorities\n");
             exit(1)
         else:
-            for mprio in self._mpriorities:
-                master = mprio[0]
-                priority = mprio[1]
-                if isinstance(master, string_types):
-                    self.getCCObject().initMasterName(
-                        master, int(priority))
+            for prio in self._requestor_priorities:
+                request_port = prio[0]
+                priority = prio[1]
+                if isinstance(request_port, string_types):
+                    self.getCCObject().initRequestorName(
+                        request_port, int(priority))
                 else:
-                    self.getCCObject().initMasterObj(
-                        master.getCCObject(), priority)
+                    self.getCCObject().initRequestorObj(
+                        request_port.getCCObject(), priority)
 
-    # default fixed priority value for non-listed Masters
+    # default fixed priority value for non-listed Requestors
     qos_fixed_prio_default_prio = Param.UInt8(0,
-        "Default priority for non-listed Masters")
+        "Default priority for non-listed Requestors")
 
 class QoSPropFairPolicy(QoSPolicy):
     type = 'QoSPropFairPolicy'
@@ -88,31 +95,31 @@
     cxx_class = 'QoS::PropFairPolicy'
 
     cxx_exports = [
-        PyBindMethod('initMasterName'),
-        PyBindMethod('initMasterObj'),
+        PyBindMethod('initRequestorName'),
+        PyBindMethod('initRequestorObj'),
     ]
 
-    _mscores = None
+    _requestor_scores = None
 
-    def setInitialScore(self, master, score):
-        if not self._mscores:
-            self._mscores = []
+    def setInitialScore(self, request_port, score):
+        if not self._requestor_scores:
+            self._requestor_scores = []
 
-        self._mscores.append([master, score])
+        self._requestor_scores.append([request_port, score])
 
     def init(self):
-        if not self._mscores:
-            print("Error, use setInitialScore to init masters/scores\n");
+        if not self._requestor_scores:
+            print("Error, use setInitialScore to init requestors/scores\n");
             exit(1)
         else:
-            for mprio in self._mscores:
-                master = mprio[0]
-                score = mprio[1]
-                if isinstance(master, string_types):
-                    self.getCCObject().initMasterName(
-                        master, float(score))
+            for prio in self._requestor_scores:
+                request_port = prio[0]
+                score = prio[1]
+                if isinstance(request_port, string_types):
+                    self.getCCObject().initRequestorName(
+                        request_port, float(score))
                 else:
-                    self.getCCObject().initMasterObj(
-                        master.getCCObject(), float(score))
+                    self.getCCObject().initRequestorObj(
+                        request_port.getCCObject(), float(score))
 
     weight = Param.Float(0.5, "Pf score weight")
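Scripts that built a fixed-priority policy with setMasterPriority keep running, since the method now just warns and forwards to setRequestorPriority. A minimal sketch, assuming the surrounding QoSFixedPriorityPolicy class and a controller exposing a qos_policy parameter; the requestor names are placeholders:

    from m5.objects import QoSFixedPriorityPolicy

    policy = QoSFixedPriorityPolicy(qos_fixed_prio_default_prio=0)
    policy.setRequestorPriority('cpu0.data', 2)   # preferred
    policy.setMasterPriority('cpu0.inst', 1)      # deprecated alias, warns
    system.mem_ctrl.qos_policy = policy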
diff --git a/src/mem/qos/mem_ctrl.cc b/src/mem/qos/mem_ctrl.cc
index 190960b..b5caf6e 100644
--- a/src/mem/qos/mem_ctrl.cc
+++ b/src/mem/qos/mem_ctrl.cc
@@ -78,18 +78,18 @@
 {}
 
 void
-MemCtrl::logRequest(BusState dir, MasterID m_id, uint8_t qos,
+MemCtrl::logRequest(BusState dir, RequestorID id, uint8_t qos,
                     Addr addr, uint64_t entries)
 {
     // If needed, initialize all counters and statistics
-    // for this master
-    addMaster(m_id);
+    // for this requestor
+    addRequestor(id);
 
     DPRINTF(QOS,
-            "QoSMemCtrl::logRequest MASTER %s [id %d] address %d"
-            " prio %d this master q packets %d"
+            "QoSMemCtrl::logRequest REQUESTOR %s [id %d] address %d"
+            " prio %d this requestor q packets %d"
             " - queue size %d - requested entries %d\n",
-            masters[m_id], m_id, addr, qos, packetPriorities[m_id][qos],
+            requestors[id], id, addr, qos, packetPriorities[id][qos],
             (dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos],
             entries);
 
@@ -101,51 +101,51 @@
         totalWriteQueueSize += entries;
     }
 
-    packetPriorities[m_id][qos] += entries;
+    packetPriorities[id][qos] += entries;
     for (auto j = 0; j < entries; ++j) {
-        requestTimes[m_id][addr].push_back(curTick());
+        requestTimes[id][addr].push_back(curTick());
     }
 
     // Record statistics
-    stats.avgPriority[m_id].sample(qos);
+    stats.avgPriority[id].sample(qos);
 
     // Compute avg priority distance
 
-    for (uint8_t i = 0; i < packetPriorities[m_id].size(); ++i) {
+    for (uint8_t i = 0; i < packetPriorities[id].size(); ++i) {
         uint8_t distance =
-            (abs(int(qos) - int(i))) * packetPriorities[m_id][i];
+            (abs(int(qos) - int(i))) * packetPriorities[id][i];
 
         if (distance > 0) {
-            stats.avgPriorityDistance[m_id].sample(distance);
+            stats.avgPriorityDistance[id].sample(distance);
             DPRINTF(QOS,
-                    "QoSMemCtrl::logRequest MASTER %s [id %d]"
+                    "QoSMemCtrl::logRequest REQUESTOR %s [id %d]"
                     " registering priority distance %d for priority %d"
                     " (packets %d)\n",
-                    masters[m_id], m_id, distance, i,
-                    packetPriorities[m_id][i]);
+                    requestors[id], id, distance, i,
+                    packetPriorities[id][i]);
         }
     }
 
     DPRINTF(QOS,
-            "QoSMemCtrl::logRequest MASTER %s [id %d] prio %d "
-            "this master q packets %d - new queue size %d\n",
-            masters[m_id], m_id, qos, packetPriorities[m_id][qos],
+            "QoSMemCtrl::logRequest REQUESTOR %s [id %d] prio %d "
+            "this requestor q packets %d - new queue size %d\n",
+            requestors[id], id, qos, packetPriorities[id][qos],
             (dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos]);
 
 }
 
 void
-MemCtrl::logResponse(BusState dir, MasterID m_id, uint8_t qos,
+MemCtrl::logResponse(BusState dir, RequestorID id, uint8_t qos,
                      Addr addr, uint64_t entries, double delay)
 {
-    panic_if(!hasMaster(m_id),
-        "Logging response with invalid master\n");
+    panic_if(!hasRequestor(id),
+        "Logging response with invalid requestor\n");
 
     DPRINTF(QOS,
-            "QoSMemCtrl::logResponse MASTER %s [id %d] address %d prio"
-            " %d this master q packets %d"
+            "QoSMemCtrl::logResponse REQUESTOR %s [id %d] address %d prio"
+            " %d this requestor q packets %d"
             " - queue size %d - requested entries %d\n",
-            masters[m_id], m_id, addr, qos, packetPriorities[m_id][qos],
+            requestors[id], id, addr, qos, packetPriorities[id][qos],
             (dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos],
             entries);
 
@@ -157,17 +157,17 @@
         totalWriteQueueSize -= entries;
     }
 
-    panic_if(packetPriorities[m_id][qos] == 0,
-             "QoSMemCtrl::logResponse master %s negative packets for priority"
-             " %d", masters[m_id], qos);
+    panic_if(packetPriorities[id][qos] == 0,
+             "QoSMemCtrl::logResponse requestor %s negative packets "
+             "for priority %d", requestors[id], qos);
 
-    packetPriorities[m_id][qos] -= entries;
+    packetPriorities[id][qos] -= entries;
 
     for (auto j = 0; j < entries; ++j) {
-        auto it = requestTimes[m_id].find(addr);
-        panic_if(it == requestTimes[m_id].end(),
-                 "QoSMemCtrl::logResponse master %s unmatched response for"
-                 " address %d received", masters[m_id], addr);
+        auto it = requestTimes[id].find(addr);
+        panic_if(it == requestTimes[id].end(),
+                 "QoSMemCtrl::logResponse requestor %s unmatched response for"
+                 " address %d received", requestors[id], addr);
 
         // Load request time
         uint64_t requestTime = it->second.front();
@@ -177,7 +177,7 @@
 
         // Remove whole address entry if last one
         if (it->second.empty()) {
-            requestTimes[m_id].erase(it);
+            requestTimes[id].erase(it);
         }
         // Compute latency
         double latency = (double) (curTick() + delay - requestTime)
@@ -197,22 +197,22 @@
     }
 
     DPRINTF(QOS,
-            "QoSMemCtrl::logResponse MASTER %s [id %d] prio %d "
-            "this master q packets %d - new queue size %d\n",
-            masters[m_id], m_id, qos, packetPriorities[m_id][qos],
+            "QoSMemCtrl::logResponse REQUESTOR %s [id %d] prio %d "
+            "this requestor q packets %d - new queue size %d\n",
+            requestors[id], id, qos, packetPriorities[id][qos],
             (dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos]);
 }
 
 uint8_t
-MemCtrl::schedule(MasterID m_id, uint64_t data)
+MemCtrl::schedule(RequestorID id, uint64_t data)
 {
     if (policy) {
-        return policy->schedule(m_id, data);
+        return policy->schedule(id, data);
     } else {
         DPRINTF(QOS,
-                "QoSScheduler::schedule master ID [%d] "
+                "QoSScheduler::schedule requestor id [%d] "
                 "data received [%d], but QoS scheduler not initialized\n",
-                m_id,data);
+                id, data);
         return 0;
     }
 }
@@ -223,7 +223,7 @@
     assert(pkt->req);
 
     if (policy) {
-        return schedule(pkt->req->masterId(), pkt->getSize());
+        return schedule(pkt->req->requestorId(), pkt->getSize());
     } else {
         DPRINTF(QOS, "QoSScheduler::schedule Packet received [Qv %d], "
                 "but QoS scheduler not initialized\n",
@@ -261,16 +261,16 @@
 }
 
 void
-MemCtrl::addMaster(MasterID m_id)
+MemCtrl::addRequestor(RequestorID id)
 {
-    if (!hasMaster(m_id)) {
-        masters.emplace(m_id, _system->getMasterName(m_id));
-        packetPriorities[m_id].resize(numPriorities(), 0);
+    if (!hasRequestor(id)) {
+        requestors.emplace(id, _system->getRequestorName(id));
+        packetPriorities[id].resize(numPriorities(), 0);
 
         DPRINTF(QOS,
-                "QoSMemCtrl::addMaster registering"
-                " Master %s [id %d]\n",
-                masters[m_id], m_id);
+                "QoSMemCtrl::addRequestor registering"
+                " Requestor %s [id %d]\n",
+                requestors[id], id);
     }
 }
 
@@ -307,18 +307,18 @@
     using namespace Stats;
 
     System *system = memCtrl._system;
-    const auto max_masters = system->maxMasters();
+    const auto max_requestors = system->maxRequestors();
     const auto num_priorities = memCtrl.numPriorities();
 
-    // Initializes per master statistics
+    // Initializes per requestor statistics
     avgPriority
-        .init(max_masters)
+        .init(max_requestors)
         .flags(nozero | nonan)
         .precision(2)
         ;
 
     avgPriorityDistance
-        .init(max_masters)
+        .init(max_requestors)
         .flags(nozero | nonan)
         ;
 
@@ -332,10 +332,10 @@
         .precision(12)
         ;
 
-    for (int i = 0; i < max_masters; i++) {
-        const std::string master = system->getMasterName(i);
-        avgPriority.subname(i, master);
-        avgPriorityDistance.subname(i, master);
+    for (int i = 0; i < max_requestors; i++) {
+        const std::string name = system->getRequestorName(i);
+        avgPriority.subname(i, name);
+        avgPriorityDistance.subname(i, name);
     }
 
     for (int j = 0; j < num_priorities; ++j) {
diff --git a/src/mem/qos/mem_ctrl.hh b/src/mem/qos/mem_ctrl.hh
index 5d7c9d6..d472f20 100644
--- a/src/mem/qos/mem_ctrl.hh
+++ b/src/mem/qos/mem_ctrl.hh
@@ -80,18 +80,18 @@
 
     /**
      * Enables QoS synchronized scheduling invokes the QoS scheduler
-     * on all masters, at every packet arrival.
+     * on all requestors, at every packet arrival.
      */
     const bool qosSyncroScheduler;
 
-    /** Hash of master ID - master name */
-    std::unordered_map<MasterID, const std::string> masters;
+    /** Hash of requestor ID - requestor name */
+    std::unordered_map<RequestorID, const std::string> requestors;
 
-    /** Hash of masters - number of packets queued per priority */
-    std::unordered_map<MasterID, std::vector<uint64_t> > packetPriorities;
+    /** Hash of requestors - number of packets queued per priority */
+    std::unordered_map<RequestorID, std::vector<uint64_t> > packetPriorities;
 
-    /** Hash of masters - address of request - queue of times of request */
-    std::unordered_map<MasterID,
+    /** Hash of requestors - address of request - queue of times of request */
+    std::unordered_map<RequestorID,
             std::unordered_map<uint64_t, std::deque<uint64_t>> > requestTimes;
 
     /**
@@ -129,10 +129,10 @@
 
         const MemCtrl &memCtrl;
 
-        /** per-master average QoS priority */
+        /** per-requestor average QoS priority */
         Stats::VectorStandardDeviation avgPriority;
         /**
-         * per-master average QoS distance between assigned and
+         * per-requestor average QoS distance between assigned and
          * queued values
          */
         Stats::VectorStandardDeviation avgPriorityDistance;
@@ -156,23 +156,23 @@
 
     /**
      * Initializes dynamically counters and
-     * statistics for a given Master
+     * statistics for a given Requestor
      *
-     * @param m_id the master ID
+     * @param id the requestor's ID
      */
-    void addMaster(const MasterID m_id);
+    void addRequestor(const RequestorID id);
 
     /**
      * Called upon receiving a request or
      * updates statistics and updates queues status
      *
      * @param dir request direction
-     * @param m_id master id
+     * @param id requestor id
      * @param qos packet qos value
      * @param addr packet address
      * @param entries number of entries to record
      */
-    void logRequest(BusState dir, MasterID m_id, uint8_t qos,
+    void logRequest(BusState dir, RequestorID id, uint8_t qos,
                     Addr addr, uint64_t entries);
 
     /**
@@ -180,13 +180,13 @@
      * updates statistics and updates queues status
      *
      * @param dir response direction
-     * @param m_id master id
+     * @param id requestor id
      * @param qos packet qos value
      * @param addr packet address
      * @param entries number of entries to record
      * @param delay response delay
      */
-    void logResponse(BusState dir, MasterID m_id, uint8_t qos,
+    void logResponse(BusState dir, RequestorID id, uint8_t qos,
                      Addr addr, uint64_t entries, double delay);
 
     /**
@@ -203,7 +203,7 @@
                         uint64_t queue_entry_size, const PacketPtr pkt);
 
     using SimObject::schedule;
-    uint8_t schedule(MasterID m_id, uint64_t data);
+    uint8_t schedule(RequestorID id, uint64_t data);
     uint8_t schedule(const PacketPtr pkt);
 
     /**
@@ -226,22 +226,22 @@
 
     /**
      * Escalates/demotes priority of all packets
-     * belonging to the passed master to given
+     * belonging to the passed requestor to given
      * priority value
      *
      * @param queues list of pointers to packet queues
      * @param queue_entry_size size of an entry in the queue
-     * @param m_id master whose packets priority will change
+     * @param id requestor whose packets priority will change
      * @param tgt_prio target priority value
      */
     template<typename Queues>
     void escalate(std::initializer_list<Queues*> queues,
                   uint64_t queue_entry_size,
-                  MasterID m_id, uint8_t tgt_prio);
+                  RequestorID id, uint8_t tgt_prio);
 
     /**
      * Escalates/demotes priority of all packets
-     * belonging to the passed master to given
+     * belonging to the passed requestor to given
      * priority value in a specified cluster of queues
      * (e.g. read queues or write queues) which is passed
      * as an argument to the function.
@@ -250,13 +250,13 @@
      *
      * @param queues reference to packet queues
      * @param queue_entry_size size of an entry in the queue
-     * @param m_id master whose packets priority will change
+     * @param id requestor whose packets priority will change
      * @param curr_prio source queue priority value
      * @param tgt_prio target queue priority value
      */
     template<typename Queues>
     void escalateQueues(Queues& queues, uint64_t queue_entry_size,
-                        MasterID m_id, uint8_t curr_prio, uint8_t tgt_prio);
+                        RequestorID id, uint8_t curr_prio, uint8_t tgt_prio);
 
   public:
     /**
@@ -283,18 +283,18 @@
     BusState getBusStateNext() const { return busStateNext; }
 
     /**
-     * hasMaster returns true if the selected master(ID) has
+     * hasRequestor returns true if the selected requestor(ID) has
      * been registered in the memory controller, which happens if
      * the memory controller has received at least a packet from
-     * that master.
+     * that requestor.
      *
-     * @param m_id master id to lookup
+     * @param id requestor id to lookup
      * @return true if the memory controller has received a packet
-     *         from the master, false otherwise.
+     *         from the requestor, false otherwise.
      */
-    bool hasMaster(MasterID m_id) const
+    bool hasRequestor(RequestorID id) const
     {
-        return masters.find(m_id) != masters.end();
+        return requestors.find(id) != requestors.end();
     }
 
     /**
@@ -353,50 +353,50 @@
 template<typename Queues>
 void
 MemCtrl::escalateQueues(Queues& queues, uint64_t queue_entry_size,
-                        MasterID m_id, uint8_t curr_prio, uint8_t tgt_prio)
+                        RequestorID id, uint8_t curr_prio, uint8_t tgt_prio)
 {
     auto it = queues[curr_prio].begin();
     while (it != queues[curr_prio].end()) {
         // No packets left to move
-        if (packetPriorities[m_id][curr_prio] == 0)
+        if (packetPriorities[id][curr_prio] == 0)
             break;
 
         auto pkt = *it;
 
         DPRINTF(QOS,
                 "QoSMemCtrl::escalate checking priority %d packet "
-                "m_id %d address %d\n", curr_prio,
-                pkt->masterId(), pkt->getAddr());
+                "id %d address %d\n", curr_prio,
+                pkt->requestorId(), pkt->getAddr());
 
         // Found a packet to move
-        if (pkt->masterId() == m_id) {
+        if (pkt->requestorId() == id) {
 
             uint64_t moved_entries = divCeil(pkt->getSize(),
                                              queue_entry_size);
 
             DPRINTF(QOS,
-                    "QoSMemCtrl::escalate Master %s [id %d] moving "
+                    "QoSMemCtrl::escalate Requestor %s [id %d] moving "
                     "packet addr %d size %d (p size %d) from priority %d "
                     "to priority %d - "
-                    "this master packets %d (entries to move %d)\n",
-                    masters[m_id], m_id, pkt->getAddr(),
+                    "this requestor packets %d (entries to move %d)\n",
+                    requestors[id], id, pkt->getAddr(),
                     pkt->getSize(),
                     queue_entry_size, curr_prio, tgt_prio,
-                    packetPriorities[m_id][curr_prio], moved_entries);
+                    packetPriorities[id][curr_prio], moved_entries);
 
 
             if (pkt->isRead()) {
                 panic_if(readQueueSizes[curr_prio] < moved_entries,
-                         "QoSMemCtrl::escalate master %s negative READ "
+                         "QoSMemCtrl::escalate requestor %s negative READ "
                          "packets for priority %d",
-                        masters[m_id], tgt_prio);
+                        requestors[id], tgt_prio);
                 readQueueSizes[curr_prio] -= moved_entries;
                 readQueueSizes[tgt_prio] += moved_entries;
             } else if (pkt->isWrite()) {
                 panic_if(writeQueueSizes[curr_prio] < moved_entries,
-                         "QoSMemCtrl::escalate master %s negative WRITE "
+                         "QoSMemCtrl::escalate requestor %s negative WRITE "
                          "packets for priority %d",
-                        masters[m_id], tgt_prio);
+                        requestors[id], tgt_prio);
                 writeQueueSizes[curr_prio] -= moved_entries;
                 writeQueueSizes[tgt_prio] += moved_entries;
             }
@@ -408,13 +408,13 @@
             // Erase element from source packet queue, this will
             // increment the iterator
             it = queues[curr_prio].erase(it);
-            panic_if(packetPriorities[m_id][curr_prio] < moved_entries,
-                     "QoSMemCtrl::escalate master %s negative packets "
+            panic_if(packetPriorities[id][curr_prio] < moved_entries,
+                     "QoSMemCtrl::escalate requestor %s negative packets "
                      "for priority %d",
-                     masters[m_id], tgt_prio);
+                     requestors[id], tgt_prio);
 
-            packetPriorities[m_id][curr_prio] -= moved_entries;
-            packetPriorities[m_id][tgt_prio] += moved_entries;
+            packetPriorities[id][curr_prio] -= moved_entries;
+            packetPriorities[id][tgt_prio] += moved_entries;
         } else {
             // Increment iterator to next location in the queue
             it++;
@@ -426,16 +426,16 @@
 void
 MemCtrl::escalate(std::initializer_list<Queues*> queues,
                   uint64_t queue_entry_size,
-                  MasterID m_id, uint8_t tgt_prio)
+                  RequestorID id, uint8_t tgt_prio)
 {
     // If needed, initialize all counters and statistics
-    // for this master
-    addMaster(m_id);
+    // for this requestor
+    addRequestor(id);
 
     DPRINTF(QOS,
-            "QoSMemCtrl::escalate Master %s [id %d] to priority "
-            "%d (currently %d packets)\n",masters[m_id], m_id, tgt_prio,
-            packetPriorities[m_id][tgt_prio]);
+            "QoSMemCtrl::escalate Requestor %s [id %d] to priority "
+            "%d (currently %d packets)\n",requestors[id], id, tgt_prio,
+            packetPriorities[id][tgt_prio]);
 
     for (uint8_t curr_prio = 0; curr_prio < numPriorities(); ++curr_prio) {
         // Skip target priority
@@ -443,30 +443,30 @@
             continue;
 
         // Process other priority packet
-        while (packetPriorities[m_id][curr_prio] > 0) {
+        while (packetPriorities[id][curr_prio] > 0) {
             DPRINTF(QOS,
                     "QoSMemCtrl::escalate MID %d checking priority %d "
                     "(packets %d)- current packets in prio %d:  %d\n"
                     "\t(source read %d source write %d target read %d, "
                     "target write %d)\n",
-                    m_id, curr_prio, packetPriorities[m_id][curr_prio],
-                    tgt_prio, packetPriorities[m_id][tgt_prio],
+                    id, curr_prio, packetPriorities[id][curr_prio],
+                    tgt_prio, packetPriorities[id][tgt_prio],
                     readQueueSizes[curr_prio],
                     writeQueueSizes[curr_prio], readQueueSizes[tgt_prio],
                     writeQueueSizes[tgt_prio]);
 
             // Check both read and write queue
             for (auto q : queues) {
-                escalateQueues(*q, queue_entry_size, m_id,
+                escalateQueues(*q, queue_entry_size, id,
                                curr_prio, tgt_prio);
             }
         }
     }
 
     DPRINTF(QOS,
-            "QoSMemCtrl::escalate Completed master %s [id %d] to priority %d "
-            "(now %d packets)\n\t(total read %d, total write %d)\n",
-            masters[m_id], m_id, tgt_prio, packetPriorities[m_id][tgt_prio],
+            "QoSMemCtrl::escalate Completed requestor %s [id %d] to priority "
+            "%d (now %d packets)\n\t(total read %d, total write %d)\n",
+            requestors[id], id, tgt_prio, packetPriorities[id][tgt_prio],
             readQueueSizes[tgt_prio], writeQueueSizes[tgt_prio]);
 }
 
@@ -484,21 +484,21 @@
     pkt->qosValue(pkt_priority);
 
     if (qosSyncroScheduler) {
-        // Call the scheduling function on all other masters.
-        for (const auto& m : masters) {
+        // Call the scheduling function on all other requestors.
+        for (const auto& requestor : requestors) {
 
-            if (m.first == pkt->masterId())
+            if (requestor.first == pkt->requestorId())
                 continue;
 
-            uint8_t prio = schedule(m.first, 0);
+            uint8_t prio = schedule(requestor.first, 0);
 
             if (qosPriorityEscalation) {
                 DPRINTF(QOS,
                         "QoSMemCtrl::qosSchedule: (syncro) escalating "
-                        "MASTER %s to assigned priority %d\n",
-                        _system->getMasterName(m.first),
+                        "REQUESTOR %s to assigned priority %d\n",
+                        _system->getRequestorName(requestor.first),
                         prio);
-                escalate(queues, queue_entry_size, m.first, prio);
+                escalate(queues, queue_entry_size, requestor.first, prio);
             }
         }
     }
@@ -506,10 +506,10 @@
     if (qosPriorityEscalation) {
         DPRINTF(QOS,
                 "QoSMemCtrl::qosSchedule: escalating "
-                "MASTER %s to assigned priority %d\n",
-                _system->getMasterName(pkt->masterId()),
+                "REQUESTOR %s to assigned priority %d\n",
+                _system->getRequestorName(pkt->requestorId()),
                 pkt_priority);
-        escalate(queues, queue_entry_size, pkt->masterId(), pkt_priority);
+        escalate(queues, queue_entry_size, pkt->requestorId(), pkt_priority);
     }
 
     // Update last service tick for selected priority
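
As a point of reference for the renamed hooks above, a QoS-aware controller
front end roughly ties them together when it accepts a packet. The sketch
below is illustrative only: the controller name, queue members and burst size
are assumptions, while qosSchedule(), logRequest() and requestorId() are the
interfaces renamed in this patch.

    // Illustrative sketch; MyCtrl, readQueue, writeQueue and burstSize are
    // assumed names, not part of this patch.
    void
    MyCtrl::acceptPacket(PacketPtr pkt)
    {
        // Pick (and stamp) a QoS priority for this packet; this may also
        // escalate already-queued packets from the same requestor.
        uint8_t prio = qosSchedule({&readQueue, &writeQueue}, burstSize, pkt);

        // Account the accepted request against the packet's requestor id.
        logRequest(pkt->isRead() ? READ : WRITE, pkt->req->requestorId(),
                   prio, pkt->getAddr(),
                   divCeil(pkt->getSize(), burstSize));
    }
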
diff --git a/src/mem/qos/mem_sink.cc b/src/mem/qos/mem_sink.cc
index dbdf548..e931429 100644
--- a/src/mem/qos/mem_sink.cc
+++ b/src/mem/qos/mem_sink.cc
@@ -136,9 +136,9 @@
              __func__);
 
     DPRINTF(QOS,
-            "%s: MASTER %s request %s addr %lld size %d\n",
+            "%s: REQUESTOR %s request %s addr %lld size %d\n",
             __func__,
-            _system->getMasterName(pkt->req->masterId()),
+            _system->getRequestorName(pkt->req->requestorId()),
             pkt->cmdString(), pkt->getAddr(), pkt->getSize());
 
     uint64_t required_entries = divCeil(pkt->getSize(), memoryPacketSize);
@@ -182,7 +182,7 @@
     if (req_accepted) {
         // The packet is accepted - log it
         logRequest(pkt->isRead()? READ : WRITE,
-                   pkt->req->masterId(),
+                   pkt->req->requestorId(),
                    pkt->qosValue(),
                    pkt->getAddr(),
                    required_entries);
@@ -225,7 +225,7 @@
         for (uint8_t i = 0; i < numPriorities(); ++i) {
             std::string plist = "";
             for (auto& e : (busState == WRITE ? writeQueue[i]: readQueue[i])) {
-                plist += (std::to_string(e->req->masterId())) + " ";
+                plist += (std::to_string(e->req->requestorId())) + " ";
             }
             DPRINTF(QOS,
                     "%s priority Queue [%i] contains %i elements, "
@@ -255,9 +255,9 @@
             queue->erase(p_it);
 
             DPRINTF(QOS,
-                    "%s scheduling packet address %d for master %s from "
+                    "%s scheduling packet address %d for requestor %s from "
                     "priority queue %d\n", __func__, pkt->getAddr(),
-                    _system->getMasterName(pkt->req->masterId()),
+                    _system->getRequestorName(pkt->req->requestorId()),
                     curr_prio);
             break;
         }
@@ -272,9 +272,9 @@
     uint64_t removed_entries = divCeil(pkt->getSize(), memoryPacketSize);
 
     DPRINTF(QOS,
-            "%s scheduled packet address %d for master %s size is %d, "
+            "%s scheduled packet address %d for requestor %s size is %d, "
             "corresponds to %d memory packets\n", __func__, pkt->getAddr(),
-            _system->getMasterName(pkt->req->masterId()),
+            _system->getRequestorName(pkt->req->requestorId()),
             pkt->getSize(), removed_entries);
 
     // Schedule response
@@ -287,7 +287,7 @@
 
     // Log the response
     logResponse(pkt->isRead()? READ : WRITE,
-                pkt->req->masterId(),
+                pkt->req->requestorId(),
                 pkt->qosValue(),
                 pkt->getAddr(),
                 removed_entries, responseLatency);
@@ -348,7 +348,8 @@
 
 MemSinkCtrl::MemoryPort::MemoryPort(const std::string& n,
                                     MemSinkCtrl& m)
-  : QueuedSlavePort(n, &m, queue, true), memory(m), queue(memory, *this, true)
+  : QueuedResponsePort(n, &m, queue, true),
+   memory(m), queue(memory, *this, true)
 {}
 
 AddrRangeList
diff --git a/src/mem/qos/mem_sink.hh b/src/mem/qos/mem_sink.hh
index 5f6c1be..93783ae 100644
--- a/src/mem/qos/mem_sink.hh
+++ b/src/mem/qos/mem_sink.hh
@@ -68,7 +68,7 @@
     using PacketQueue = std::deque<PacketPtr>;
 
   private:
-    class MemoryPort : public QueuedSlavePort
+    class MemoryPort : public QueuedResponsePort
     {
       private:
         /** reference to parent memory object */
@@ -135,11 +135,11 @@
     DrainState drain() override;
 
     /**
-     * Getter method to access this memory's slave port
+     * Getter method to access this memory's response port
      *
      * @param if_name interface name
      * @param idx port ID number
-     * @return reference to this memory's slave port
+     * @return reference to this memory's response port
      */
     Port &getPort(const std::string &if_name, PortID=InvalidPortID) override;
 
@@ -164,7 +164,7 @@
     /** Write request packets queue buffer size in #packets */
     const uint64_t writeBufferSize;
 
-    /** Memory slave port */
+    /** Memory response port */
     MemoryPort port;
 
     /**
diff --git a/src/mem/qos/policy.cc b/src/mem/qos/policy.cc
index b5431d2..93c841d 100644
--- a/src/mem/qos/policy.cc
+++ b/src/mem/qos/policy.cc
@@ -51,7 +51,7 @@
 Policy::schedule(const PacketPtr pkt)
 {
     assert(pkt->req);
-    return schedule(pkt->req->masterId(), pkt->getSize());
+    return schedule(pkt->req->requestorId(), pkt->getSize());
 }
 
 } // namespace QoS
diff --git a/src/mem/qos/policy.hh b/src/mem/qos/policy.hh
index 3ffe481..7bf6c12 100644
--- a/src/mem/qos/policy.hh
+++ b/src/mem/qos/policy.hh
@@ -74,31 +74,32 @@
     void setMemCtrl(MemCtrl* mem) { memCtrl = mem; };
 
     /**
-     * Builds a MasterID/value pair given a master input.
-     * This will be lookuped in the system list of masters in order
-     * to retrieve the associated MasterID.
-     * In case the master name/object cannot be resolved, the pairing
+     * Builds a RequestorID/value pair given a requestor input.
+     * This will be looked up in the system list of requestors in order
+     * to retrieve the associated RequestorID.
+     * In case the requestor name/object cannot be resolved, the pairing
      * method will panic.
      *
-     * @param master Master to lookup in the system
-     * @param value Value to be associated with the MasterID
-     * @return A MasterID/Value pair.
+     * @param requestor Requestor to lookup in the system
+     * @param value Value to be associated with the RequestorID
+     * @return A RequestorID/Value pair.
      */
-    template <typename M, typename T>
-    std::pair<MasterID, T> pair(M master, T value);
+    template <typename Requestor, typename T>
+    std::pair<RequestorID, T> pair(Requestor requestor, T value);
 
     /**
      * Schedules data - must be defined by derived class
      *
-     * @param mId master id to schedule
+     * @param requestor_id requestor id to schedule
      * @param data data to schedule
      * @return QoS priority value
      */
-    virtual uint8_t schedule(const MasterID mId, const uint64_t data) = 0;
+    virtual uint8_t schedule(const RequestorID requestor_id,
+                              const uint64_t data) = 0;
 
     /**
      * Schedules a packet. Non virtual interface for the scheduling
-     * method requiring a master ID.
+     * method requiring a requestor id.
      *
      * @param pkt pointer to packet to schedule
      * @return QoS priority value
@@ -110,20 +111,20 @@
     MemCtrl* memCtrl;
 };
 
-template <typename M, typename T>
-std::pair<MasterID, T>
-Policy::pair(M master, T value)
+template <typename Requestor, typename T>
+std::pair<RequestorID, T>
+Policy::pair(Requestor requestor, T value)
 {
-    auto id = memCtrl->system()->lookupMasterId(master);
+    auto id = memCtrl->system()->lookupRequestorId(requestor);
 
-    panic_if(id == Request::invldMasterId,
-             "Unable to find master %s\n", master);
+    panic_if(id == Request::invldRequestorId,
+             "Unable to find requestor %s\n", requestor);
 
     DPRINTF(QOS,
-            "Master %s [id %d] associated with QoS data %d\n",
-            master, id, value);
+            "Requestor %s [id %d] associated with QoS data %d\n",
+            requestor, id, value);
 
-    return std::pair<MasterID, T>(id, value);
+    return std::pair<RequestorID, T>(id, value);
 }
 
 } // namespace QoS
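
Since schedule(RequestorID, uint64_t) is the only hook a concrete policy must
provide, a minimal subclass of the renamed interface could look like the
sketch below; the class name and the even/odd rule are assumptions, not part
of this patch.

    // Purely illustrative policy.
    class ParityPolicy : public QoS::Policy
    {
      public:
        using QoS::Policy::Policy;  // assumes the base Params constructor

        uint8_t
        schedule(const RequestorID requestor_id, const uint64_t data) override
        {
            // Two arbitrary priority buckets keyed on the requestor id.
            return (requestor_id % 2) ? 1 : 0;
        }
    };
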
diff --git a/src/mem/qos/policy_fixed_prio.cc b/src/mem/qos/policy_fixed_prio.cc
index 449a25d..d00048b 100644
--- a/src/mem/qos/policy_fixed_prio.cc
+++ b/src/mem/qos/policy_fixed_prio.cc
@@ -59,35 +59,36 @@
 }
 
 void
-FixedPriorityPolicy::initMasterName(std::string master, uint8_t priority)
+FixedPriorityPolicy::initRequestorName(std::string requestor, uint8_t priority)
 {
     priorityMap.insert(
-        this->pair<std::string, uint8_t>(master, priority));
+        this->pair<std::string, uint8_t>(requestor, priority));
 }
 
 void
-FixedPriorityPolicy::initMasterObj(const SimObject* master, uint8_t priority)
+FixedPriorityPolicy::initRequestorObj(const SimObject* requestor,
+                                   uint8_t priority)
 {
     priorityMap.insert(
-        this->pair<const SimObject*, uint8_t>(master, priority));
+        this->pair<const SimObject*, uint8_t>(requestor, priority));
 }
 
 uint8_t
-FixedPriorityPolicy::schedule(const MasterID mId, const uint64_t data)
+FixedPriorityPolicy::schedule(const RequestorID id, const uint64_t data)
 {
-    // Reads a packet's MasterID contained in its encapsulated request
+    // Reads a packet's RequestorID contained in its encapsulated request
     // if a match is found in the configured priority map, returns the
     // matching priority, else returns zero
 
-    auto ret = priorityMap.find(mId);
+    auto ret = priorityMap.find(id);
 
     if (ret != priorityMap.end()) {
         return ret->second;
     } else {
-        DPRINTF(QOS, "Master %s (MasterID %d) not present in priorityMap, "
-                     "assigning default priority %d\n",
-                      memCtrl->system()->getMasterName(mId),
-                      mId, defaultPriority);
+        DPRINTF(QOS, "Requestor %s (RequestorID %d) not present in "
+                     "priorityMap, assigning default priority %d\n",
+                      memCtrl->system()->getRequestorName(id),
+                      id, defaultPriority);
         return defaultPriority;
     }
 }
diff --git a/src/mem/qos/policy_fixed_prio.hh b/src/mem/qos/policy_fixed_prio.hh
index 08f46a4..b25c34b 100644
--- a/src/mem/qos/policy_fixed_prio.hh
+++ b/src/mem/qos/policy_fixed_prio.hh
@@ -48,8 +48,8 @@
 /**
  * Fixed Priority QoS Policy
  *
- * Fixed Priority Policy: based on a configured MasterID to priority map,
- * it returns a fixed QoS priority value: every master has a fixed priority.
+ * Fixed Priority Policy: based on a configured RequestorID to priority map,
+ * it returns a fixed QoS priority value: every requestor has a fixed priority.
  */
 class FixedPriorityPolicy : public Policy
 {
@@ -62,42 +62,42 @@
     void init() override;
 
     /**
-     * Initialize the fixed master's priority by providing
-     * the master's name and priority value.
-     * The master's name has to match a name in the system.
+     * Initialize the fixed requestor's priority by providing
+     * the requestor's name and priority value.
+     * The requestor's name has to match a name in the system.
      *
-     * @param master master's name to lookup.
-     * @param priority priority value for the master
+     * @param requestor requestor's name to lookup.
+     * @param priority priority value for the requestor
      */
-    void initMasterName(std::string master, uint8_t priority);
+    void initRequestorName(std::string requestor, uint8_t priority);
 
     /**
-     * Initialize the fixed master's priority by providing
-     * the master's SimObject pointer and priority value.
+     * Initialize the fixed requestor's priority by providing
+     * the requestor's SimObject pointer and priority value.
      *
-     * @param master master's SimObject pointer to lookup.
-     * @param priority priority value for the master
+     * @param requestor requestor's SimObject pointer to lookup.
+     * @param priority priority value for the requestor
      */
-    void initMasterObj(const SimObject* master, uint8_t priority);
+    void initRequestorObj(const SimObject* requestor, uint8_t priority);
 
     /**
      * Schedules a packet based on fixed priority configuration
      *
-     * @param mId master id to schedule
+     * @param id requestor id to schedule
      * @param data data to schedule
      * @return QoS priority value
      */
-    virtual uint8_t schedule(const MasterID, const uint64_t) override;
+    virtual uint8_t schedule(const RequestorID, const uint64_t) override;
 
   protected:
-    /** Default fixed priority value for non-listed masters */
+    /** Default fixed priority value for non-listed requestors */
     const uint8_t defaultPriority;
 
     /**
-     * Priority map, associates configured masters with
+     * Priority map, associates configured requestors with
      * a fixed QoS priority value
      */
-    std::map<MasterID, uint8_t> priorityMap;
+    std::map<RequestorID, uint8_t> priorityMap;
 };
 
 } // namespace QoS
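
The two renamed initializers are typically called once per requestor while the
policy is being configured; both resolve their argument through Policy::pair()
and fill priorityMap. A short sketch (the policy pointer, requestor name,
SimObject pointer and priority values are illustrative):

    policy->initRequestorName("system.cpu.dcache", 2);  // by requestor name
    policy->initRequestorObj(dma_device, 0);             // by SimObject pointer
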
diff --git a/src/mem/qos/policy_pf.cc b/src/mem/qos/policy_pf.cc
index 58c4e38..47e2096 100644
--- a/src/mem/qos/policy_pf.cc
+++ b/src/mem/qos/policy_pf.cc
@@ -53,32 +53,32 @@
 PropFairPolicy::~PropFairPolicy()
 {}
 
-template <typename Master>
+template <typename Requestor>
 void
-PropFairPolicy::initMaster(const Master master, const double score)
+PropFairPolicy::initRequestor(const Requestor requestor, const double score)
 {
-    MasterID m_id = memCtrl->system()->lookupMasterId(master);
+    RequestorID id = memCtrl->system()->lookupRequestorId(requestor);
 
-    assert(m_id != Request::invldMasterId);
+    assert(id != Request::invldRequestorId);
 
-    // Setting the Initial score for the selected master.
-    history.push_back(std::make_pair(m_id, score));
+    // Setting the Initial score for the selected requestor.
+    history.push_back(std::make_pair(id, score));
 
     fatal_if(history.size() > memCtrl->numPriorities(),
-        "Policy's maximum number of masters is currently dictated "
+        "Policy's maximum number of requestors is currently dictated "
         "by the maximum number of priorities\n");
 }
 
 void
-PropFairPolicy::initMasterName(const std::string master, const double score)
+PropFairPolicy::initRequestorName(const std::string requestor, const double score)
 {
-    initMaster(master, score);
+    initRequestor(requestor, score);
 }
 
 void
-PropFairPolicy::initMasterObj(const SimObject* master, const double score)
+PropFairPolicy::initRequestorObj(const SimObject* requestor, const double score)
 {
-    initMaster(master, score);
+    initRequestor(requestor, score);
 }
 
 double
@@ -89,10 +89,10 @@
 }
 
 uint8_t
-PropFairPolicy::schedule(const MasterID pkt_mid, const uint64_t pkt_size)
+PropFairPolicy::schedule(const RequestorID pkt_id, const uint64_t pkt_size)
 {
     auto sort_pred =
-    [] (const MasterHistory& lhs, const MasterHistory& rhs)
+    [] (const RequestorHistory& lhs, const RequestorHistory& rhs)
     { return lhs.second > rhs.second; };
 
     // Sorting in reverse, based on each requestor's history score:
@@ -105,10 +105,10 @@
     uint8_t pkt_priority = 0;
     for (auto m_hist = history.begin(); m_hist != history.end(); m_hist++) {
 
-        MasterID curr_mid = m_hist->first;
+        RequestorID curr_id = m_hist->first;
         double& curr_score = m_hist->second;
 
-        if (curr_mid == pkt_mid) {
+        if (curr_id == pkt_id) {
             // The qos priority is the position in the sorted vector.
             pkt_priority = std::distance(history.begin(), m_hist);
 
diff --git a/src/mem/qos/policy_pf.hh b/src/mem/qos/policy_pf.hh
index c741e9c..429e85b 100644
--- a/src/mem/qos/policy_pf.hh
+++ b/src/mem/qos/policy_pf.hh
@@ -67,38 +67,38 @@
     virtual ~PropFairPolicy();
 
     /**
-     * Initialize the master's score by providing
-     * the master's name and initial score value.
-     * The master's name has to match a name in the system.
+     * Initialize the requestor's score by providing
+     * the requestor's name and initial score value.
+     * The requestor's name has to match a name in the system.
      *
-     * @param master master's name to lookup.
-     * @param score initial score value for the master
+     * @param requestor requestor's name to lookup.
+     * @param score initial score value for the requestor
      */
-    void initMasterName(const std::string master, const double score);
+    void initRequestorName(const std::string requestor, const double score);
 
     /**
-     * Initialize the master's score by providing
-     * the master's SimObject pointer and initial score value.
-     * The master's pointer has to match a master in the system.
+     * Initialize the requestor's score by providing
+     * the requestor's SimObject pointer and initial score value.
+     * The requestor's pointer has to match a requestor in the system.
      *
-     * @param master master's SimObject pointer to lookup.
-     * @param score initial score value for the master
+     * @param requestor requestor's SimObject pointer to lookup.
+     * @param score initial score value for the requestor
      */
-    void initMasterObj(const SimObject* master, const double score);
+    void initRequestorObj(const SimObject* requestor, const double score);
 
     /**
      * Schedules a packet based on proportional fair configuration
      *
-     * @param m_id master id to schedule
+     * @param id requestor id to schedule
      * @param pkt_size size of the packet
      * @return QoS priority value
      */
     virtual uint8_t
-    schedule(const MasterID m_id, const uint64_t pkt_size) override;
+    schedule(const RequestorID id, const uint64_t pkt_size) override;
 
   protected:
-    template <typename Master>
-    void initMaster(const Master master, const double score);
+    template <typename Requestor>
+    void initRequestor(const Requestor requestor, const double score);
 
     inline double
     updateScore(const double old_score, const uint64_t served_bytes) const;
@@ -107,9 +107,9 @@
     /** PF Policy weight */
     const double weight;
 
-    /** history is keeping track of every master's score */
-    using MasterHistory = std::pair<MasterID, double>;
-    std::vector<MasterHistory> history;
+    /** history is keeping track of every requestor's score */
+    using RequestorHistory = std::pair<RequestorID, double>;
+    std::vector<RequestorHistory> history;
 };
 
 } // namespace QoS
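
A worked example of the renamed history bookkeeping (ids and scores are
illustrative): sorting by descending score, as schedule() does above, makes a
packet's QoS priority the position of its requestor in the sorted vector.

    std::vector<RequestorHistory> history {{0, 4.0}, {1, 1.5}, {2, 2.5}};
    std::sort(history.begin(), history.end(),
              [] (const RequestorHistory& lhs, const RequestorHistory& rhs)
              { return lhs.second > rhs.second; });
    // Sorted order of ids is {0, 2, 1}; a packet from requestor 1, which
    // currently has the lowest score, is assigned priority value 2.
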
diff --git a/src/mem/qos/q_policy.cc b/src/mem/qos/q_policy.cc
index 88ce95d..df16c51 100644
--- a/src/mem/qos/q_policy.cc
+++ b/src/mem/qos/q_policy.cc
@@ -67,8 +67,8 @@
 {
     QueuePolicy::PacketQueue::iterator ret = q->end();
 
-    // Tracks one packet per master in the queue
-    std::unordered_map<MasterID, QueuePolicy::PacketQueue::iterator> track;
+    // Tracks one packet per requestor in the queue
+    std::unordered_map<RequestorID, QueuePolicy::PacketQueue::iterator> track;
 
     // Cycle queue only once
     for (auto pkt_it = q->begin(); pkt_it != q->end(); ++pkt_it) {
@@ -78,51 +78,51 @@
         panic_if(!pkt->req,
                  "QoSQPolicy::lrg detected packet without request");
 
-        // Get Request MasterID
-        MasterID m_id = pkt->req->masterId();
+        // Get Request RequestorID
+        RequestorID requestor_id = pkt->req->requestorId();
         DPRINTF(QOS, "QoSQPolicy::lrg checking packet "
-                     "from queue with id %d\n", m_id);
+                     "from queue with id %d\n", requestor_id);
 
-        // Check if this is a known master.
-        panic_if(!memCtrl->hasMaster(m_id),
-                 "%s: Unrecognized Master\n", __func__);
+        // Check if this is a known requestor.
+        panic_if(!memCtrl->hasRequestor(requestor_id),
+                 "%s: Unrecognized Requestor\n", __func__);
 
         panic_if(toServe.empty(),
                  "%s: toServe list is empty\n", __func__);
 
-        if (toServe.front() == m_id) {
+        if (toServe.front() == requestor_id) {
             DPRINTF(QOS, "QoSQPolicy::lrg matched to served "
-                         "master id %d\n", m_id);
-            // This packet matches the MasterID to be served next
+                         "requestor id %d\n", requestor_id);
+            // This packet matches the RequestorID to be served next
             // move toServe front to back
-            toServe.push_back(m_id);
+            toServe.push_back(requestor_id);
             toServe.pop_front();
 
             return pkt_it;
         }
 
-        // The master generating the packet is not first in the toServe list
-        // (Doesn't have the highest priority among masters)
-        // Check if this is the first packet seen with its master ID
+        // The requestor generating the packet is not first in the toServe list
+        // (Doesn't have the highest priority among requestors)
+        // Check if this is the first packet seen with its requestor id
         // and remember it. Then keep looping over the remaining packets
         // in the queue.
-        if (track.find(m_id) == track.end()) {
-            track[m_id] = pkt_it;
+        if (track.find(requestor_id) == track.end()) {
+            track[requestor_id] = pkt_it;
             DPRINTF(QOS, "QoSQPolicy::lrg tracking a packet for "
-                         "master id %d\n", m_id);
+                         "requestor id %d\n", requestor_id);
         }
     }
 
-    // If here, the current master to be serviced doesn't have a pending
-    // packet in the queue: look for the next master in the list.
-    for (const auto& masterId : toServe) {
+    // If here, the current requestor to be serviced doesn't have a pending
+    // packet in the queue: look for the next requestor in the list.
+    for (const auto& requestorId : toServe) {
         DPRINTF(QOS, "QoSQPolicy::lrg evaluating alternative "
-                     "master id %d\n", masterId);
+                     "requestor id %d\n", requestorId);
 
-        if (track.find(masterId) != track.end()) {
-            ret = track[masterId];
-            DPRINTF(QOS, "QoSQPolicy::lrg master id "
-                         "%d selected for service\n", masterId);
+        if (track.find(requestorId) != track.end()) {
+            ret = track[requestorId];
+            DPRINTF(QOS, "QoSQPolicy::lrg requestor id "
+                         "%d selected for service\n", requestorId);
 
             return ret;
         }
@@ -138,9 +138,9 @@
 void
 LrgQueuePolicy::enqueuePacket(PacketPtr pkt)
 {
-    MasterID m_id = pkt->masterId();
-    if (!memCtrl->hasMaster(m_id)) {
-        toServe.push_back(m_id);
+    RequestorID requestor_id = pkt->requestorId();
+    if (!memCtrl->hasRequestor(requestor_id)) {
+        toServe.push_back(requestor_id);
     }
 };
 
diff --git a/src/mem/qos/q_policy.hh b/src/mem/qos/q_policy.hh
index ba36b43..f932b91 100644
--- a/src/mem/qos/q_policy.hh
+++ b/src/mem/qos/q_policy.hh
@@ -155,7 +155,7 @@
 /**
  * Least Recently Granted Queue Policy
  * It selects packets from the queue with a round
- * robin-like policy: using the master id as a switching
+ * robin-like policy: using the requestor id as a switching
  * parameter rather than switching over a time quantum.
  */
 class LrgQueuePolicy : public QueuePolicy
@@ -179,10 +179,10 @@
   protected:
     /**
      * Support structure for lrg algorithms:
-     * keeps track of serviced masters,
+     * keeps track of serviced requestors,
      * always serve the front element.
      */
-    std::list<MasterID> toServe;
+    std::list<RequestorID> toServe;
 };
 
 } // namespace QoS
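
A small worked example of the toServe rotation used by the policy above
(requestor ids are illustrative): the front entry is the requestor to be
granted next, and once one of its packets is selected its id moves to the
back of the list.

    std::list<RequestorID> toServe {3, 1, 7};
    RequestorID granted = toServe.front();  // requestor 3 is granted first
    toServe.push_back(granted);
    toServe.pop_front();                    // order is now {1, 7, 3}
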
diff --git a/src/mem/qport.hh b/src/mem/qport.hh
index 97a2065..a70fbd3 100644
--- a/src/mem/qport.hh
+++ b/src/mem/qport.hh
@@ -51,11 +51,11 @@
  * A queued port is a port that has an infinite queue for outgoing
  * packets and thus decouples the module that wants to send
  * request/responses from the flow control (retry mechanism) of the
- * port. A queued port can be used by both a master and a slave. The
+ * port. A queued port can be used by both a requestor and a responder. The
  * queue is a parameter to allow tailoring of the queue implementation
  * (used in the cache).
  */
-class QueuedSlavePort : public ResponsePort
+class QueuedResponsePort : public ResponsePort
 {
 
   protected:
@@ -74,12 +74,12 @@
      * behaviour in a subclass, and provide the latter to the
      * QueuePort constructor.
      */
-    QueuedSlavePort(const std::string& name, SimObject* owner,
+    QueuedResponsePort(const std::string& name, SimObject* owner,
                     RespPacketQueue &resp_queue, PortID id = InvalidPortID) :
         ResponsePort(name, owner, id), respQueue(resp_queue)
     { }
 
-    virtual ~QueuedSlavePort() { }
+    virtual ~QueuedResponsePort() { }
 
     /**
      * Schedule the sending of a timing response.
@@ -97,13 +97,13 @@
 };
 
 /**
- * The QueuedMasterPort combines two queues, a request queue and a
+ * The QueuedRequestPort combines two queues, a request queue and a
  * snoop response queue, that both share the same port. The flow
  * control for requests and snoop responses are completely
  * independent, and so each queue manages its own flow control
  * (retries).
  */
-class QueuedMasterPort : public RequestPort
+class QueuedRequestPort : public RequestPort
 {
 
   protected:
@@ -127,7 +127,7 @@
      * behaviour in a subclass, and provide the latter to the
      * QueuePort constructor.
      */
-    QueuedMasterPort(const std::string& name, SimObject* owner,
+    QueuedRequestPort(const std::string& name, SimObject* owner,
                      ReqPacketQueue &req_queue,
                      SnoopRespPacketQueue &snoop_resp_queue,
                      PortID id = InvalidPortID) :
@@ -135,7 +135,7 @@
         snoopRespQueue(snoop_resp_queue)
     { }
 
-    virtual ~QueuedMasterPort() { }
+    virtual ~QueuedRequestPort() { }
 
     /**
      * Schedule the sending of a timing request.
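
The scheduling helpers keep their behaviour under the new names: rather than
sending inline, the owner queues the packet for a future tick and lets the
queue handle flow control and retries. A minimal sketch (the port and delay
names are assumptions):

    // Owner of a QueuedResponsePort scheduling a response for a later tick.
    port.schedTimingResp(pkt, curTick() + responseLatency);
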
diff --git a/src/mem/request.hh b/src/mem/request.hh
index 7f0ddcb..43f54e6 100644
--- a/src/mem/request.hh
+++ b/src/mem/request.hh
@@ -82,7 +82,7 @@
 class ThreadContext;
 
 typedef std::shared_ptr<Request> RequestPtr;
-typedef uint16_t MasterID;
+typedef uint16_t RequestorID;
 
 class Request
 {
@@ -236,23 +236,23 @@
     static const FlagsType HTM_CMD = HTM_START | HTM_COMMIT |
         HTM_CANCEL | HTM_ABORT;
 
-    /** Master Ids that are statically allocated
+    /** Requestor Ids that are statically allocated
      * @{*/
-    enum : MasterID {
-        /** This master id is used for writeback requests by the caches */
-        wbMasterId = 0,
+    enum : RequestorID {
+        /** This requestor id is used for writeback requests by the caches */
+        wbRequestorId = 0,
         /**
-         * This master id is used for functional requests that
+         * This requestor id is used for functional requests that
          * don't come from a particular device
          */
-        funcMasterId = 1,
-        /** This master id is used for message signaled interrupts */
-        intMasterId = 2,
+        funcRequestorId = 1,
+        /** This requestor id is used for message signaled interrupts */
+        intRequestorId = 2,
         /**
-         * Invalid master id for assertion checking only. It is
+         * Invalid requestor id for assertion checking only. It is
          * invalid behavior to ever send this id as part of a request.
          */
-        invldMasterId = std::numeric_limits<MasterID>::max()
+        invldRequestorId = std::numeric_limits<RequestorID>::max()
     };
     /** @} */
 
@@ -347,7 +347,7 @@
     /** The requestor ID which is unique in the system for all ports
      * that are capable of issuing a transaction
      */
-    MasterID _masterId = invldMasterId;
+    RequestorID _requestorId = invldRequestorId;
 
     /** Flag structure for the request. */
     Flags _flags;
@@ -427,25 +427,25 @@
      * just physical address, size, flags, and timestamp (to curTick()).
      * These fields are adequate to perform a request.
      */
-    Request(Addr paddr, unsigned size, Flags flags, MasterID mid) :
-        _paddr(paddr), _size(size), _masterId(mid), _time(curTick())
+    Request(Addr paddr, unsigned size, Flags flags, RequestorID id) :
+        _paddr(paddr), _size(size), _requestorId(id), _time(curTick())
     {
         _flags.set(flags);
         privateFlags.set(VALID_PADDR|VALID_SIZE);
     }
 
     Request(Addr vaddr, unsigned size, Flags flags,
-            MasterID mid, Addr pc, ContextID cid,
+            RequestorID id, Addr pc, ContextID cid,
             AtomicOpFunctorPtr atomic_op=nullptr)
     {
-        setVirt(vaddr, size, flags, mid, pc, std::move(atomic_op));
+        setVirt(vaddr, size, flags, id, pc, std::move(atomic_op));
         setContext(cid);
     }
 
     Request(const Request& other)
         : _paddr(other._paddr), _size(other._size),
           _byteEnable(other._byteEnable),
-          _masterId(other._masterId),
+          _requestorId(other._requestorId),
           _flags(other._flags),
           _cacheCoherenceFlags(other._cacheCoherenceFlags),
           privateFlags(other.privateFlags),
@@ -493,12 +493,12 @@
      * allocated Request object.
      */
     void
-    setVirt(Addr vaddr, unsigned size, Flags flags, MasterID mid, Addr pc,
+    setVirt(Addr vaddr, unsigned size, Flags flags, RequestorID id, Addr pc,
             AtomicOpFunctorPtr amo_op=nullptr)
     {
         _vaddr = vaddr;
         _size = size;
-        _masterId = mid;
+        _requestorId = id;
         _pc = pc;
         _time = curTick();
 
@@ -737,10 +737,10 @@
     }
 
     /** Accessor for the requestor id. */
-    MasterID
-    masterId() const
+    RequestorID
+    requestorId() const
     {
-        return _masterId;
+        return _requestorId;
     }
 
     uint32_t
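
Typical use of the renamed type and accessor, sketched for an arbitrary device
model: the device asks the System for its RequestorID once and stamps it on
every Request it creates (the variables system, addr and size are assumptions).

    RequestorID id = system->getRequestorId(this);  // registered once per device
    RequestPtr req = std::make_shared<Request>(addr, size, 0, id);
    assert(req->requestorId() == id);
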
diff --git a/src/mem/ruby/network/MessageBuffer.py b/src/mem/ruby/network/MessageBuffer.py
index c796960..a0a208f 100644
--- a/src/mem/ruby/network/MessageBuffer.py
+++ b/src/mem/ruby/network/MessageBuffer.py
@@ -40,5 +40,7 @@
                                        random delays if RubySystem \
                                        randomization flag is True)")
 
-    master = RequestPort("Master port to MessageBuffer receiver")
-    slave = ResponsePort("Slave port from MessageBuffer sender")
+    out_port = RequestPort("Request port to MessageBuffer receiver")
+    master = DeprecatedParam(out_port, '`master` is now called `out_port`')
+    in_port = ResponsePort("Response port from MessageBuffer sender")
+    slave = DeprecatedParam(in_port, '`slave` is now called `in_port`')
diff --git a/src/mem/ruby/network/Network.py b/src/mem/ruby/network/Network.py
index 5acad60..8999ff1 100644
--- a/src/mem/ruby/network/Network.py
+++ b/src/mem/ruby/network/Network.py
@@ -49,5 +49,7 @@
     ext_links = VectorParam.BasicExtLink("Links to external nodes")
     int_links = VectorParam.BasicIntLink("Links between internal nodes")
 
-    slave = VectorSlavePort("CPU slave port")
-    master = VectorMasterPort("CPU master port")
+    in_port = VectorResponsePort("CPU input port")
+    slave = DeprecatedParam(in_port, '`slave` is now called `in_port`')
+    out_port = VectorRequestPort("CPU output port")
+    master = DeprecatedParam(out_port, '`master` is now called `out_port`')
diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc
index dd3a9e7..5d9e5f6 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.cc
+++ b/src/mem/ruby/slicc_interface/AbstractController.cc
@@ -50,7 +50,7 @@
 AbstractController::AbstractController(const Params *p)
     : ClockedObject(p), Consumer(this), m_version(p->version),
       m_clusterID(p->cluster_id),
-      m_masterId(p->system->getMasterId(this)), m_is_blocking(false),
+      m_id(p->system->getRequestorId(this)), m_is_blocking(false),
       m_number_of_TBEs(p->number_of_TBEs),
       m_transitions_per_cycle(p->transitions_per_cycle),
       m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
@@ -219,7 +219,7 @@
     }
 
     RequestPtr req
-        = std::make_shared<Request>(mem_msg->m_addr, req_size, 0, m_masterId);
+        = std::make_shared<Request>(mem_msg->m_addr, req_size, 0, m_id);
     PacketPtr pkt;
     if (mem_msg->getType() == MemoryRequestType_MEMORY_WB) {
         pkt = Packet::createWrite(req);
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh
index daa52da..98cb0a7 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.hh
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -147,7 +147,7 @@
 
   public:
     MachineID getMachineID() const { return m_machineID; }
-    MasterID getMasterId() const { return m_masterId; }
+    RequestorID getRequestorId() const { return m_id; }
 
     Stats::Histogram& getDelayHist() { return m_delayHistogram; }
     Stats::Histogram& getDelayVCHist(uint32_t index)
@@ -185,8 +185,8 @@
     MachineID m_machineID;
     const NodeID m_clusterID;
 
-    // MasterID used by some components of gem5.
-    const MasterID m_masterId;
+    // RequestorID used by some components of gem5.
+    const RequestorID m_id;
 
     Network *m_net_ptr;
     bool m_is_blocking;
@@ -237,7 +237,7 @@
         void recvReqRetry();
     };
 
-    /* Master port to the memory controller. */
+    /* Request port to the memory controller. */
     MemoryPort memoryPort;
 
     // State that is stored in packets sent to the memory controller.
diff --git a/src/mem/ruby/system/CacheRecorder.cc b/src/mem/ruby/system/CacheRecorder.cc
index 1fc7bb8..3fb5c2f 100644
--- a/src/mem/ruby/system/CacheRecorder.cc
+++ b/src/mem/ruby/system/CacheRecorder.cc
@@ -87,7 +87,7 @@
         m_records_flushed++;
         auto req = std::make_shared<Request>(rec->m_data_address,
                                              m_block_size_bytes, 0,
-                                             Request::funcMasterId);
+                                             Request::funcRequestorId);
         MemCmd::Command requestType = MemCmd::FlushReq;
         Packet *pkt = new Packet(req, requestType);
 
@@ -119,18 +119,20 @@
                 requestType = MemCmd::ReadReq;
                 req = std::make_shared<Request>(
                     traceRecord->m_data_address + rec_bytes_read,
-                    RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
+                    RubySystem::getBlockSizeBytes(), 0,
+                                    Request::funcRequestorId);
             }   else if (traceRecord->m_type == RubyRequestType_IFETCH) {
                 requestType = MemCmd::ReadReq;
                 req = std::make_shared<Request>(
                         traceRecord->m_data_address + rec_bytes_read,
                         RubySystem::getBlockSizeBytes(),
-                        Request::INST_FETCH, Request::funcMasterId);
+                        Request::INST_FETCH, Request::funcRequestorId);
             }   else {
                 requestType = MemCmd::WriteReq;
                 req = std::make_shared<Request>(
                     traceRecord->m_data_address + rec_bytes_read,
-                    RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
+                    RubySystem::getBlockSizeBytes(), 0,
+                                Request::funcRequestorId);
             }
 
             Packet *pkt = new Packet(req, requestType);
diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc
index bad49c9..938044a 100644
--- a/src/mem/ruby/system/DMASequencer.cc
+++ b/src/mem/ruby/system/DMASequencer.cc
@@ -57,8 +57,8 @@
     RubyPort::init();
     m_data_block_mask = mask(RubySystem::getBlockSizeBits());
 
-    for (const auto &s_port : slave_ports)
-        s_port->sendRangeChange();
+    for (const auto &response_port : response_ports)
+        response_port->sendRangeChange();
 }
 
 RequestStatus
diff --git a/src/mem/ruby/system/GPUCoalescer.cc b/src/mem/ruby/system/GPUCoalescer.cc
index 80bc19a..03c392f 100644
--- a/src/mem/ruby/system/GPUCoalescer.cc
+++ b/src/mem/ruby/system/GPUCoalescer.cc
@@ -669,7 +669,7 @@
             // back the requesting CU when we receive write
             // complete callbacks for all issued Ruby requests of this
             // instruction.
-            RubyPort::MemSlavePort* mem_slave_port = ss->port;
+            RubyPort::MemResponsePort* mem_response_port = ss->port;
 
             GPUDynInstPtr gpuDynInst = nullptr;
 
@@ -686,7 +686,8 @@
             }
 
             PendingWriteInst& inst = pendingWriteInsts[seqNum];
-            inst.addPendingReq(mem_slave_port, gpuDynInst, m_usingRubyTester);
+            inst.addPendingReq(mem_response_port, gpuDynInst,
+                               m_usingRubyTester);
         }
 
         return true;
@@ -783,7 +784,7 @@
     for (auto& pkt : mylist) {
         RubyPort::SenderState *ss =
             safe_cast<RubyPort::SenderState *>(pkt->senderState);
-        MemSlavePort *port = ss->port;
+        MemResponsePort *port = ss->port;
         assert(port != NULL);
 
         pkt->senderState = ss->predecessor;
diff --git a/src/mem/ruby/system/GPUCoalescer.hh b/src/mem/ruby/system/GPUCoalescer.hh
index 401f70b..3b1b7af 100644
--- a/src/mem/ruby/system/GPUCoalescer.hh
+++ b/src/mem/ruby/system/GPUCoalescer.hh
@@ -120,7 +120,7 @@
 
 // PendingWriteInst tracks the number of outstanding Ruby requests
 // per write instruction. Once all requests associated with one instruction
-// are completely done in Ruby, we call back the requester to mark
+// are completely done in Ruby, we call back the requestor to mark
 // that this instruction is complete.
 class PendingWriteInst
 {
@@ -135,7 +135,7 @@
     {}
 
     void
-    addPendingReq(RubyPort::MemSlavePort* port, GPUDynInstPtr inst,
+    addPendingReq(RubyPort::MemResponsePort* port, GPUDynInstPtr inst,
                   bool usingRubyTester)
     {
         assert(port);
@@ -157,7 +157,7 @@
         return (numPendingStores == 0) ? true : false;
     }
 
-    // ack the original requester that this write instruction is complete
+    // ack the original requestor that this write instruction is complete
     void
     ackWriteCompletion(bool usingRubyTester)
     {
@@ -175,7 +175,7 @@
             pkt->senderState = ss;
         }
 
-        // send the ack response to the requester
+        // send the ack response to the requestor
         originalPort->sendTimingResp(pkt);
     }
 
@@ -192,7 +192,7 @@
     // which implies multiple ports per instruction. However, we need
     // only 1 of the ports to call back the CU. Therefore, here we keep
     // track the port that sent the first packet of this instruction.
-    RubyPort::MemSlavePort* originalPort;
+    RubyPort::MemResponsePort* originalPort;
     // similar to the originalPort, this gpuDynInstPtr is set only for
     // the first packet of this instruction.
     GPUDynInstPtr gpuDynInstPtr;
@@ -201,12 +201,12 @@
 class GPUCoalescer : public RubyPort
 {
   public:
-    class GMTokenPort : public TokenSlavePort
+    class GMTokenPort : public TokenResponsePort
     {
       public:
         GMTokenPort(const std::string& name, ClockedObject *owner,
                     PortID id = InvalidPortID)
-            : TokenSlavePort(name, owner, id)
+            : TokenResponsePort(name, owner, id)
         { }
         ~GMTokenPort() { }
 
diff --git a/src/mem/ruby/system/GPUCoalescer.py b/src/mem/ruby/system/GPUCoalescer.py
index 9d4a76b..0bb5628 100644
--- a/src/mem/ruby/system/GPUCoalescer.py
+++ b/src/mem/ruby/system/GPUCoalescer.py
@@ -53,4 +53,4 @@
        "deadlock/livelock declared")
    garnet_standalone = Param.Bool(False, "")
 
-   gmTokenPort = SlavePort("Port to the CU for sharing tokens")
+   gmTokenPort = ResponsePort("Port to the CU for sharing tokens")
diff --git a/src/mem/ruby/system/HTMSequencer.cc b/src/mem/ruby/system/HTMSequencer.cc
index d2cfa07..87bc7d7 100644
--- a/src/mem/ruby/system/HTMSequencer.cc
+++ b/src/mem/ruby/system/HTMSequencer.cc
@@ -227,7 +227,7 @@
     RubyPort::SenderState *senderState =
         safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
 
-    MemSlavePort *port = safe_cast<MemSlavePort*>(senderState->port);
+    MemResponsePort *port = safe_cast<MemResponsePort*>(senderState->port);
     assert(port != nullptr);
     delete senderState;
 
@@ -237,7 +237,7 @@
             pkt->req->isHTMStart(), pkt->req->isHTMCommit(),
             pkt->req->isHTMCancel(), htm_return_code);
 
-    // turn packet around to go back to requester if response expected
+    // turn packet around to go back to requestor if response expected
     if (pkt->needsResponse()) {
         DPRINTF(RubyPort, "Sending packet back over port\n");
         pkt->makeHtmTransactionalReqResponse(
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index bb86e60..4fc41c9 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -55,28 +55,29 @@
     : ClockedObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
       m_controller(NULL), m_mandatory_q_ptr(NULL),
       m_usingRubyTester(p->using_ruby_tester), system(p->system),
-      pioMasterPort(csprintf("%s.pio-master-port", name()), this),
-      pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
-      memMasterPort(csprintf("%s.mem-master-port", name()), this),
-      memSlavePort(csprintf("%s-mem-slave-port", name()), this,
+      pioRequestPort(csprintf("%s.pio-request-port", name()), this),
+      pioResponsePort(csprintf("%s.pio-response-port", name()), this),
+      memRequestPort(csprintf("%s.mem-request-port", name()), this),
+      memResponsePort(csprintf("%s-mem-response-port", name()), this,
                    p->ruby_system->getAccessBackingStore(), -1,
                    p->no_retry_on_stall),
-      gotAddrRanges(p->port_master_connection_count),
+      gotAddrRanges(p->port_request_ports_connection_count),
       m_isCPUSequencer(p->is_cpu_sequencer)
 {
     assert(m_version != -1);
 
-    // create the slave ports based on the number of connected ports
-    for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
-        slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
-            i), this, p->ruby_system->getAccessBackingStore(),
+    // create the response ports based on the number of connected ports
+    for (size_t i = 0; i < p->port_response_ports_connection_count; ++i) {
+        response_ports.push_back(new MemResponsePort(csprintf
+            ("%s.response_ports%d", name(), i), this,
+            p->ruby_system->getAccessBackingStore(),
             i, p->no_retry_on_stall));
     }
 
-    // create the master ports based on the number of connected ports
-    for (size_t i = 0; i < p->port_master_connection_count; ++i) {
-        master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
-            name(), i), this));
+    // create the request ports based on the number of connected ports
+    for (size_t i = 0; i < p->port_request_ports_connection_count; ++i) {
+        request_ports.push_back(new PioRequestPort(csprintf(
+                    "%s.request_ports%d", name(), i), this));
     }
 }
 
@@ -90,82 +91,84 @@
 Port &
 RubyPort::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "mem_master_port") {
-        return memMasterPort;
-    } else if (if_name == "pio_master_port") {
-        return pioMasterPort;
-    } else if (if_name == "mem_slave_port") {
-        return memSlavePort;
-    } else if (if_name == "pio_slave_port") {
-        return pioSlavePort;
-    } else if (if_name == "master") {
+    if (if_name == "mem_request_port") {
+        return memRequestPort;
+    } else if (if_name == "pio_request_port") {
+        return pioRequestPort;
+    } else if (if_name == "mem_response_port") {
+        return memResponsePort;
+    } else if (if_name == "pio_response_port") {
+        return pioResponsePort;
+    } else if (if_name == "request_ports") {
         // used by the x86 CPUs to connect the interrupt PIO and interrupt
-        // slave port
-        if (idx >= static_cast<PortID>(master_ports.size())) {
-            panic("RubyPort::getPort master: unknown index %d\n", idx);
+        // response port
+        if (idx >= static_cast<PortID>(request_ports.size())) {
+            panic("%s: unknown %s index (%d)\n", __func__, if_name, idx);
         }
 
-        return *master_ports[idx];
-    } else if (if_name == "slave") {
+        return *request_ports[idx];
+    } else if (if_name == "response_ports") {
         // used by the CPUs to connect the caches to the interconnect, and
-        // for the x86 case also the interrupt master
-        if (idx >= static_cast<PortID>(slave_ports.size())) {
-            panic("RubyPort::getPort slave: unknown index %d\n", idx);
+        // for the x86 case also the interrupt request port
+        if (idx >= static_cast<PortID>(response_ports.size())) {
+            panic("%s: unknown %s index (%d)\n", __func__, if_name, idx);
         }
 
-        return *slave_ports[idx];
+        return *response_ports[idx];
     }
 
     // pass it along to our super class
     return ClockedObject::getPort(if_name, idx);
 }
 
-RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
+RubyPort::PioRequestPort::PioRequestPort(const std::string &_name,
                            RubyPort *_port)
-    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
+    : QueuedRequestPort(_name, _port, reqQueue, snoopRespQueue),
       reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
 {
-    DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
+    DPRINTF(RubyPort, "Created request pioport on sequencer %s\n", _name);
 }
 
-RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
+RubyPort::PioResponsePort::PioResponsePort(const std::string &_name,
                            RubyPort *_port)
-    : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
+    : QueuedResponsePort(_name, _port, queue), queue(*_port, *this)
 {
-    DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
+    DPRINTF(RubyPort, "Created response pioport on sequencer %s\n", _name);
 }
 
-RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
+RubyPort::MemRequestPort::MemRequestPort(const std::string &_name,
                            RubyPort *_port)
-    : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
+    : QueuedRequestPort(_name, _port, reqQueue, snoopRespQueue),
       reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
 {
-    DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
+    DPRINTF(RubyPort, "Created request memport on ruby sequencer %s\n", _name);
 }
 
-RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
+RubyPort::
+MemResponsePort::MemResponsePort(const std::string &_name, RubyPort *_port,
                                      bool _access_backing_store, PortID id,
                                      bool _no_retry_on_stall)
-    : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
+    : QueuedResponsePort(_name, _port, queue, id), queue(*_port, *this),
       access_backing_store(_access_backing_store),
       no_retry_on_stall(_no_retry_on_stall)
 {
-    DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
+    DPRINTF(RubyPort, "Created response memport on ruby sequencer %s\n",
+            _name);
 }
 
 bool
-RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
+RubyPort::PioRequestPort::recvTimingResp(PacketPtr pkt)
 {
     RubyPort *rp = static_cast<RubyPort *>(&owner);
     DPRINTF(RubyPort, "Response for address: 0x%#x\n", pkt->getAddr());
 
     // send next cycle
-    rp->pioSlavePort.schedTimingResp(
+    rp->pioResponsePort.schedTimingResp(
             pkt, curTick() + rp->m_ruby_system->clockPeriod());
     return true;
 }
 
-bool RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
+bool RubyPort::MemRequestPort::recvTimingResp(PacketPtr pkt)
 {
     // got a response from a device
     assert(pkt->isResponse());
@@ -174,7 +177,7 @@
     // First we must retrieve the request port from the sender State
     RubyPort::SenderState *senderState =
         safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
-    MemSlavePort *port = senderState->port;
+    MemResponsePort *port = senderState->port;
     assert(port != NULL);
     delete senderState;
 
@@ -191,18 +194,18 @@
 }
 
 bool
-RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
+RubyPort::PioResponsePort::recvTimingReq(PacketPtr pkt)
 {
     RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
 
-    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
-        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
+    for (size_t i = 0; i < ruby_port->request_ports.size(); ++i) {
+        AddrRangeList l = ruby_port->request_ports[i]->getAddrRanges();
         for (auto it = l.begin(); it != l.end(); ++it) {
             if (it->contains(pkt->getAddr())) {
                 // generally it is not safe to assume success here as
                 // the port could be blocked
                 bool M5_VAR_USED success =
-                    ruby_port->master_ports[i]->sendTimingReq(pkt);
+                    ruby_port->request_ports[i]->sendTimingReq(pkt);
                 assert(success);
                 return true;
             }
@@ -212,7 +215,7 @@
 }
 
 Tick
-RubyPort::PioSlavePort::recvAtomic(PacketPtr pkt)
+RubyPort::PioResponsePort::recvAtomic(PacketPtr pkt)
 {
     RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
     // Only atomic_noncaching mode supported!
@@ -220,11 +223,11 @@
         panic("Ruby supports atomic accesses only in noncaching mode\n");
     }
 
-    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
-        AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
+    for (size_t i = 0; i < ruby_port->request_ports.size(); ++i) {
+        AddrRangeList l = ruby_port->request_ports[i]->getAddrRanges();
         for (auto it = l.begin(); it != l.end(); ++it) {
             if (it->contains(pkt->getAddr())) {
-                return ruby_port->master_ports[i]->sendAtomic(pkt);
+                return ruby_port->request_ports[i]->sendAtomic(pkt);
             }
         }
     }
@@ -232,7 +235,7 @@
 }
 
 bool
-RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
+RubyPort::MemResponsePort::recvTimingReq(PacketPtr pkt)
 {
     DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
             pkt->getAddr(), id);
@@ -255,7 +258,7 @@
     if (pkt->cmd != MemCmd::MemSyncReq) {
         if (!isPhysMemAddress(pkt)) {
             assert(!pkt->req->isHTMCmd());
-            assert(ruby_port->memMasterPort.isConnected());
+            assert(ruby_port->memRequestPort.isConnected());
             DPRINTF(RubyPort, "Request address %#x assumed to be a "
                     "pio address\n", pkt->getAddr());
 
@@ -265,7 +268,7 @@
 
             // send next cycle
             RubySystem *rs = ruby_port->m_ruby_system;
-            ruby_port->memMasterPort.schedTimingReq(pkt,
+            ruby_port->memRequestPort.schedTimingReq(pkt,
                 curTick() + rs->clockPeriod());
             return true;
         }
@@ -304,7 +307,7 @@
 }
 
 Tick
-RubyPort::MemSlavePort::recvAtomic(PacketPtr pkt)
+RubyPort::MemResponsePort::recvAtomic(PacketPtr pkt)
 {
     RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
     // Only atomic_noncaching mode supported!
@@ -316,7 +319,7 @@
     // pio port.
     if (pkt->cmd != MemCmd::MemSyncReq) {
         if (!isPhysMemAddress(pkt)) {
-            assert(ruby_port->memMasterPort.isConnected());
+            assert(ruby_port->memRequestPort.isConnected());
             DPRINTF(RubyPort, "Request address %#x assumed to be a "
                     "pio address\n", pkt->getAddr());
 
@@ -325,7 +328,7 @@
             pkt->pushSenderState(new SenderState(this));
 
             // send next cycle
-            Tick req_ticks = ruby_port->memMasterPort.sendAtomic(pkt);
+            Tick req_ticks = ruby_port->memRequestPort.sendAtomic(pkt);
             return ruby_port->ticksToCycles(req_ticks);
         }
 
@@ -349,12 +352,12 @@
 }
 
 void
-RubyPort::MemSlavePort::addToRetryList()
+RubyPort::MemResponsePort::addToRetryList()
 {
     RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
 
     //
-    // Unless the requestor do not want retries (e.g., the Ruby tester),
+    // Unless the request port does not want retries (e.g., the Ruby tester),
     // record the stalled M5 port for later retry when the sequencer
     // becomes free.
     //
@@ -364,7 +367,7 @@
 }
 
 void
-RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
+RubyPort::MemResponsePort::recvFunctional(PacketPtr pkt)
 {
     DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());
 
@@ -375,8 +378,8 @@
     // pio port.
     if (!isPhysMemAddress(pkt)) {
         DPRINTF(RubyPort, "Pio Request for address: 0x%#x\n", pkt->getAddr());
-        assert(rp->pioMasterPort.isConnected());
-        rp->pioMasterPort.sendFunctional(pkt);
+        assert(rp->pioRequestPort.isConnected());
+        rp->pioRequestPort.sendFunctional(pkt);
         return;
     }
 
@@ -402,14 +405,14 @@
             panic("Unsupported functional command %s\n", pkt->cmdString());
         }
 
-        // Unless the requester explicitly said otherwise, generate an error if
-        // the functional request failed
+        // Unless the request port explicitly said otherwise, generate an error
+        // if the functional request failed
         if (!accessSucceeded && !pkt->suppressFuncError()) {
             fatal("Ruby functional %s failed for address %#x\n",
                   pkt->isWrite() ? "write" : "read", pkt->getAddr());
         }
 
-        // turn packet around to go back to requester if response expected
+        // turn packet around to go back to request port if response expected
         if (needsResponse) {
             // The pkt is already turned into a reponse if the directory
             // forwarded the request to the memory controller (see
@@ -439,7 +442,7 @@
     // First we must retrieve the request port from the sender State
     RubyPort::SenderState *senderState =
         safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
-    MemSlavePort *port = senderState->port;
+    MemResponsePort *port = senderState->port;
     assert(port != NULL);
     delete senderState;
 
@@ -452,8 +455,8 @@
 RubyPort::trySendRetries()
 {
     //
-    // If we had to stall the MemSlavePorts, wake them up because the sequencer
-    // likely has free resources now.
+    // If we had to stall the MemResponsePorts, wake them up because the
+    // sequencer likely has free resources now.
     //
     if (!retryList.empty()) {
         // Record the current list of ports to retry on a temporary list
@@ -461,7 +464,7 @@
         // an immediate retry, which may result in the ports being put back on
         // the list. Therefore we want to clear the retryList before calling
         // sendRetryReq.
-        std::vector<MemSlavePort *> curRetryList(retryList);
+        std::vector<MemResponsePort *> curRetryList(retryList);
 
         retryList.clear();
 
@@ -509,11 +512,11 @@
 }
 
 void
-RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
+RubyPort::MemResponsePort::hitCallback(PacketPtr pkt)
 {
     bool needsResponse = pkt->needsResponse();
 
-    // Unless specified at configuraiton, all responses except failed SC
+    // Unless specified at configuration, all responses except failed SC
     // and Flush operations access M5 physical memory.
     bool accessPhysMem = access_backing_store;
 
@@ -558,7 +561,7 @@
         // We must check device memory first in case it overlaps with the
         // system memory range.
         if (ruby_port->system->isDeviceMemAddr(pkt)) {
-            auto dmem = ruby_port->system->getDeviceMemory(pkt->masterId());
+            auto dmem = ruby_port->system->getDeviceMemory(pkt->requestorId());
             dmem->access(pkt);
         } else if (ruby_port->system->isMemAddr(pkt->getAddr())) {
             rs->getPhysMem()->access(pkt);
@@ -569,7 +572,7 @@
         pkt->makeResponse();
     }
 
-    // turn packet around to go back to requester if response expected
+    // turn packet around to go back to request port if response expected
     if (needsResponse || pkt->isResponse()) {
         DPRINTF(RubyPort, "Sending packet back over port\n");
         // Send a response in the same cycle. There is no need to delay the
@@ -584,15 +587,15 @@
 }
 
 AddrRangeList
-RubyPort::PioSlavePort::getAddrRanges() const
+RubyPort::PioResponsePort::getAddrRanges() const
 {
-    // at the moment the assumption is that the master does not care
+    // at the moment the assumption is that the request port does not care
     AddrRangeList ranges;
     RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
 
-    for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
+    for (size_t i = 0; i < ruby_port->request_ports.size(); ++i) {
         ranges.splice(ranges.begin(),
-                ruby_port->master_ports[i]->getAddrRanges());
+                ruby_port->request_ports[i]->getAddrRanges());
     }
     for (const auto M5_VAR_USED &r : ranges)
         DPRINTF(RubyPort, "%s\n", r.to_string());
@@ -600,7 +603,7 @@
 }
 
 bool
-RubyPort::MemSlavePort::isPhysMemAddress(PacketPtr pkt) const
+RubyPort::MemResponsePort::isPhysMemAddress(PacketPtr pkt) const
 {
     RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
     return ruby_port->system->isMemAddr(pkt->getAddr())
@@ -613,16 +616,17 @@
     DPRINTF(RubyPort, "Sending invalidations.\n");
     // Allocate the invalidate request and packet on the stack, as it is
     // assumed they will not be modified or deleted by receivers.
-    // TODO: should this really be using funcMasterId?
+    // TODO: should this really be using funcRequestorId?
     auto request = std::make_shared<Request>(
         address, RubySystem::getBlockSizeBytes(), 0,
-        Request::funcMasterId);
+        Request::funcRequestorId);
 
     // Use a single packet to signal all snooping ports of the invalidation.
     // This assumes that snooping ports do NOT modify the packet/request
     Packet pkt(request, MemCmd::InvalidateReq);
-    for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
-        // check if the connected master port is snooping
+    for (CpuPortIter p = response_ports.begin(); p != response_ports.end();
+         ++p) {
+        // check if the connected request port is snooping
         if ((*p)->isSnooping()) {
             // send as a snoop request
             (*p)->sendTimingSnoopReq(&pkt);
@@ -631,12 +635,12 @@
 }
 
 void
-RubyPort::PioMasterPort::recvRangeChange()
+RubyPort::PioRequestPort::recvRangeChange()
 {
     RubyPort &r = static_cast<RubyPort &>(owner);
     r.gotAddrRanges--;
     if (r.gotAddrRanges == 0 && FullSystem) {
-        r.pioSlavePort.sendRangeChange();
+        r.pioResponsePort.sendRangeChange();
     }
 }
 
@@ -644,7 +648,7 @@
 RubyPort::functionalWrite(Packet *func_pkt)
 {
     int num_written = 0;
-    for (auto port : slave_ports) {
+    for (auto port : response_ports) {
         if (port->trySatisfyFunctional(func_pkt)) {
             num_written += 1;
         }
diff --git a/src/mem/ruby/system/RubyPort.hh b/src/mem/ruby/system/RubyPort.hh
index 1e21090..73c4557 100644
--- a/src/mem/ruby/system/RubyPort.hh
+++ b/src/mem/ruby/system/RubyPort.hh
@@ -58,21 +58,21 @@
 class RubyPort : public ClockedObject
 {
   public:
-    class MemMasterPort : public QueuedMasterPort
+    class MemRequestPort : public QueuedRequestPort
     {
       private:
         ReqPacketQueue reqQueue;
         SnoopRespPacketQueue snoopRespQueue;
 
       public:
-        MemMasterPort(const std::string &_name, RubyPort *_port);
+        MemRequestPort(const std::string &_name, RubyPort *_port);
 
       protected:
         bool recvTimingResp(PacketPtr pkt);
         void recvRangeChange() {}
     };
 
-    class MemSlavePort : public QueuedSlavePort
+    class MemResponsePort : public QueuedResponsePort
     {
       private:
         RespPacketQueue queue;
@@ -80,7 +80,7 @@
         bool no_retry_on_stall;
 
       public:
-        MemSlavePort(const std::string &_name, RubyPort *_port,
+        MemResponsePort(const std::string &_name, RubyPort *_port,
                      bool _access_backing_store,
                      PortID id, bool _no_retry_on_stall);
         void hitCallback(PacketPtr pkt);
@@ -102,27 +102,27 @@
         bool isPhysMemAddress(PacketPtr pkt) const;
     };
 
-    class PioMasterPort : public QueuedMasterPort
+    class PioRequestPort : public QueuedRequestPort
     {
       private:
         ReqPacketQueue reqQueue;
         SnoopRespPacketQueue snoopRespQueue;
 
       public:
-        PioMasterPort(const std::string &_name, RubyPort *_port);
+        PioRequestPort(const std::string &_name, RubyPort *_port);
 
       protected:
         bool recvTimingResp(PacketPtr pkt);
         void recvRangeChange();
     };
 
-    class PioSlavePort : public QueuedSlavePort
+    class PioResponsePort : public QueuedResponsePort
     {
       private:
         RespPacketQueue queue;
 
       public:
-        PioSlavePort(const std::string &_name, RubyPort *_port);
+        PioResponsePort(const std::string &_name, RubyPort *_port);
 
       protected:
         bool recvTimingReq(PacketPtr pkt);
@@ -130,15 +130,16 @@
         Tick recvAtomic(PacketPtr pkt);
 
         void recvFunctional(PacketPtr pkt)
-        { panic("recvFunctional should never be called on pio slave port!"); }
+        { panic("recvFunctional should never be called on pio response "
+                "port!"); }
 
         AddrRangeList getAddrRanges() const;
     };
 
     struct SenderState : public Packet::SenderState
     {
-        MemSlavePort *port;
-        SenderState(MemSlavePort * _port) : port(_port)
+        MemResponsePort *port;
+        SenderState(MemResponsePort * _port) : port(_port)
         {}
      };
 
@@ -178,11 +179,11 @@
      * Called by the PIO port when receiving a timing response.
      *
      * @param pkt Response packet
-     * @param master_port_id Port id of the PIO port
+     * @param request_port_id Port id of the PIO port
      *
      * @return Whether successfully sent
      */
-    bool recvTimingResp(PacketPtr pkt, PortID master_port_id);
+    bool recvTimingResp(PacketPtr pkt, PortID request_port_id);
 
     RubySystem *m_ruby_system;
     uint32_t m_version;
@@ -191,35 +192,35 @@
     bool m_usingRubyTester;
     System* system;
 
-    std::vector<MemSlavePort *> slave_ports;
+    std::vector<MemResponsePort *> response_ports;
 
   private:
-    bool onRetryList(MemSlavePort * port)
+    bool onRetryList(MemResponsePort * port)
     {
         return (std::find(retryList.begin(), retryList.end(), port) !=
                 retryList.end());
     }
-    void addToRetryList(MemSlavePort * port)
+    void addToRetryList(MemResponsePort * port)
     {
         if (onRetryList(port)) return;
         retryList.push_back(port);
     }
 
-    PioMasterPort pioMasterPort;
-    PioSlavePort pioSlavePort;
-    MemMasterPort memMasterPort;
-    MemSlavePort memSlavePort;
+    PioRequestPort pioRequestPort;
+    PioResponsePort pioResponsePort;
+    MemRequestPort memRequestPort;
+    MemResponsePort memResponsePort;
     unsigned int gotAddrRanges;
 
     /** Vector of M5 Ports attached to this Ruby port. */
-    typedef std::vector<MemSlavePort *>::iterator CpuPortIter;
-    std::vector<PioMasterPort *> master_ports;
+    typedef std::vector<MemResponsePort *>::iterator CpuPortIter;
+    std::vector<PioRequestPort *> request_ports;
 
     //
     // Based on similar code in the M5 bus.  Stores pointers to those ports
     // that should be called when the Sequencer becomes available after a stall.
     //
-    std::vector<MemSlavePort *> retryList;
+    std::vector<MemResponsePort *> retryList;
 
     bool m_isCPUSequencer;
 };
diff --git a/src/mem/ruby/system/RubySystem.cc b/src/mem/ruby/system/RubySystem.cc
index c35ab02..ac5515f 100644
--- a/src/mem/ruby/system/RubySystem.cc
+++ b/src/mem/ruby/system/RubySystem.cc
@@ -123,23 +123,23 @@
     machineToNetwork.insert(std::make_pair(mach_id, network_id));
 }
 
-// This registers all master IDs in the system for functional reads. This
-// should be called in init() since master IDs are obtained in a SimObject's
+// This registers all requestor IDs in the system for functional reads. This
+// should be called in init() since requestor IDs are obtained in a SimObject's
 // constructor and there are functional reads/writes between init() and
 // startup().
 void
-RubySystem::registerMasterIDs()
+RubySystem::registerRequestorIDs()
 {
-    // Create the map for MasterID to network node. This is done in init()
-    // because all MasterIDs must be obtained in the constructor and
+    // Create the map for RequestorID to network node. This is done in init()
+    // because all RequestorIDs must be obtained in the constructor and
     // AbstractControllers are registered in their constructor. This is done
     // in two steps: (1) Add all of the AbstractControllers. Since we don't
-    // have a mapping of MasterID to MachineID this is the easiest way to
-    // filter out AbstractControllers from non-Ruby masters. (2) Go through
-    // the system's list of MasterIDs and add missing MasterIDs to network 0
-    // (the default).
+    // have a mapping of RequestorID to MachineID this is the easiest way to
+    // filter out AbstractControllers from non-Ruby requestors. (2) Go through
+    // the system's list of RequestorIDs and add missing RequestorIDs to
+    // network 0 (the default).
     for (auto& cntrl : m_abs_cntrl_vec) {
-        MasterID mid = cntrl->getMasterId();
+        RequestorID id = cntrl->getRequestorId();
         MachineID mach_id = cntrl->getMachineID();
 
         // These are setup in Network constructor and should exist
@@ -148,16 +148,16 @@
                  MachineIDToString(mach_id).c_str());
 
         auto network_id = machineToNetwork[mach_id];
-        masterToNetwork.insert(std::make_pair(mid, network_id));
+        requestorToNetwork.insert(std::make_pair(id, network_id));
 
         // Create helper vectors for each network to iterate over.
         netCntrls[network_id].push_back(cntrl);
     }
 
-    // Default all other master IDs to network 0
-    for (auto mid = 0; mid < params()->system->maxMasters(); ++mid) {
-        if (!masterToNetwork.count(mid)) {
-            masterToNetwork.insert(std::make_pair(mid, 0));
+    // Default all other requestor IDs to network 0
+    for (auto id = 0; id < params()->system->maxRequestors(); ++id) {
+        if (!requestorToNetwork.count(id)) {
+            requestorToNetwork.insert(std::make_pair(id, 0));
         }
     }
 }
@@ -400,7 +400,7 @@
 void
 RubySystem::init()
 {
-    registerMasterIDs();
+    registerRequestorIDs();
 }
 
 void
@@ -491,9 +491,9 @@
     unsigned int num_invalid = 0;
 
     // Only send functional requests within the same network.
-    assert(masterToNetwork.count(pkt->masterId()));
-    int master_net_id = masterToNetwork[pkt->masterId()];
-    assert(netCntrls.count(master_net_id));
+    assert(requestorToNetwork.count(pkt->requestorId()));
+    int request_net_id = requestorToNetwork[pkt->requestorId()];
+    assert(netCntrls.count(request_net_id));
 
     AbstractController *ctrl_ro = nullptr;
     AbstractController *ctrl_rw = nullptr;
@@ -501,7 +501,7 @@
 
     // In this loop we count the number of controllers that have the given
     // address in read only, read write and busy states.
-    for (auto& cntrl : netCntrls[master_net_id]) {
+    for (auto& cntrl : netCntrls[request_net_id]) {
         access_perm = cntrl-> getAccessPermission(line_address);
         if (access_perm == AccessPermission_Read_Only){
             num_ro++;
@@ -537,7 +537,7 @@
     // The reason is because the Backing_Store memory could easily be stale, if
     // there are copies floating around the cache hierarchy, so you want to read
     // it only if it's not in the cache hierarchy at all.
-    int num_controllers = netCntrls[master_net_id].size();
+    int num_controllers = netCntrls[request_net_id].size();
     if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
         DPRINTF(RubySystem, "only copy in Backing_Store memory, read from it\n");
         ctrl_backing_store->functionalRead(line_address, pkt);
@@ -573,7 +573,7 @@
         DPRINTF(RubySystem, "Controllers functionalRead lookup "
                             "(num_maybe_stale=%d, num_busy = %d)\n",
                 num_maybe_stale, num_busy);
-        for (auto& cntrl : netCntrls[master_net_id]) {
+        for (auto& cntrl : netCntrls[request_net_id]) {
             if (cntrl->functionalReadBuffers(pkt))
                 return true;
         }
@@ -605,11 +605,11 @@
     uint32_t M5_VAR_USED num_functional_writes = 0;
 
     // Only send functional requests within the same network.
-    assert(masterToNetwork.count(pkt->masterId()));
-    int master_net_id = masterToNetwork[pkt->masterId()];
-    assert(netCntrls.count(master_net_id));
+    assert(requestorToNetwork.count(pkt->requestorId()));
+    int request_net_id = requestorToNetwork[pkt->requestorId()];
+    assert(netCntrls.count(request_net_id));
 
-    for (auto& cntrl : netCntrls[master_net_id]) {
+    for (auto& cntrl : netCntrls[request_net_id]) {
         num_functional_writes += cntrl->functionalWriteBuffers(pkt);
 
         access_perm = cntrl->getAccessPermission(line_addr);
diff --git a/src/mem/ruby/system/RubySystem.hh b/src/mem/ruby/system/RubySystem.hh
index d14b383..cdd2b5c 100644
--- a/src/mem/ruby/system/RubySystem.hh
+++ b/src/mem/ruby/system/RubySystem.hh
@@ -97,7 +97,7 @@
     void registerNetwork(Network*);
     void registerAbstractController(AbstractController*);
     void registerMachineID(const MachineID& mach_id, Network* network);
-    void registerMasterIDs();
+    void registerRequestorIDs();
 
     bool eventQueueEmpty() { return eventq->empty(); }
     void enqueueRubyEvent(Tick tick)
@@ -142,7 +142,7 @@
     Cycles m_start_cycle;
 
     std::unordered_map<MachineID, unsigned> machineToNetwork;
-    std::unordered_map<MasterID, unsigned> masterToNetwork;
+    std::unordered_map<RequestorID, unsigned> requestorToNetwork;
     std::unordered_map<unsigned, std::vector<AbstractController*>> netCntrls;
 
   public:
diff --git a/src/mem/ruby/system/Sequencer.py b/src/mem/ruby/system/Sequencer.py
index 47c5b41..6869fc2 100644
--- a/src/mem/ruby/system/Sequencer.py
+++ b/src/mem/ruby/system/Sequencer.py
@@ -35,12 +35,21 @@
    cxx_header = "mem/ruby/system/RubyPort.hh"
    version = Param.Int(0, "")
 
-   slave = VectorSlavePort("CPU slave port")
-   master = VectorMasterPort("CPU master port")
-   pio_master_port = RequestPort("Ruby mem master port")
-   mem_master_port = RequestPort("Ruby mem master port")
-   pio_slave_port = ResponsePort("Ruby pio slave port")
-   mem_slave_port = ResponsePort("Ruby memory port")
+   response_ports = VectorResponsePort("CPU response port")
+   slave     = DeprecatedParam(response_ports,
+                        '`slave` is now called `response_ports`')
+   request_ports = VectorRequestPort("CPU request port")
+   master    = DeprecatedParam(request_ports,
+                        '`master` is now called `request_ports`')
+   pio_request_port = RequestPort("Ruby pio request port")
+   pio_master_port  = DeprecatedParam(pio_request_port,
+                        '`pio_master_port` is now called `pio_request_port`')
+   mem_request_port = RequestPort("Ruby mem request port")
+   mem_master_port  = DeprecatedParam(mem_request_port,
+                        '`mem_master_port` is now called `mem_request_port`')
+   pio_response_port = ResponsePort("Ruby pio response port")
+   pio_slave_port    = DeprecatedParam(pio_response_port,
+                        '`pio_slave_port` is now called `pio_response_port`')
 
    using_ruby_tester = Param.Bool(False, "")
    no_retry_on_stall = Param.Bool(False, "")
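
   # For reference, a minimal configuration sketch of how the renamed
   # Sequencer ports are used. `cpu`, `seq`, and `iobus` are hypothetical
   # object names, and `cpu_side_ports` assumes the IOXBar vector ports are
   # renamed consistently by this patch (see the xbar changes below). This is
   # an illustrative sketch only, not part of the patch:
   #
   #     cpu.icache_port = seq.response_ports
   #     cpu.dcache_port = seq.response_ports
   #     seq.pio_request_port = iobus.cpu_side_ports
   #     # The DeprecatedParam aliases keep the old names working, e.g.
   #     # cpu.icache_port = seq.slave, but print a deprecation warning.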
diff --git a/src/mem/ruby/system/VIPERCoalescer.cc b/src/mem/ruby/system/VIPERCoalescer.cc
index eafce6d..a8a3aa9 100644
--- a/src/mem/ruby/system/VIPERCoalescer.cc
+++ b/src/mem/ruby/system/VIPERCoalescer.cc
@@ -248,7 +248,7 @@
             RubyPort::SenderState *ss =
                 safe_cast<RubyPort::SenderState *>
                     (writeCompletePkt->senderState);
-            MemSlavePort *port = ss->port;
+            MemResponsePort *port = ss->port;
             assert(port != NULL);
 
             writeCompletePkt->senderState = ss->predecessor;
diff --git a/src/mem/serial_link.cc b/src/mem/serial_link.cc
index d05328e..74ac43f 100644
--- a/src/mem/serial_link.cc
+++ b/src/mem/serial_link.cc
@@ -51,14 +51,15 @@
 #include "debug/SerialLink.hh"
 #include "params/SerialLink.hh"
 
-SerialLink::SerialLinkSlavePort::SerialLinkSlavePort(const std::string& _name,
+SerialLink::SerialLinkResponsePort::
+SerialLinkResponsePort(const std::string& _name,
                                          SerialLink& _serial_link,
-                                         SerialLinkMasterPort& _masterPort,
+                                         SerialLinkRequestPort& _mem_side_port,
                                          Cycles _delay, int _resp_limit,
                                          const std::vector<AddrRange>&
                                          _ranges)
     : ResponsePort(_name, &_serial_link), serial_link(_serial_link),
-      masterPort(_masterPort), delay(_delay),
+      mem_side_port(_mem_side_port), delay(_delay),
       ranges(_ranges.begin(), _ranges.end()),
       outstandingResponses(0), retryReq(false),
       respQueueLimit(_resp_limit),
@@ -66,21 +67,22 @@
 {
 }
 
-SerialLink::SerialLinkMasterPort::SerialLinkMasterPort(const std::string&
+SerialLink::SerialLinkRequestPort::SerialLinkRequestPort(const std::string&
                                            _name, SerialLink& _serial_link,
-                                           SerialLinkSlavePort& _slavePort,
-                                           Cycles _delay, int _req_limit)
+                                           SerialLinkResponsePort&
+                                           _cpu_side_port, Cycles _delay,
+                                           int _req_limit)
     : RequestPort(_name, &_serial_link), serial_link(_serial_link),
-      slavePort(_slavePort), delay(_delay), reqQueueLimit(_req_limit),
+      cpu_side_port(_cpu_side_port), delay(_delay), reqQueueLimit(_req_limit),
       sendEvent([this]{ trySendTiming(); }, _name)
 {
 }
 
 SerialLink::SerialLink(SerialLinkParams *p)
     : ClockedObject(p),
-      slavePort(p->name + ".slave", *this, masterPort,
+      cpu_side_port(p->name + ".cpu_side_port", *this, mem_side_port,
                 ticksToCycles(p->delay), p->resp_size, p->ranges),
-      masterPort(p->name + ".master", *this, slavePort,
+      mem_side_port(p->name + ".mem_side_port", *this, cpu_side_port,
                  ticksToCycles(p->delay), p->req_size),
       num_lanes(p->num_lanes),
       link_speed(p->link_speed)
@@ -91,10 +93,10 @@
 Port&
 SerialLink::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "master")
-        return masterPort;
-    else if (if_name == "slave")
-        return slavePort;
+    if (if_name == "mem_side_port")
+        return mem_side_port;
+    else if (if_name == "cpu_side_port")
+        return cpu_side_port;
     else
         // pass it along to our super class
         return ClockedObject::getPort(if_name, idx);
@@ -104,29 +106,29 @@
 SerialLink::init()
 {
     // make sure both sides are connected and have the same block size
-    if (!slavePort.isConnected() || !masterPort.isConnected())
+    if (!cpu_side_port.isConnected() || !mem_side_port.isConnected())
         fatal("Both ports of a serial_link must be connected.\n");
 
-    // notify the master side  of our address ranges
-    slavePort.sendRangeChange();
+    // notify the request side of our address ranges
+    cpu_side_port.sendRangeChange();
 }
 
 bool
-SerialLink::SerialLinkSlavePort::respQueueFull() const
+SerialLink::SerialLinkResponsePort::respQueueFull() const
 {
     return outstandingResponses == respQueueLimit;
 }
 
 bool
-SerialLink::SerialLinkMasterPort::reqQueueFull() const
+SerialLink::SerialLinkRequestPort::reqQueueFull() const
 {
     return transmitList.size() == reqQueueLimit;
 }
 
 bool
-SerialLink::SerialLinkMasterPort::recvTimingResp(PacketPtr pkt)
+SerialLink::SerialLinkRequestPort::recvTimingResp(PacketPtr pkt)
 {
-    // all checks are done when the request is accepted on the slave
+    // all checks are done when the request is accepted on the response
     // side, so we are guaranteed to have space for the response
     DPRINTF(SerialLink, "recvTimingResp: %s addr 0x%x\n",
             pkt->cmdString(), pkt->getAddr());
@@ -151,13 +153,13 @@
     // one crosses this link faster than the first one (because the packet
     // waits in the link based on its size). This can reorder the received
     // response.
-    slavePort.schedTimingResp(pkt, t);
+    cpu_side_port.schedTimingResp(pkt, t);
 
     return true;
 }
 
 bool
-SerialLink::SerialLinkSlavePort::recvTimingReq(PacketPtr pkt)
+SerialLink::SerialLinkResponsePort::recvTimingReq(PacketPtr pkt)
 {
     DPRINTF(SerialLink, "recvTimingReq: %s addr 0x%x\n",
             pkt->cmdString(), pkt->getAddr());
@@ -169,7 +171,7 @@
             transmitList.size(), outstandingResponses);
 
     // if the request queue is full then there is no hope
-    if (masterPort.reqQueueFull()) {
+    if (mem_side_port.reqQueueFull()) {
         DPRINTF(SerialLink, "Request queue full\n");
         retryReq = true;
     } else if ( !retryReq ) {
@@ -212,19 +214,19 @@
             // that the second one crosses this link faster than the first one
             // (because the packet waits in the link based on its size).
             // This can reorder the received response.
-            masterPort.schedTimingReq(pkt, t);
+            mem_side_port.schedTimingReq(pkt, t);
         }
     }
 
     // remember that we are now stalling a packet and that we have to
-    // tell the sending master to retry once space becomes available,
+    // tell the sending requestor to retry once space becomes available,
     // we make no distinction whether the stalling is due to the
     // request queue or response queue being full
     return !retryReq;
 }
 
 void
-SerialLink::SerialLinkSlavePort::retryStalledReq()
+SerialLink::SerialLinkResponsePort::retryStalledReq()
 {
     if (retryReq) {
         DPRINTF(SerialLink, "Request waiting for retry, now retrying\n");
@@ -234,7 +236,7 @@
 }
 
 void
-SerialLink::SerialLinkMasterPort::schedTimingReq(PacketPtr pkt, Tick when)
+SerialLink::SerialLinkRequestPort::schedTimingReq(PacketPtr pkt, Tick when)
 {
     // If we're about to put this packet at the head of the queue, we
     // need to schedule an event to do the transmit.  Otherwise there
@@ -251,7 +253,7 @@
 
 
 void
-SerialLink::SerialLinkSlavePort::schedTimingResp(PacketPtr pkt, Tick when)
+SerialLink::SerialLinkResponsePort::schedTimingResp(PacketPtr pkt, Tick when)
 {
     // If we're about to put this packet at the head of the queue, we
     // need to schedule an event to do the transmit.  Otherwise there
@@ -265,7 +267,7 @@
 }
 
 void
-SerialLink::SerialLinkMasterPort::trySendTiming()
+SerialLink::SerialLinkRequestPort::trySendTiming()
 {
     assert(!transmitList.empty());
 
@@ -300,7 +302,7 @@
         // then send a retry at this point, also note that if the
         // request we stalled was waiting for the response queue
         // rather than the request queue we might stall it again
-        slavePort.retryStalledReq();
+        cpu_side_port.retryStalledReq();
     }
 
     // if the send failed, then we try again once we receive a retry,
@@ -308,7 +310,7 @@
 }
 
 void
-SerialLink::SerialLinkSlavePort::trySendTiming()
+SerialLink::SerialLinkResponsePort::trySendTiming()
 {
     assert(!transmitList.empty());
 
@@ -344,7 +346,7 @@
         // if there is space in the request queue and we were stalling
         // a request, it will definitely be possible to accept it now
         // since there is guaranteed space in the response queue
-        if (!masterPort.reqQueueFull() && retryReq) {
+        if (!mem_side_port.reqQueueFull() && retryReq) {
             DPRINTF(SerialLink, "Request waiting for retry, now retrying\n");
             retryReq = false;
             sendRetryReq();
@@ -356,25 +358,25 @@
 }
 
 void
-SerialLink::SerialLinkMasterPort::recvReqRetry()
+SerialLink::SerialLinkRequestPort::recvReqRetry()
 {
     trySendTiming();
 }
 
 void
-SerialLink::SerialLinkSlavePort::recvRespRetry()
+SerialLink::SerialLinkResponsePort::recvRespRetry()
 {
     trySendTiming();
 }
 
 Tick
-SerialLink::SerialLinkSlavePort::recvAtomic(PacketPtr pkt)
+SerialLink::SerialLinkResponsePort::recvAtomic(PacketPtr pkt)
 {
-    return delay * serial_link.clockPeriod() + masterPort.sendAtomic(pkt);
+    return delay * serial_link.clockPeriod() + mem_side_port.sendAtomic(pkt);
 }
 
 void
-SerialLink::SerialLinkSlavePort::recvFunctional(PacketPtr pkt)
+SerialLink::SerialLinkResponsePort::recvFunctional(PacketPtr pkt)
 {
     pkt->pushLabel(name());
 
@@ -386,19 +388,19 @@
         }
     }
 
-    // also check the master port's request queue
-    if (masterPort.trySatisfyFunctional(pkt)) {
+    // also check the memory-side port's request queue
+    if (mem_side_port.trySatisfyFunctional(pkt)) {
         return;
     }
 
     pkt->popLabel();
 
     // fall through if pkt still not satisfied
-    masterPort.sendFunctional(pkt);
+    mem_side_port.sendFunctional(pkt);
 }
 
 bool
-SerialLink::SerialLinkMasterPort::trySatisfyFunctional(PacketPtr pkt)
+SerialLink::SerialLinkRequestPort::trySatisfyFunctional(PacketPtr pkt)
 {
     bool found = false;
     auto i = transmitList.begin();
@@ -415,7 +417,7 @@
 }
 
 AddrRangeList
-SerialLink::SerialLinkSlavePort::getAddrRanges() const
+SerialLink::SerialLinkResponsePort::getAddrRanges() const
 {
     return ranges;
 }
diff --git a/src/mem/serial_link.hh b/src/mem/serial_link.hh
index 7f047ef..903387e 100644
--- a/src/mem/serial_link.hh
+++ b/src/mem/serial_link.hh
@@ -82,16 +82,16 @@
         { }
     };
 
-    // Forward declaration to allow the slave port to have a pointer
-    class SerialLinkMasterPort;
+    // Forward declaration to allow the CPU-side port to have a pointer
+    class SerialLinkRequestPort;
 
     /**
      * The port on the side that receives requests and sends
-     * responses. The slave port has a set of address ranges that it
-     * is responsible for. The slave port also has a buffer for the
+     * responses. The CPU-side port has a set of address ranges that it
+     * is responsible for. The CPU-side port also has a buffer for the
      * responses not yet sent.
      */
-    class SerialLinkSlavePort : public ResponsePort
+    class SerialLinkResponsePort : public ResponsePort
     {
 
       private:
@@ -100,9 +100,9 @@
         SerialLink& serial_link;
 
         /**
-         * Master port on the other side of the serial_link.
+         * Request port on the other side of the serial_link.
          */
-        SerialLinkMasterPort& masterPort;
+        SerialLinkRequestPort& mem_side_port;
 
         /** Minimum request delay though this serial_link. */
         const Cycles delay;
@@ -147,18 +147,18 @@
       public:
 
         /**
-         * Constructor for the SerialLinkSlavePort.
+         * Constructor for the SerialLinkResponsePort.
          *
          * @param _name the port name including the owner
          * @param _serial_link the structural owner
-         * @param _masterPort the master port on the other side of the
+         * @param _mem_side_port the memory-side port on the other side of the
          * serial_link
          * @param _delay the delay in cycles from receiving to sending
          * @param _resp_limit the size of the response queue
          * @param _ranges a number of address ranges to forward
          */
-        SerialLinkSlavePort(const std::string& _name, SerialLink&
-                        _serial_link, SerialLinkMasterPort& _masterPort,
+        SerialLinkResponsePort(const std::string& _name, SerialLink&
+                        _serial_link, SerialLinkRequestPort& _mem_side_port,
                         Cycles _delay, int _resp_limit, const
                         std::vector<AddrRange>& _ranges);
 
@@ -204,10 +204,10 @@
 
     /**
      * Port on the side that forwards requests and receives
-     * responses. The master port has a buffer for the requests not
+     * responses. The memory-side port has a buffer for the requests not
      * yet sent.
      */
-    class SerialLinkMasterPort : public RequestPort
+    class SerialLinkRequestPort : public RequestPort
     {
 
       private:
@@ -216,9 +216,10 @@
         SerialLink& serial_link;
 
         /**
-         * The slave port on the other side of the serial_link.
+         * The response (CPU-side) port on the other side of
+         * the serial_link.
          */
-        SerialLinkSlavePort& slavePort;
+        SerialLinkResponsePort& cpu_side_port;
 
         /** Minimum delay though this serial_link. */
         const Cycles delay;
@@ -247,18 +248,18 @@
       public:
 
         /**
-         * Constructor for the SerialLinkMasterPort.
+         * Constructor for the SerialLinkRequestPort.
          *
          * @param _name the port name including the owner
          * @param _serial_link the structural owner
-         * @param _slavePort the slave port on the other side of the
-         * serial_link
+         * @param _cpu_side_port the CPU-side port on the other
+         * side of the serial_link
          * @param _delay the delay in cycles from receiving to sending
          * @param _req_limit the size of the request queue
          */
-        SerialLinkMasterPort(const std::string& _name, SerialLink&
-                         _serial_link, SerialLinkSlavePort& _slavePort, Cycles
-                         _delay, int _req_limit);
+        SerialLinkRequestPort(const std::string& _name, SerialLink&
+                         _serial_link, SerialLinkResponsePort& _cpu_side_port,
+                         Cycles _delay, int _req_limit);
 
         /**
          * Is this side blocked from accepting new request packets.
@@ -297,11 +298,11 @@
         void recvReqRetry();
     };
 
-    /** Slave port of the serial_link. */
-    SerialLinkSlavePort slavePort;
+    /** Response port of the serial_link. */
+    SerialLinkResponsePort cpu_side_port;
 
-    /** Master port of the serial_link. */
-    SerialLinkMasterPort masterPort;
+    /** Request port of the serial_link. */
+    SerialLinkRequestPort mem_side_port;
 
     /** Number of parallel lanes in this serial link */
     unsigned num_lanes;
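
     // For reference, a minimal Python configuration sketch of attaching a
     // SerialLink through the renamed ports; `membus`, `link`, and `memctrl`
     // are hypothetical names, and this assumes the SerialLink Python ports
     // are renamed to match getPort() above. Illustrative sketch only, not
     // part of the patch:
     //
     //     link = SerialLink()
     //     # The CPU-side (response) port receives requests from the bus...
     //     membus.mem_side_ports = link.cpu_side_port
     //     # ...and the memory-side (request) port forwards them to memory.
     //     link.mem_side_port = memctrl.port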
diff --git a/src/mem/simple_mem.cc b/src/mem/simple_mem.cc
index 4161e87..c593a27 100644
--- a/src/mem/simple_mem.cc
+++ b/src/mem/simple_mem.cc
@@ -154,7 +154,7 @@
     // queue if there is one
     bool needsResponse = pkt->needsResponse();
     recvAtomic(pkt);
-    // turn packet around to go back to requester if response expected
+    // turn packet around to go back to requestor if response expected
     if (needsResponse) {
         // recvAtomic() should already have turned packet into
         // atomic response
@@ -260,7 +260,7 @@
 
 SimpleMemory::MemoryPort::MemoryPort(const std::string& _name,
                                      SimpleMemory& _memory)
-    : SlavePort(_name, &_memory), memory(_memory)
+    : ResponsePort(_name, &_memory), memory(_memory)
 { }
 
 AddrRangeList
diff --git a/src/mem/simple_mem.hh b/src/mem/simple_mem.hh
index 0bcb15c..e80c88f 100644
--- a/src/mem/simple_mem.hh
+++ b/src/mem/simple_mem.hh
@@ -79,7 +79,7 @@
         { }
     };
 
-    class MemoryPort : public SlavePort
+    class MemoryPort : public ResponsePort
     {
       private:
         SimpleMemory& memory;
diff --git a/src/mem/snoop_filter.cc b/src/mem/snoop_filter.cc
index d9ac521..d1a62dc 100644
--- a/src/mem/snoop_filter.cc
+++ b/src/mem/snoop_filter.cc
@@ -61,19 +61,20 @@
 }
 
 std::pair<SnoopFilter::SnoopList, Cycles>
-SnoopFilter::lookupRequest(const Packet* cpkt, const ResponsePort& slave_port)
+SnoopFilter::lookupRequest(const Packet* cpkt, const ResponsePort&
+                           cpu_side_port)
 {
     DPRINTF(SnoopFilter, "%s: src %s packet %s\n", __func__,
-            slave_port.name(), cpkt->print());
+            cpu_side_port.name(), cpkt->print());
 
     // check if the packet came from a cache
-    bool allocate = !cpkt->req->isUncacheable() && slave_port.isSnooping() &&
-        cpkt->fromCache();
+    bool allocate = !cpkt->req->isUncacheable() && cpu_side_port.isSnooping()
+        && cpkt->fromCache();
     Addr line_addr = cpkt->getBlockAddr(linesize);
     if (cpkt->isSecure()) {
         line_addr |= LineSecure;
     }
-    SnoopMask req_port = portToMask(slave_port);
+    SnoopMask req_port = portToMask(cpu_side_port);
     reqLookupResult.it = cachedLocations.find(line_addr);
     bool is_hit = (reqLookupResult.it != cachedLocations.end());
 
@@ -137,7 +138,7 @@
     } else { // if (!cpkt->needsResponse())
         assert(cpkt->isEviction());
         // make sure that the sender actually had the line
-        panic_if((sf_item.holder & req_port).none(), "requester %x is not a " \
+        panic_if((sf_item.holder & req_port).none(), "requestor %x is not a " \
                  "holder :( SF value %x.%x\n", req_port,
                  sf_item.requested, sf_item.holder);
         // CleanEvicts and Writebacks -> the sender and all caches above
@@ -333,16 +334,17 @@
 }
 
 void
-SnoopFilter::updateResponse(const Packet* cpkt, const ResponsePort& slave_port)
+SnoopFilter::updateResponse(const Packet* cpkt, const ResponsePort&
+                            cpu_side_port)
 {
     DPRINTF(SnoopFilter, "%s: src %s packet %s\n",
-            __func__, slave_port.name(), cpkt->print());
+            __func__, cpu_side_port.name(), cpkt->print());
 
     assert(cpkt->isResponse());
 
     // we only allocate if the packet actually came from a cache, but
     // start by checking if the port is snooping
-    if (cpkt->req->isUncacheable() || !slave_port.isSnooping())
+    if (cpkt->req->isUncacheable() || !cpu_side_port.isSnooping())
         return;
 
     // next check if we actually allocated an entry
@@ -354,31 +356,31 @@
     if (sf_it == cachedLocations.end())
         return;
 
-    SnoopMask slave_mask = portToMask(slave_port);
+    SnoopMask response_mask = portToMask(cpu_side_port);
     SnoopItem& sf_item = sf_it->second;
 
     DPRINTF(SnoopFilter, "%s:   old SF value %x.%x\n",
             __func__,  sf_item.requested, sf_item.holder);
 
     // Make sure we have seen the actual request, too
-    panic_if((sf_item.requested & slave_mask).none(),
+    panic_if((sf_item.requested & response_mask).none(),
              "SF value %x.%x missing request bit\n",
              sf_item.requested, sf_item.holder);
 
-    sf_item.requested &= ~slave_mask;
+    sf_item.requested &= ~response_mask;
     // Update the residency of the cache line.
 
     if (cpkt->req->isCacheMaintenance()) {
         // A cache clean response does not carry any data so it
         // shouldn't change the holders, unless it is invalidating.
         if (cpkt->isInvalidate()) {
-            sf_item.holder &= ~slave_mask;
+            sf_item.holder &= ~response_mask;
         }
         eraseIfNullEntry(sf_it);
     } else {
         // Any other response implies that a cache above will have the
         // block.
-        sf_item.holder |= slave_mask;
+        sf_item.holder |= response_mask;
         assert((sf_item.holder | sf_item.requested).any());
     }
     DPRINTF(SnoopFilter, "%s:   new SF value %x.%x\n",
diff --git a/src/mem/snoop_filter.hh b/src/mem/snoop_filter.hh
index 62d764e..6a38325 100644
--- a/src/mem/snoop_filter.hh
+++ b/src/mem/snoop_filter.hh
@@ -89,7 +89,7 @@
     // Change for systems with more than 256 ports tracked by this object
     static const int SNOOP_MASK_SIZE = 256;
 
-    typedef std::vector<QueuedSlavePort*> SnoopList;
+    typedef std::vector<QueuedResponsePort*> SnoopList;
 
     SnoopFilter (const SnoopFilterParams *p) :
         SimObject(p), reqLookupResult(cachedLocations.end()),
@@ -99,20 +99,20 @@
     }
 
     /**
-     * Init a new snoop filter and tell it about all the slave ports
+     * Init a new snoop filter and tell it about all the CPU-side ports
      * of the enclosing bus.
      *
-     * @param slave_ports Slave ports that the bus is attached to.
+     * @param _cpu_side_ports Response ports that the bus is attached to.
      */
-    void setSlavePorts(const SnoopList& slave_ports) {
-        localSlavePortIds.resize(slave_ports.size(), InvalidPortID);
+    void setCPUSidePorts(const SnoopList& _cpu_side_ports) {
+        localResponsePortIds.resize(_cpu_side_ports.size(), InvalidPortID);
 
         PortID id = 0;
-        for (const auto& p : slave_ports) {
+        for (const auto& p : _cpu_side_ports) {
             // no need to track this port if it is not snooping
             if (p->isSnooping()) {
-                slavePorts.push_back(p);
-                localSlavePortIds[p->getId()] = id++;
+                cpuSidePorts.push_back(p);
+                localResponsePortIds[p->getId()] = id++;
             }
         }
 
@@ -123,19 +123,19 @@
     }
 
     /**
-     * Lookup a request (from a slave port) in the snoop filter and
-     * return a list of other slave ports that need forwarding of the
+     * Lookup a request (from a CPU-side port) in the snoop filter and
+     * return a list of other CPU-side ports that need forwarding of the
      * resulting snoops.  Additionally, update the tracking structures
      * with new request information. Note that the caller must also
      * call finishRequest once it is known if the request needs to
      * retry or not.
      *
-     * @param cpkt          Pointer to the request packet. Not changed.
-     * @param slave_port    Slave port where the request came from.
+     * @param cpkt              Pointer to the request packet. Not changed.
+     * @param cpu_side_port     Response port where the request came from.
      * @return Pair of a vector of snoop target ports and lookup latency.
      */
     std::pair<SnoopList, Cycles> lookupRequest(const Packet* cpkt,
-                                               const ResponsePort& slave_port);
+                                        const ResponsePort& cpu_side_port);
 
     /**
      * For an un-successful request, revert the change to the snoop
@@ -149,7 +149,7 @@
     void finishRequest(bool will_retry, Addr addr, bool is_secure);
 
     /**
-     * Handle an incoming snoop from below (the master port). These
+     * Handle an incoming snoop from below (the memory-side port). These
      * can upgrade the tracking logic and may also benefit from
      * additional steering thanks to the snoop filter.
      *
@@ -189,11 +189,11 @@
      * other cache, or memory) and update the tracking information in
      * the snoop filter.
      *
-     * @param cpkt       Pointer to const Packet holding the snoop response.
-     * @param slave_port ResponsePort that made the original request and
-     *                   is the target of this response.
+     * @param cpkt          Pointer to const Packet holding the snoop response.
+     * @param cpu_side_port ResponsePort that made the original request and
+     *                      is the target of this response.
      */
-    void updateResponse(const Packet *cpkt, const ResponsePort& slave_port);
+    void updateResponse(const Packet *cpkt, const ResponsePort& cpu_side_port);
 
     virtual void regStats();
 
@@ -224,12 +224,12 @@
      */
     std::pair<SnoopList, Cycles> snoopAll(Cycles latency) const
     {
-        return std::make_pair(slavePorts, latency);
+        return std::make_pair(cpuSidePorts, latency);
     }
-    std::pair<SnoopList, Cycles> snoopSelected(const SnoopList& slave_ports,
-                                               Cycles latency) const
+    std::pair<SnoopList, Cycles> snoopSelected(const SnoopList&
+                                _cpu_side_ports, Cycles latency) const
     {
-        return std::make_pair(slave_ports, latency);
+        return std::make_pair(_cpu_side_ports, latency);
     }
     std::pair<SnoopList, Cycles> snoopDown(Cycles latency) const
     {
@@ -253,7 +253,7 @@
   private:
 
     /**
-     * Removes snoop filter items which have no requesters and no holders.
+     * Removes snoop filter items which have no requestors and no holders.
      */
     void eraseIfNullEntry(SnoopFilterCache::iterator& sf_it);
 
@@ -290,10 +290,10 @@
         ReqLookupResult() = delete;
     } reqLookupResult;
 
-    /** List of all attached snooping slave ports. */
-    SnoopList slavePorts;
+    /** List of all attached snooping CPU-side ports. */
+    SnoopList cpuSidePorts;
     /** Track the mapping from port ids to the local mask ids. */
-    std::vector<PortID> localSlavePortIds;
+    std::vector<PortID> localResponsePortIds;
     /** Cache line size. */
     const unsigned linesize;
     /** Latency for doing a lookup in the filter */
@@ -325,14 +325,14 @@
     assert(port.getId() != InvalidPortID);
     // if this is not a snooping port, return a zero mask
     return !port.isSnooping() ? 0 :
-        ((SnoopMask)1) << localSlavePortIds[port.getId()];
+        ((SnoopMask)1) << localResponsePortIds[port.getId()];
 }
 
 inline SnoopFilter::SnoopList
 SnoopFilter::maskToPortList(SnoopMask port_mask) const
 {
     SnoopList res;
-    for (const auto& p : slavePorts)
+    for (const auto& p : cpuSidePorts)
         if ((port_mask & portToMask(*p)).any())
             res.push_back(p);
     return res;
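
    // For context, the snoop filter is owned by a coherent crossbar, which
    // registers its snooping CPU-side ports with the filter via
    // setCPUSidePorts(). A hedged Python config-level sketch; SystemXBar and
    // the lookup_latency parameter are assumed unchanged by this patch, and
    // `cpu` and `membus` are hypothetical names. Illustrative only:
    //
    //     membus = SystemXBar()
    //     membus.snoop_filter = SnoopFilter(lookup_latency=1)
    //     # The crossbar itself hands its snooping cpu_side_ports to the
    //     # filter; user scripts only attach the ports.
    //     cpu.icache.mem_side = membus.cpu_side_ports
    //     cpu.dcache.mem_side = membus.cpu_side_ports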
diff --git a/src/mem/token_port.cc b/src/mem/token_port.cc
index 648d041..97c59a0 100644
--- a/src/mem/token_port.cc
+++ b/src/mem/token_port.cc
@@ -40,13 +40,13 @@
 #include "debug/TokenPort.hh"
 
 void
-TokenMasterPort::bind(Port &peer)
+TokenRequestPort::bind(Port &peer)
 {
     RequestPort::bind(peer);
 }
 
 void
-TokenMasterPort::recvTokens(int num_tokens)
+TokenRequestPort::recvTokens(int num_tokens)
 {
     panic_if(!tokenManager, "TokenManager not set for %s.\n", name());
 
@@ -54,7 +54,7 @@
 }
 
 bool
-TokenMasterPort::haveTokens(int num_tokens)
+TokenRequestPort::haveTokens(int num_tokens)
 {
     panic_if(!tokenManager, "TokenManager not set for %s.\n", name());
 
@@ -62,7 +62,7 @@
 }
 
 void
-TokenMasterPort::acquireTokens(int num_tokens)
+TokenRequestPort::acquireTokens(int num_tokens)
 {
     panic_if(!tokenManager, "TokenManager not set for %s.\n", name());
 
@@ -70,53 +70,53 @@
 }
 
 void
-TokenMasterPort::setTokenManager(TokenManager *_tokenManager)
+TokenRequestPort::setTokenManager(TokenManager *_tokenManager)
 {
     tokenManager = _tokenManager;
 }
 
 void
-TokenSlavePort::sendTokens(int num_tokens)
+TokenResponsePort::sendTokens(int num_tokens)
 {
-    fatal_if(!tokenMasterPort, "Tried sendTokens to non-token master!\n");
+    fatal_if(!tokenRequestPort, "Tried sendTokens to non-token requestor!\n");
 
-    // Send tokens to a master
-    tokenMasterPort->recvTokens(num_tokens);
+    // Send tokens to a requestor
+    tokenRequestPort->recvTokens(num_tokens);
 }
 
 void
-TokenSlavePort::bind(Port& peer)
+TokenResponsePort::bind(Port& peer)
 {
-    // TokenSlavePort is allowed to bind to either TokenMasterPort or a
-    // RequestPort as fallback. If the type is a RequestPort, tokenMasterPort
+    // TokenResponsePort is allowed to bind to either TokenRequestPort or a
+    // RequestPort as fallback. If the type is a RequestPort, tokenRequestPort
     // is set to nullptr to indicate tokens should not be exchanged.
-    auto *token_master_port = dynamic_cast<TokenMasterPort*>(&peer);
-    auto *master_port = dynamic_cast<RequestPort*>(&peer);
-    if (!token_master_port && !master_port) {
-        fatal("Attempt to bind port %s to unsupported slave port %s.",
+    auto *token_request_port = dynamic_cast<TokenRequestPort*>(&peer);
+    auto *request_port = dynamic_cast<RequestPort*>(&peer);
+    if (!token_request_port && !request_port) {
+        fatal("Attempt to bind port %s to unsupported response port %s.",
               name(), peer.name());
-    } else if (token_master_port) {
-        // slave port keeps track of the master port
-        tokenMasterPort = token_master_port;
+    } else if (token_request_port) {
+        // response port keeps track of the request port
+        tokenRequestPort = token_request_port;
 
-        // master port also keeps track of slave port
-        tokenMasterPort->bind(*this);
-    } else if (master_port) {
-        tokenMasterPort = nullptr;
+        // request port also keeps track of response port
+        tokenRequestPort->bind(*this);
+    } else if (request_port) {
+        tokenRequestPort = nullptr;
     }
 }
 
 void
-TokenSlavePort::unbind()
+TokenResponsePort::unbind()
 {
     ResponsePort::responderUnbind();
-    tokenMasterPort = nullptr;
+    tokenRequestPort = nullptr;
 }
 
 void
-TokenSlavePort::recvRespRetry()
+TokenResponsePort::recvRespRetry()
 {
-    // fallback to QueuedSlavePort-like impl for now
+    // fallback to QueuedResponsePort-like impl for now
     panic_if(respQueue.empty(),
              "Attempted to retry a response when no retry was queued!\n");
 
@@ -129,7 +129,7 @@
 }
 
 bool
-TokenSlavePort::sendTimingResp(PacketPtr pkt)
+TokenResponsePort::sendTimingResp(PacketPtr pkt)
 {
     bool success = ResponsePort::sendTimingResp(pkt);
 
diff --git a/src/mem/token_port.hh b/src/mem/token_port.hh
index 358ee03..617b9f9 100644
--- a/src/mem/token_port.hh
+++ b/src/mem/token_port.hh
@@ -38,33 +38,33 @@
 #include "sim/clocked_object.hh"
 
 class TokenManager;
-class TokenSlavePort;
+class TokenResponsePort;
 
-class TokenMasterPort : public RequestPort
+class TokenRequestPort : public RequestPort
 {
   private:
     /* Manager to track tokens between this token port pair. */
     TokenManager *tokenManager;
 
   public:
-    TokenMasterPort(const std::string& name, SimObject* owner,
+    TokenRequestPort(const std::string& name, SimObject* owner,
                     PortID id = InvalidPortID) :
         RequestPort(name, owner, id), tokenManager(nullptr)
     { }
 
     /**
-     * Bind this master port to slave port. Called by the slave port in
+     * Bind this request port to response port. Called by the response port in
      * this token implementation.
      */
     void bind(Port &peer) override;
 
     /**
-     * Unbind port. Handled by slave port in token implementation.
+     * Unbind port. Handled by response port in token implementation.
      */
     void unbind() override {}
 
     /**
-     * Receive tokens returned by the slave port. This increments the number
+     * Receive tokens returned by the response port. This increments the number
      * or available tokens across the port.
      */
     void recvTokens(int num_tokens);
@@ -82,41 +82,41 @@
 
     /**
      * Specify a token manger, which will handle tracking of tokens for a
-     * TokenMasterPort/SlaveMasterPort pair.
+     * TokenRequestPort/TokenResponsePort pair.
      */
     void setTokenManager(TokenManager *_tokenManager);
 };
 
-class TokenSlavePort : public ResponsePort
+class TokenResponsePort : public ResponsePort
 {
   private:
-    TokenMasterPort *tokenMasterPort;
+    TokenRequestPort *tokenRequestPort;
 
     std::deque<PacketPtr> respQueue;
 
     void recvRespRetry() override;
 
   public:
-    TokenSlavePort(const std::string& name, ClockedObject *owner,
+    TokenResponsePort(const std::string& name, ClockedObject *owner,
                    PortID id = InvalidPortID) :
-        ResponsePort(name, owner, id), tokenMasterPort(nullptr)
+        ResponsePort(name, owner, id), tokenRequestPort(nullptr)
     { }
-    ~TokenSlavePort() { }
+    ~TokenResponsePort() { }
 
     /**
-     * Bind this slave port to a master port. This also does the mirror
-     * action and bainds the master port to the slave port as well as
+     * Bind this response port to a request port. This also does the mirror
+     * action and binds the request port to the response port as well as
      * binding the base class types.
      */
     void bind(Port &peer) override;
 
     /**
-     * Unbind this slave port and associated master port.
+     * Unbind this response port and associated request port.
      */
     void unbind() override;
 
     /**
-     * Return num_tokens tokens back to the master port.
+     * Return num_tokens tokens back to the request port.
      */
     void sendTokens(int num_tokens);
 
diff --git a/src/mem/tport.cc b/src/mem/tport.cc
index 2bcb022..301dfb1 100644
--- a/src/mem/tport.cc
+++ b/src/mem/tport.cc
@@ -43,7 +43,7 @@
 
 SimpleTimingPort::SimpleTimingPort(const std::string& _name,
                                    SimObject* _owner) :
-    QueuedSlavePort(_name, _owner, queueImpl), queueImpl(*_owner, *this)
+    QueuedResponsePort(_name, _owner, queueImpl), queueImpl(*_owner, *this)
 {
 }
 
@@ -68,7 +68,7 @@
 
     bool needsResponse = pkt->needsResponse();
     Tick latency = recvAtomic(pkt);
-    // turn packet around to go back to requester if response expected
+    // turn packet around to go back to requestor if response expected
     if (needsResponse) {
         // recvAtomic() should already have turned packet into
         // atomic response
diff --git a/src/mem/tport.hh b/src/mem/tport.hh
index 6c560d3..fe32872 100644
--- a/src/mem/tport.hh
+++ b/src/mem/tport.hh
@@ -54,9 +54,9 @@
 /**
  * The simple timing port uses a queued port to implement
  * recvFunctional and recvTimingReq through recvAtomic. It is always a
- * slave port.
+ * response port.
  */
-class SimpleTimingPort : public QueuedSlavePort
+class SimpleTimingPort : public QueuedResponsePort
 {
 
   private:
@@ -64,7 +64,7 @@
     /**
      * The packet queue used to store outgoing responses. Note that
      * the queue is made private and that we avoid overloading the
-     * name used in the QueuedSlavePort. Access is provided through
+     * name used in the QueuedResponsePort. Access is provided through
      * the queue reference in the base class.
      */
     RespPacketQueue queueImpl;
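For context, a SimpleTimingPort subclass typically only provides recvAtomic(); a minimal sketch under that assumption (the port class, address range and latency below are placeholders, not part of this change):

    #include "mem/tport.hh"

    // Sketch: a response port that implements recvAtomic(); the queued base
    // class derives recvFunctional() and recvTimingReq() from it.
    class ExamplePioPort : public SimpleTimingPort
    {
      public:
        ExamplePioPort(const std::string &name, SimObject *owner)
            : SimpleTimingPort(name, owner) {}

      protected:
        Tick
        recvAtomic(PacketPtr pkt) override
        {
            // Service the access and return its latency; for timing requests
            // the base class queues the resulting response packet.
            if (pkt->needsResponse())
                pkt->makeResponse();
            return 1000; // placeholder latency in ticks
        }

        AddrRangeList
        getAddrRanges() const override
        {
            // Placeholder range advertised to the peer request port.
            return AddrRangeList{AddrRange(0x10000000, 0x10001000)};
        }
    };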
diff --git a/src/mem/translating_port_proxy.cc b/src/mem/translating_port_proxy.cc
index 8bb93cc..1e8d836 100644
--- a/src/mem/translating_port_proxy.cc
+++ b/src/mem/translating_port_proxy.cc
@@ -83,7 +83,7 @@
          gen.next())
     {
         auto req = std::make_shared<Request>(
-                gen.addr(), gen.size(), flags, Request::funcMasterId, 0,
+                gen.addr(), gen.size(), flags, Request::funcRequestorId, 0,
                 _tc->contextId());
 
         if (!tryTLBs(req, BaseTLB::Read))
@@ -105,7 +105,7 @@
          gen.next())
     {
         auto req = std::make_shared<Request>(
-                gen.addr(), gen.size(), flags, Request::funcMasterId, 0,
+                gen.addr(), gen.size(), flags, Request::funcRequestorId, 0,
                 _tc->contextId());
 
         if (!tryTLBs(req, BaseTLB::Write))
@@ -125,7 +125,7 @@
          gen.next())
     {
         auto req = std::make_shared<Request>(
-                gen.addr(), gen.size(), flags, Request::funcMasterId, 0,
+                gen.addr(), gen.size(), flags, Request::funcRequestorId, 0,
                 _tc->contextId());
 
         if (!tryTLBs(req, BaseTLB::Write))
diff --git a/src/mem/xbar.cc b/src/mem/xbar.cc
index 5e2c28c..f9544f8 100644
--- a/src/mem/xbar.cc
+++ b/src/mem/xbar.cc
@@ -59,38 +59,39 @@
       headerLatency(p->header_latency),
       width(p->width),
       gotAddrRanges(p->port_default_connection_count +
-                          p->port_master_connection_count, false),
+                          p->port_mem_side_ports_connection_count, false),
       gotAllAddrRanges(false), defaultPortID(InvalidPortID),
       useDefaultRange(p->use_default_range),
 
       transDist(this, "trans_dist", "Transaction distribution"),
       pktCount(this, "pkt_count",
-                   "Packet count per connected master and slave (bytes)"),
-      pktSize(this, "pkt_size",
-              "Cumulative packet size per connected master and slave (bytes)")
+              "Packet count per connected requestor and responder"),
+      pktSize(this, "pkt_size", "Cumulative packet size per connected "
+             "requestor and responder (bytes)")
 {
 }
 
 BaseXBar::~BaseXBar()
 {
-    for (auto m: masterPorts)
-        delete m;
+    for (auto port: memSidePorts)
+        delete port;
 
-    for (auto s: slavePorts)
-        delete s;
+    for (auto port: cpuSidePorts)
+        delete port;
 }
 
 Port &
 BaseXBar::getPort(const std::string &if_name, PortID idx)
 {
-    if (if_name == "master" && idx < masterPorts.size()) {
-        // the master port index translates directly to the vector position
-        return *masterPorts[idx];
+    if (if_name == "mem_side_ports" && idx < memSidePorts.size()) {
+        // the memory-side ports index translates directly to the vector
+        // position
+        return *memSidePorts[idx];
     } else  if (if_name == "default") {
-        return *masterPorts[defaultPortID];
-    } else if (if_name == "slave" && idx < slavePorts.size()) {
-        // the slave port index translates directly to the vector position
-        return *slavePorts[idx];
+        return *memSidePorts[defaultPortID];
+    } else if (if_name == "cpu_side_ports" && idx < cpuSidePorts.size()) {
+        // the CPU-side ports index translates directly to the vector position
+        return *cpuSidePorts[idx];
     } else {
         return ClockedObject::getPort(if_name, idx);
     }
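These strings are what name-based binding code looks up, so a short sketch of connecting to the renamed ports may help; it mirrors what CxxConfigManager::bindPort does later in this change, and the object pointers and CPU port name are placeholders:

    // Sketch: resolve ports by their new names and bind requestor to responder.
    Port &req = cpu->getPort("dcache_port");           // a CPU request port
    Port &resp = xbar->getPort("cpu_side_ports", 0);   // previously "slave"
    req.bind(resp);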
@@ -179,7 +180,7 @@
 {
     // if we are in the retry state, we will not see anything but the
     // retrying port (or in the case of the snoop ports the snoop
-    // response port that mirrors the actual slave port) as we leave
+    // response port that mirrors the actual CPU-side port) as we leave
     // this state again in zero time if the peer does not immediately
     // call the layer when receiving the retry
 
@@ -326,7 +327,7 @@
 BaseXBar::findPort(AddrRange addr_range)
 {
     // we should never see any address lookups before we've got the
-    // ranges of all connected slave modules
+    // ranges of all connected CPU-side-port modules
     assert(gotAllAddrRanges);
 
     // Check the address map interval tree
@@ -356,14 +357,14 @@
 
 /** Function called by the port when the crossbar is receiving a range change.*/
 void
-BaseXBar::recvRangeChange(PortID master_port_id)
+BaseXBar::recvRangeChange(PortID mem_side_port_id)
 {
-    DPRINTF(AddrRanges, "Received range change from slave port %s\n",
-            masterPorts[master_port_id]->getPeer());
+    DPRINTF(AddrRanges, "Received range change from cpu_side_ports %s\n",
+            memSidePorts[mem_side_port_id]->getPeer());
 
-    // remember that we got a range from this master port and thus the
-    // connected slave module
-    gotAddrRanges[master_port_id] = true;
+    // remember that we got a range from this memory-side port and thus the
+    // connected CPU-side-port module
+    gotAddrRanges[mem_side_port_id] = true;
 
     // update the global flag
     if (!gotAllAddrRanges) {
@@ -375,19 +376,20 @@
             gotAllAddrRanges &= *r++;
         }
         if (gotAllAddrRanges)
-            DPRINTF(AddrRanges, "Got address ranges from all slaves\n");
+            DPRINTF(AddrRanges, "Got address ranges from all responders\n");
     }
 
     // note that we could get the range from the default port at any
     // point in time, and we cannot assume that the default range is
     // set before the other ones are, so we do additional checks once
     // all ranges are provided
-    if (master_port_id == defaultPortID) {
+    if (mem_side_port_id == defaultPortID) {
         // only update if we are indeed checking ranges for the
         // default port since the port might not have a valid range
         // otherwise
         if (useDefaultRange) {
-            AddrRangeList ranges = masterPorts[master_port_id]->getAddrRanges();
+            AddrRangeList ranges = memSidePorts[mem_side_port_id]->
+                                   getAddrRanges();
 
             if (ranges.size() != 1)
                 fatal("Crossbar %s may only have a single default range",
@@ -398,9 +400,9 @@
     } else {
         // the ports are allowed to update their address ranges
         // dynamically, so remove any existing entries
-        if (gotAddrRanges[master_port_id]) {
+        if (gotAddrRanges[mem_side_port_id]) {
             for (auto p = portMap.begin(); p != portMap.end(); ) {
-                if (p->second == master_port_id)
+                if (p->second == mem_side_port_id)
                     // erasing invalidates the iterator, so advance it
                     // before the deletion takes place
                     portMap.erase(p++);
@@ -409,25 +411,26 @@
             }
         }
 
-        AddrRangeList ranges = masterPorts[master_port_id]->getAddrRanges();
+        AddrRangeList ranges = memSidePorts[mem_side_port_id]->
+                               getAddrRanges();
 
         for (const auto& r: ranges) {
             DPRINTF(AddrRanges, "Adding range %s for id %d\n",
-                    r.to_string(), master_port_id);
-            if (portMap.insert(r, master_port_id) == portMap.end()) {
+                    r.to_string(), mem_side_port_id);
+            if (portMap.insert(r, mem_side_port_id) == portMap.end()) {
                 PortID conflict_id = portMap.intersects(r)->second;
                 fatal("%s has two ports responding within range "
                       "%s:\n\t%s\n\t%s\n",
                       name(),
                       r.to_string(),
-                      masterPorts[master_port_id]->getPeer(),
-                      masterPorts[conflict_id]->getPeer());
+                      memSidePorts[mem_side_port_id]->getPeer(),
+                      memSidePorts[conflict_id]->getPeer());
             }
         }
     }
 
-    // if we have received ranges from all our neighbouring slave
-    // modules, go ahead and tell our connected master modules in
+    // if we have received ranges from all our neighbouring CPU-side-port
+    // modules, go ahead and tell our connected memory-side-port modules in
     // turn, this effectively assumes a tree structure of the system
     if (gotAllAddrRanges) {
         DPRINTF(AddrRanges, "Aggregating address ranges\n");
@@ -508,10 +511,10 @@
             }
         }
 
-        // tell all our neighbouring master ports that our address
+        // tell all our neighbouring memory-side ports that our address
         // ranges have changed
-        for (const auto& s: slavePorts)
-            s->sendRangeChange();
+        for (const auto& port: cpuSidePorts)
+            port->sendRangeChange();
     }
 }
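The range-change handshake handled above is initiated from the responder side; a minimal sketch of that side, assuming a placeholder ExampleMemory device that owns a ResponsePort member named port (not part of this change):

    // Sketch: a responder advertises its range; the peer crossbar then runs
    // recvRangeChange() for the memory-side port it is connected through.
    AddrRangeList
    ExampleMemory::MemPort::getAddrRanges() const
    {
        return AddrRangeList{AddrRange(0x80000000, 0xC0000000)}; // placeholder
    }

    void
    ExampleMemory::init()
    {
        // Ranges become valid once the port is connected, so announce them.
        port.sendRangeChange();
    }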
 
@@ -524,7 +527,7 @@
     assert(gotAllAddrRanges);
 
     // at the moment, this never happens, as there are no cycles in
-    // the range queries and no devices on the master side of a crossbar
+    // the range queries and no devices on the memory side of a crossbar
     // (CPU, cache, bridge etc) actually care about the ranges of the
     // ports they are connected to
 
@@ -552,25 +555,26 @@
     }
 
     pktCount
-        .init(slavePorts.size(), masterPorts.size())
+        .init(cpuSidePorts.size(), memSidePorts.size())
         .flags(total | nozero | nonan);
 
     pktSize
-        .init(slavePorts.size(), masterPorts.size())
+        .init(cpuSidePorts.size(), memSidePorts.size())
         .flags(total | nozero | nonan);
 
     // both the packet count and total size are two-dimensional
-    // vectors, indexed by slave port id and master port id, thus the
-    // neighbouring master and slave, they do not differentiate what
-    // came from the master and was forwarded to the slave (requests
-    // and snoop responses) and what came from the slave and was
-    // forwarded to the master (responses and snoop requests)
-    for (int i = 0; i < slavePorts.size(); i++) {
-        pktCount.subname(i, slavePorts[i]->getPeer().name());
-        pktSize.subname(i, slavePorts[i]->getPeer().name());
-        for (int j = 0; j < masterPorts.size(); j++) {
-            pktCount.ysubname(j, masterPorts[j]->getPeer().name());
-            pktSize.ysubname(j, masterPorts[j]->getPeer().name());
+    // vectors, indexed by CPU-side port id and memory-side port id, thus the
+    // neighbouring memory-side ports and CPU-side ports, they do not
+    // differentiate what came from the memory-side ports and was forwarded to
+    // the CPU-side ports (requests and snoop responses) and what came from
+    // the CPU-side ports and was forwarded to the memory-side ports (responses
+    // and snoop requests)
+    for (int i = 0; i < cpuSidePorts.size(); i++) {
+        pktCount.subname(i, cpuSidePorts[i]->getPeer().name());
+        pktSize.subname(i, cpuSidePorts[i]->getPeer().name());
+        for (int j = 0; j < memSidePorts.size(); j++) {
+            pktCount.ysubname(j, memSidePorts[j]->getPeer().name());
+            pktSize.ysubname(j, memSidePorts[j]->getPeer().name());
         }
     }
 }
diff --git a/src/mem/xbar.hh b/src/mem/xbar.hh
index 535277a..cf06742 100644
--- a/src/mem/xbar.hh
+++ b/src/mem/xbar.hh
@@ -79,10 +79,10 @@
      * PCIe, etc.
      *
      * The template parameter, PortClass, indicates the destination
-     * port type for the layer. The retry list holds either master
-     * ports or slave ports, depending on the direction of the
-     * layer. Thus, a request layer has a retry list containing slave
-     * ports, whereas a response layer holds master ports.
+     * port type for the layer. The retry list holds either memory-side ports
+     * or CPU-side ports, depending on the direction of the
+     * layer. Thus, a request layer has a retry list containing
+     * CPU-side ports, whereas a response layer holds memory-side ports.
      */
     template <typename SrcType, typename DstType>
     class Layer : public Drainable, public Stats::Group
@@ -332,9 +332,9 @@
      * Function called by the port when the crossbar is receiving a
      * range change.
      *
-     * @param master_port_id id of the port that received the change
+     * @param mem_side_port_id id of the port that received the change
      */
-    virtual void recvRangeChange(PortID master_port_id);
+    virtual void recvRangeChange(PortID mem_side_port_id);
 
     /**
      * Find which port connected to this crossbar (if any) should be
@@ -364,17 +364,17 @@
     void calcPacketTiming(PacketPtr pkt, Tick header_delay);
 
     /**
-     * Remember for each of the master ports of the crossbar if we got
-     * an address range from the connected slave. For convenience,
-     * also keep track of if we got ranges from all the slave modules
+     * Remember for each of the memory-side ports of the crossbar if we got
+     * an address range from the connected CPU-side port. For convenience,
+     * also keep track of if we got ranges from all the CPU-side-port modules
      * or not.
      */
     std::vector<bool> gotAddrRanges;
     bool gotAllAddrRanges;
 
-    /** The master and slave ports of the crossbar */
-    std::vector<QueuedSlavePort*> slavePorts;
-    std::vector<RequestPort*> masterPorts;
+    /** The memory-side ports and CPU-side ports of the crossbar */
+    std::vector<QueuedResponsePort*> cpuSidePorts;
+    std::vector<RequestPort*> memSidePorts;
 
     /** Port that handles requests that don't match any of the interfaces.*/
     PortID defaultPortID;
@@ -392,9 +392,9 @@
      * crossbar. The transaction distribution is globally counting
      * different types of commands. The packet count and total packet
      * size are two-dimensional vectors that are indexed by the
-     * slave port and master port id (thus the neighbouring master and
-     * neighbouring slave), summing up both directions (request and
-     * response).
+     * CPU-side port and memory-side port id (thus the neighbouring memory-side
+     * ports and neighbouring CPU-side ports), summing up both directions
+     * (request and response).
      */
     Stats::Vector transDist;
     Stats::Vector2d pktCount;
diff --git a/src/python/m5/SimObject.py b/src/python/m5/SimObject.py
index 7c4c809..9c9a9ed 100644
--- a/src/python/m5/SimObject.py
+++ b/src/python/m5/SimObject.py
@@ -186,11 +186,11 @@
 
         for port in simobj._ports.values():
             is_vector = isinstance(port, m5.params.VectorPort)
-            is_master = port.role == 'MASTER'
+            is_requestor = port.role == 'GEM5 REQUESTOR'
 
             code('ports["%s"] = new PortDesc("%s", %s, %s);' %
                 (port.name, port.name, cxx_bool(is_vector),
-                cxx_bool(is_master)))
+                cxx_bool(is_requestor)))
 
         code.dedent()
         code('}')
diff --git a/src/python/m5/params.py b/src/python/m5/params.py
index 4e58667..45082d7 100644
--- a/src/python/m5/params.py
+++ b/src/python/m5/params.py
@@ -2120,13 +2120,13 @@
     def cxx_decl(self, code):
         code('unsigned int port_${{self.name}}_connection_count;')
 
-Port.compat('GEM5 REQUESTER', 'GEM5 RESPONDER')
+Port.compat('GEM5 REQUESTOR', 'GEM5 RESPONDER')
 
 class RequestPort(Port):
     # RequestPort("description")
     def __init__(self, desc):
         super(RequestPort, self).__init__(
-                'GEM5 REQUESTER', desc, is_source=True)
+                'GEM5 REQUESTOR', desc, is_source=True)
 
 class ResponsePort(Port):
     # ResponsePort("description")
@@ -2143,7 +2143,7 @@
     # VectorRequestPort("description")
     def __init__(self, desc):
         super(VectorRequestPort, self).__init__(
-                'GEM5 REQUESTER', desc, is_source=True)
+                'GEM5 REQUESTOR', desc, is_source=True)
 
 class VectorResponsePort(VectorPort):
     # VectorResponsePort("description")
diff --git a/src/python/m5/util/dot_writer.py b/src/python/m5/util/dot_writer.py
index c66108d..8b757e8 100644
--- a/src/python/m5/util/dot_writer.py
+++ b/src/python/m5/util/dot_writer.py
@@ -42,7 +42,7 @@
 # view. The output generated by do_dot() is a DOT-based figure (as a
 # pdf and an editable svg file) and its source dot code. Nodes are
 # components, and edges represent the memory hierarchy: the edges are
-# directed, from a master to slave. Initially all nodes are
+# directed, from a requestor to responder. Initially all nodes are
 # generated, and then all edges are added. do_dot should be called
 # with the top-most SimObject (namely root but not necessarily), the
 # output folder and the output dot source filename. From the given
diff --git a/src/sim/cxx_config.hh b/src/sim/cxx_config.hh
index 9f8a07d..9a45e76 100644
--- a/src/sim/cxx_config.hh
+++ b/src/sim/cxx_config.hh
@@ -95,12 +95,12 @@
         /* Is this a vector or singleton parameters/SimObject */
         const bool isVector;
 
-        /** Is this a master or slave port */
-        const bool isMaster;
+        /** Is this a request or response port */
+        const bool isRequestor;
 
         PortDesc(const std::string &name_,
-            bool isVector_, bool isMaster_) :
-            name(name_), isVector(isVector_), isMaster(isMaster_)
+            bool isVector_, bool isRequestor_) :
+            name(name_), isVector(isVector_), isRequestor(isRequestor_)
         { }
     };
 
diff --git a/src/sim/cxx_manager.cc b/src/sim/cxx_manager.cc
index 71ee10b..7df3bca 100644
--- a/src/sim/cxx_manager.cc
+++ b/src/sim/cxx_manager.cc
@@ -444,72 +444,74 @@
 
 void
 CxxConfigManager::bindPort(
-    SimObject *master_object, const std::string &master_port_name,
-    PortID master_port_index,
-    SimObject *slave_object, const std::string &slave_port_name,
-    PortID slave_port_index)
+    SimObject *requestor_object, const std::string &request_port_name,
+    PortID request_port_index,
+    SimObject *responder_object, const std::string &response_port_name,
+    PortID response_port_index)
 {
-    /* FIXME, check slave_port_index against connection_count
+    /* FIXME, check response_port_index against connection_count
      *  defined for port, need getPortConnectionCount and a
      *  getCxxConfigDirectoryEntry for each object. */
 
     /* It would be nice to be able to catch the errors from these calls. */
-    Port &master_port = master_object->getPort(
-        master_port_name, master_port_index);
-    Port &slave_port = slave_object->getPort(
-        slave_port_name, slave_port_index);
+    Port &request_port = requestor_object->getPort(
+        request_port_name, request_port_index);
+    Port &response_port = responder_object->getPort(
+        response_port_name, response_port_index);
 
-    if (master_port.isConnected()) {
-        throw Exception(master_object->name(), csprintf(
-            "Master port: %s[%d] is already connected\n", master_port_name,
-            master_port_index));
+    if (request_port.isConnected()) {
+        throw Exception(requestor_object->name(), csprintf(
+            "Request port: %s[%d] is already connected\n", request_port_name,
+            request_port_index));
     }
 
-    if (slave_port.isConnected()) {
-        throw Exception(slave_object->name(), csprintf(
-            "Slave port: %s[%d] is already connected\n", slave_port_name,
-            slave_port_index));
+    if (response_port.isConnected()) {
+        throw Exception(responder_object->name(), csprintf(
+            "Response port: %s[%d] is already connected\n", response_port_name,
+            response_port_index));
     }
 
     DPRINTF(CxxConfig, "Binding port %s.%s[%d]"
         " to %s:%s[%d]\n",
-        master_object->name(), master_port_name, master_port_index,
-        slave_object->name(), slave_port_name, slave_port_index);
+        requestor_object->name(), request_port_name, request_port_index,
+        responder_object->name(), response_port_name, response_port_index);
 
-    master_port.bind(slave_port);
+    request_port.bind(response_port);
 }
 
 void
-CxxConfigManager::bindMasterPort(SimObject *object,
+CxxConfigManager::bindRequestPort(SimObject *object,
     const CxxConfigDirectoryEntry::PortDesc &port,
     const std::vector<std::string> &peers)
 {
-    unsigned int master_port_index = 0;
+    unsigned int request_port_index = 0;
 
     for (auto peer_i = peers.begin(); peer_i != peers.end();
         ++peer_i)
     {
         const std::string &peer = *peer_i;
-        std::string slave_object_name;
-        std::string slave_port_name;
-        unsigned int slave_port_index;
+        std::string response_object_name;
+        std::string response_port_name;
+        unsigned int response_port_index;
 
-        parsePort(peer, slave_object_name, slave_port_name,
-            slave_port_index);
+        parsePort(peer, response_object_name, response_port_name,
+            response_port_index);
 
-        std::string slave_instance_name = rename(slave_object_name);
+        std::string response_instance_name = rename(response_object_name);
 
-        if (objectsByName.find(slave_instance_name) == objectsByName.end()) {
+        if (objectsByName.find(response_instance_name)
+            == objectsByName.end()) {
             throw Exception(object->name(), csprintf(
-                "Can't find slave port object: %s", slave_instance_name));
+                "Can't find response port object: %s",
+                response_instance_name));
         }
 
-        SimObject *slave_object = objectsByName[slave_instance_name];
+        SimObject *responder_object = objectsByName[response_instance_name];
 
-        bindPort(object, port.name, master_port_index,
-            slave_object, slave_port_name, slave_port_index);
+        bindPort(object, port.name, request_port_index,
+            responder_object, response_port_name, response_port_index);
 
-        master_port_index++;
+        request_port_index++;
     }
 }
 
@@ -540,14 +542,14 @@
 
-        /* Only handle master ports as binding only needs to happen once
+        /* Only handle request ports as binding only needs to happen once
          *  for each observed pair of ports */
-        if (port->isMaster) {
+        if (port->isRequestor) {
             if (!port->isVector && peers.size() > 1) {
                 throw Exception(instance_name, csprintf(
                     "Too many connections to non-vector port %s (%d)\n",
                     port->name, peers.size()));
             }
 
-            bindMasterPort(object, *port, peers);
+            bindRequestPort(object, *port, peers);
         }
     }
 }
diff --git a/src/sim/cxx_manager.hh b/src/sim/cxx_manager.hh
index 30339ad..e2cbadd 100644
--- a/src/sim/cxx_manager.hh
+++ b/src/sim/cxx_manager.hh
@@ -126,14 +126,14 @@
     std::list<Renaming> renamings;
 
     /** Bind a single connection between two objects' ports */
-    void bindPort(SimObject *masterObject, const std::string &masterPort,
-        PortID masterPortIndex, SimObject *slaveObject,
-        const std::string &slavePort, PortID slavePortIndex);
+    void bindPort(SimObject *requestorObject, const std::string &requestPort,
+        PortID requestPortIndex, SimObject *responderObject,
+        const std::string &responsePort, PortID responsePortIndex);
 
-    /** Bind a single (possibly vectored) master port to peers from the
+    /** Bind a single (possibly vectored) request port to peers from the
      *  unparsed list peers with elements in the .ini connection format:
      *  path(.path)*.port[index] */
-    void bindMasterPort(SimObject *object,
+    void bindRequestPort(SimObject *object,
         const CxxConfigDirectoryEntry::PortDesc &port,
         const std::vector<std::string> &peers);
 
diff --git a/src/sim/probe/mem.hh b/src/sim/probe/mem.hh
index fed7bcf..444c38e 100644
--- a/src/sim/probe/mem.hh
+++ b/src/sim/probe/mem.hh
@@ -56,7 +56,7 @@
     uint32_t size;
     Request::FlagsType flags;
     Addr pc;
-    MasterID master;
+    RequestorID id;
 
     explicit PacketInfo(const PacketPtr& pkt) :
         cmd(pkt->cmd),
@@ -64,7 +64,7 @@
         size(pkt->getSize()),
         flags(pkt->req->getFlags()),
         pc(pkt->req->hasPC() ? pkt->req->getPC() : 0),
-        master(pkt->req->masterId())  { }
+        id(pkt->req->requestorId())  { }
 };
 
 /**
diff --git a/src/sim/system.cc b/src/sim/system.cc
index 8185f13..cb412a8 100644
--- a/src/sim/system.cc
+++ b/src/sim/system.cc
@@ -246,14 +246,14 @@
           _cacheLineSize == 64 || _cacheLineSize == 128))
         warn_once("Cache line size is neither 16, 32, 64 nor 128 bytes.\n");
 
-    // Get the generic system master IDs
-    MasterID tmp_id M5_VAR_USED;
-    tmp_id = getMasterId(this, "writebacks");
-    assert(tmp_id == Request::wbMasterId);
-    tmp_id = getMasterId(this, "functional");
-    assert(tmp_id == Request::funcMasterId);
-    tmp_id = getMasterId(this, "interrupt");
-    assert(tmp_id == Request::intMasterId);
+    // Get the generic system requestor IDs
+    RequestorID tmp_id M5_VAR_USED;
+    tmp_id = getRequestorId(this, "writebacks");
+    assert(tmp_id == Request::wbRequestorId);
+    tmp_id = getRequestorId(this, "functional");
+    assert(tmp_id == Request::funcRequestorId);
+    tmp_id = getRequestorId(this, "interrupt");
+    assert(tmp_id == Request::intRequestorId);
 
     // increment the number of running systems
     numSystemsRunning++;
@@ -420,28 +420,28 @@
 }
 
 void
-System::addDeviceMemory(MasterID masterId, AbstractMemory *deviceMemory)
+System::addDeviceMemory(RequestorID requestor_id, AbstractMemory *deviceMemory)
 {
-    if (!deviceMemMap.count(masterId)) {
-        deviceMemMap.insert(std::make_pair(masterId, deviceMemory));
+    if (!deviceMemMap.count(requestor_id)) {
+        deviceMemMap.insert(std::make_pair(requestor_id, deviceMemory));
     }
 }
 
 bool
 System::isDeviceMemAddr(PacketPtr pkt) const
 {
-    const MasterID& mid = pkt->masterId();
+    const RequestorID& id = pkt->requestorId();
 
-    return (deviceMemMap.count(mid) &&
-            deviceMemMap.at(mid)->getAddrRange().contains(pkt->getAddr()));
+    return (deviceMemMap.count(id) &&
+            deviceMemMap.at(id)->getAddrRange().contains(pkt->getAddr()));
 }
 
 AbstractMemory *
-System::getDeviceMemory(MasterID mid) const
+System::getDeviceMemory(RequestorID id) const
 {
-    panic_if(!deviceMemMap.count(mid),
-             "No device memory found for MasterID %d\n", mid);
-    return deviceMemMap.at(mid);
+    panic_if(!deviceMemMap.count(id),
+             "No device memory found for RequestorID %d\n", id);
+    return deviceMemMap.at(id);
 }
 
 void
@@ -544,73 +544,74 @@
 }
 
 std::string
-System::stripSystemName(const std::string& master_name) const
+System::stripSystemName(const std::string& requestor_name) const
 {
-    if (startswith(master_name, name())) {
-        return master_name.substr(name().size());
+    if (startswith(requestor_name, name())) {
+        return requestor_name.substr(name().size());
     } else {
-        return master_name;
+        return requestor_name;
     }
 }
 
-MasterID
-System::lookupMasterId(const SimObject* obj) const
+RequestorID
+System::lookupRequestorId(const SimObject* obj) const
 {
-    MasterID id = Request::invldMasterId;
+    RequestorID id = Request::invldRequestorId;
 
     // number of occurrences of the SimObject pointer
-    // in the master list.
+    // in the requestor list.
     auto obj_number = 0;
 
-    for (int i = 0; i < masters.size(); i++) {
-        if (masters[i].obj == obj) {
+    for (int i = 0; i < requestors.size(); i++) {
+        if (requestors[i].obj == obj) {
             id = i;
             obj_number++;
         }
     }
 
     fatal_if(obj_number > 1,
-        "Cannot lookup MasterID by SimObject pointer: "
-        "More than one master is sharing the same SimObject\n");
+        "Cannot lookup RequestorID by SimObject pointer: "
+        "More than one requestor is sharing the same SimObject\n");
 
     return id;
 }
 
-MasterID
-System::lookupMasterId(const std::string& master_name) const
+RequestorID
+System::lookupRequestorId(const std::string& requestor_name) const
 {
-    std::string name = stripSystemName(master_name);
+    std::string name = stripSystemName(requestor_name);
 
-    for (int i = 0; i < masters.size(); i++) {
-        if (masters[i].masterName == name) {
+    for (int i = 0; i < requestors.size(); i++) {
+        if (requestors[i].req_name == name) {
             return i;
         }
     }
 
-    return Request::invldMasterId;
+    return Request::invldRequestorId;
 }
 
-MasterID
-System::getGlobalMasterId(const std::string& master_name)
+RequestorID
+System::getGlobalRequestorId(const std::string& requestor_name)
 {
-    return _getMasterId(nullptr, master_name);
+    return _getRequestorId(nullptr, requestor_name);
 }
 
-MasterID
-System::getMasterId(const SimObject* master, std::string submaster)
+RequestorID
+System::getRequestorId(const SimObject* requestor, std::string subrequestor)
 {
-    auto master_name = leafMasterName(master, submaster);
-    return _getMasterId(master, master_name);
+    auto requestor_name = leafRequestorName(requestor, subrequestor);
+    return _getRequestorId(requestor, requestor_name);
 }
 
-MasterID
-System::_getMasterId(const SimObject* master, const std::string& master_name)
+RequestorID
+System::_getRequestorId(const SimObject* requestor,
+                     const std::string& requestor_name)
 {
-    std::string name = stripSystemName(master_name);
+    std::string name = stripSystemName(requestor_name);
 
     // CPUs in switch_cpus ask for ids again after switching
-    for (int i = 0; i < masters.size(); i++) {
-        if (masters[i].masterName == name) {
+    for (int i = 0; i < requestors.size(); i++) {
+        if (requestors[i].req_name == name) {
             return i;
         }
     }
@@ -620,39 +621,40 @@
     // they will be too small
 
     if (Stats::enabled()) {
-        fatal("Can't request a masterId after regStats(). "
+        fatal("Can't request a requestorId after regStats(). "
                 "You must do so in init().\n");
     }
 
-    // Generate a new MasterID incrementally
-    MasterID master_id = masters.size();
+    // Generate a new RequestorID incrementally
+    RequestorID requestor_id = requestors.size();
 
-    // Append the new Master metadata to the group of system Masters.
-    masters.emplace_back(master, name, master_id);
+    // Append the new Requestor metadata to the group of system Requestors.
+    requestors.emplace_back(requestor, name, requestor_id);
 
-    return masters.back().masterId;
+    return requestors.back().id;
 }
 
 std::string
-System::leafMasterName(const SimObject* master, const std::string& submaster)
+System::leafRequestorName(const SimObject* requestor,
+                       const std::string& subrequestor)
 {
-    if (submaster.empty()) {
-        return master->name();
+    if (subrequestor.empty()) {
+        return requestor->name();
     } else {
-        // Get the full master name by appending the submaster name to
-        // the root SimObject master name
-        return master->name() + "." + submaster;
+        // Get the full requestor name by appending the subrequestor name to
+        // the root SimObject requestor name
+        return requestor->name() + "." + subrequestor;
     }
 }
 
 std::string
-System::getMasterName(MasterID master_id)
+System::getRequestorName(RequestorID requestor_id)
 {
-    if (master_id >= masters.size())
-        fatal("Invalid master_id passed to getMasterName()\n");
+    if (requestor_id >= requestors.size())
+        fatal("Invalid requestor_id passed to getRequestorName()\n");
 
-    const auto& master_info = masters[master_id];
-    return master_info.masterName;
+    const auto& requestor_info = requestors[requestor_id];
+    return requestor_info.req_name;
 }
 
 System *
diff --git a/src/sim/system.hh b/src/sim/system.hh
index 8b31b2f..7d77c48 100644
--- a/src/sim/system.hh
+++ b/src/sim/system.hh
@@ -55,7 +55,7 @@
 #include "cpu/base.hh"
 #include "cpu/pc_event.hh"
 #include "enums/MemoryMode.hh"
-#include "mem/mem_master.hh"
+#include "mem/mem_requestor.hh"
 #include "mem/physical.hh"
 #include "mem/port.hh"
 #include "mem/port_proxy.hh"
@@ -76,7 +76,7 @@
 
     /**
      * Private class for the system port which is only used as a
-     * master for debug access and for non-structural entities that do
+     * requestor for debug access and for non-structural entities that do
      * not have a port of their own.
      */
     class SystemPort : public RequestPort
@@ -99,7 +99,7 @@
     SystemPort _systemPort;
 
     // Map of memory address ranges for devices with their own backing stores
-    std::unordered_map<MasterID, AbstractMemory *> deviceMemMap;
+    std::unordered_map<RequestorID, AbstractMemory *> deviceMemMap;
 
   public:
 
@@ -358,22 +358,23 @@
 
     /**
      * Add a physical memory range for a device. The ranges added here will
-     * be considered a non-PIO memory address if the masterId of the packet
+     * be considered a non-PIO memory address if the requestorId of the packet
      * and range match something in the device memory map.
      */
-    void addDeviceMemory(MasterID masterID, AbstractMemory *deviceMemory);
+    void addDeviceMemory(RequestorID requestorId,
+                      AbstractMemory *deviceMemory);
 
     /**
      * Similar to isMemAddr but for devices. Checks if a physical address
      * of the packet match an address range of a device corresponding to the
-     * MasterId of the request.
+     * RequestorId of the request.
      */
     bool isDeviceMemAddr(PacketPtr pkt) const;
 
     /**
      * Return a pointer to the device memory.
      */
-    AbstractMemory *getDeviceMemory(MasterID masterID) const;
+    AbstractMemory *getDeviceMemory(RequestorID _id) const;
 
     /**
      * Get the architecture.
@@ -419,98 +420,99 @@
     uint32_t numWorkIds;
 
     /** This array is a per-system list of all devices capable of issuing a
-     * memory system request and an associated string for each master id.
-     * It's used to uniquely id any master in the system by name for things
+     * memory system request and an associated string for each requestor id.
+     * It's used to uniquely id any requestor in the system by name for things
      * like cache statistics.
      */
-    std::vector<MasterInfo> masters;
+    std::vector<RequestorInfo> requestors;
 
     ThermalModel * thermalModel;
 
   protected:
     /**
-     * Strips off the system name from a master name
+     * Strips off the system name from a requestor name
      */
-    std::string stripSystemName(const std::string& master_name) const;
+    std::string stripSystemName(const std::string& requestor_name) const;
 
   public:
 
     /**
      * Request an id used to create a request object in the system. All objects
      * that intend to issue requests into the memory system must request an id
-     * in the init() phase of startup. All master ids must be fixed by the
+     * in the init() phase of startup. All requestor ids must be fixed by the
      * regStats() phase that immediately precedes it. This allows objects in
-     * the memory system to understand how many masters may exist and
-     * appropriately name the bins of their per-master stats before the stats
-     * are finalized.
+     * the memory system to understand how many requestors may exist and
+     * appropriately name the bins of their per-requestor stats before the
+     * stats are finalized.
      *
-     * Registers a MasterID:
+     * Registers a RequestorID:
      * This method takes two parameters, one of which is optional.
-     * The first one is the master object, and it is compulsory; in case
-     * a object has multiple (sub)masters, a second parameter must be
-     * provided and it contains the name of the submaster. The method will
-     * create a master's name by concatenating the SimObject name with the
-     * eventual submaster string, separated by a dot.
+     * The first one is the requestor object, and it is compulsory; in case
+     * an object has multiple (sub)requestors, a second parameter must be
+     * provided and it contains the name of the subrequestor. The method will
+     * create a requestor's name by concatenating the SimObject name with the
+     * optional subrequestor string, separated by a dot.
      *
      * As an example:
-     * For a cpu having two masters: a data master and an instruction master,
+     * For a cpu having two requestors: a data requestor and an
+     * instruction requestor,
      * the method must be called twice:
      *
-     * instMasterId = getMasterId(cpu, "inst");
-     * dataMasterId = getMasterId(cpu, "data");
+     * instRequestorId = getRequestorId(cpu, "inst");
+     * dataRequestorId = getRequestorId(cpu, "data");
      *
-     * and the masters' names will be:
+     * and the requestors' names will be:
      * - "cpu.inst"
      * - "cpu.data"
      *
-     * @param master SimObject related to the master
-     * @param submaster String containing the submaster's name
-     * @return the master's ID.
+     * @param requestor SimObject related to the requestor
+     * @param subrequestor String containing the subrequestor's name
+     * @return the requestor's ID.
      */
-    MasterID getMasterId(const SimObject* master,
-                         std::string submaster = std::string());
+    RequestorID getRequestorId(const SimObject* requestor,
+                         std::string subrequestor = std::string());
 
     /**
-     * Registers a GLOBAL MasterID, which is a MasterID not related
+     * Registers a GLOBAL RequestorID, which is a RequestorID not related
      * to any particular SimObject; since no SimObject is passed,
-     * the master gets registered by providing the full master name.
+     * the requestor gets registered by providing the full requestor name.
      *
-     * @param masterName full name of the master
-     * @return the master's ID.
+     * @param requestor_name full name of the requestor
+     * @return the requestor's ID.
      */
-    MasterID getGlobalMasterId(const std::string& master_name);
+    RequestorID getGlobalRequestorId(const std::string& requestor_name);
 
     /**
      * Get the name of an object for a given request id.
      */
-    std::string getMasterName(MasterID master_id);
+    std::string getRequestorName(RequestorID requestor_id);
 
     /**
-     * Looks up the MasterID for a given SimObject
-     * returns an invalid MasterID (invldMasterId) if not found.
+     * Looks up the RequestorID for a given SimObject;
+     * returns an invalid RequestorID (invldRequestorId) if not found.
      */
-    MasterID lookupMasterId(const SimObject* obj) const;
+    RequestorID lookupRequestorId(const SimObject* obj) const;
 
     /**
-     * Looks up the MasterID for a given object name string
-     * returns an invalid MasterID (invldMasterId) if not found.
+     * Looks up the RequestorID for a given object name string;
+     * returns an invalid RequestorID (invldRequestorId) if not found.
      */
-    MasterID lookupMasterId(const std::string& name) const;
+    RequestorID lookupRequestorId(const std::string& name) const;
 
-    /** Get the number of masters registered in the system */
-    MasterID maxMasters() { return masters.size(); }
+    /** Get the number of requestors registered in the system */
+    RequestorID maxRequestors() { return requestors.size(); }
 
   protected:
-    /** helper function for getMasterId */
-    MasterID _getMasterId(const SimObject* master,
-                          const std::string& master_name);
+    /** helper function for getRequestorId */
+    RequestorID _getRequestorId(const SimObject* requestor,
+                          const std::string& requestor_name);
 
     /**
-     * Helper function for constructing the full (sub)master name
-     * by providing the root master and the relative submaster name.
+     * Helper function for constructing the full (sub)requestor name
+     * by providing the root requestor and the relative subrequestor name.
      */
-    std::string leafMasterName(const SimObject* master,
-                               const std::string& submaster);
+    std::string leafRequestorName(const SimObject* requestor,
+                               const std::string& subrequestor);
 
   public:
 
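A minimal sketch of the registration contract documented above (the ExampleDmaEngine class, its dmaId member and the read helper are placeholders, not part of this change):

    // Sketch: register a RequestorID before stats are finalized and reuse it
    // for every memory request this object issues.
    void
    ExampleDmaEngine::init()
    {
        dmaId = system->getRequestorId(this, "dma"); // registered as "<name>.dma"
    }

    PacketPtr
    ExampleDmaEngine::buildRead(Addr addr, unsigned size)
    {
        auto req = std::make_shared<Request>(addr, size, Request::Flags(),
                                             dmaId);
        return new Packet(req, MemCmd::ReadReq);
    }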
diff --git a/src/systemc/tests/tlm/multi_sockets/MultiSocketSimpleSwitchAT.h b/src/systemc/tests/tlm/multi_sockets/MultiSocketSimpleSwitchAT.h
index 3e03db3..f43218f 100644
--- a/src/systemc/tests/tlm/multi_sockets/MultiSocketSimpleSwitchAT.h
+++ b/src/systemc/tests/tlm/multi_sockets/MultiSocketSimpleSwitchAT.h
@@ -34,7 +34,7 @@
 This class is a simple crossbar switch through which an arbitrary number of initiators
 may communicate in parallel as long as they do not talk to the same target.
 
-If two masters address the same target at the same point of time,
+If two requestors address the same target at the same point in time,
 the choice of who will be allowed to communicate
 is done non-deterministically (based on the SystemC process execution order).
 
diff --git a/src/systemc/tlm_bridge/TlmBridge.py b/src/systemc/tlm_bridge/TlmBridge.py
index 1851464..0a2aaa7 100644
--- a/src/systemc/tlm_bridge/TlmBridge.py
+++ b/src/systemc/tlm_bridge/TlmBridge.py
@@ -37,7 +37,7 @@
 
     system = Param.System(Parent.any, "system")
 
-    gem5 = ResponsePort('gem5 slave port')
+    gem5 = ResponsePort('gem5 response port')
     addr_ranges = VectorParam.AddrRange([],
             'Addresses served by this port\'s TLM side')
 
@@ -49,7 +49,7 @@
 
     system = Param.System(Parent.any, "system")
 
-    gem5 = RequestPort('gem5 master port')
+    gem5 = RequestPort('gem5 request port')
 
 
 class Gem5ToTlmBridge32(Gem5ToTlmBridgeBase):
diff --git a/src/systemc/tlm_bridge/gem5_to_tlm.cc b/src/systemc/tlm_bridge/gem5_to_tlm.cc
index f3bc1d5..ffcd531 100644
--- a/src/systemc/tlm_bridge/gem5_to_tlm.cc
+++ b/src/systemc/tlm_bridge/gem5_to_tlm.cc
@@ -128,7 +128,7 @@
         // Did another request arrive while blocked, schedule a retry.
         if (needToSendRequestRetry) {
             needToSendRequestRetry = false;
-            bsp.sendRetryReq();
+            bridgeResponsePort.sendRetryReq();
         }
     }
     if (phase == tlm::BEGIN_RESP) {
@@ -147,11 +147,11 @@
          */
         if (extension.isPipeThrough()) {
             if (packet->isResponse()) {
-                need_retry = !bsp.sendTimingResp(packet);
+                need_retry = !bridgeResponsePort.sendTimingResp(packet);
             }
         } else if (packet->needsResponse()) {
             packet->makeResponse();
-            need_retry = !bsp.sendTimingResp(packet);
+            need_retry = !bridgeResponsePort.sendTimingResp(packet);
         }
 
         if (need_retry) {
@@ -381,7 +381,7 @@
     PacketPtr packet =
         Gem5SystemC::Gem5Extension::getExtension(trans).getPacket();
 
-    bool need_retry = !bsp.sendTimingResp(packet);
+    bool need_retry = !bridgeResponsePort.sendTimingResp(packet);
 
     sc_assert(!need_retry);
 
@@ -442,7 +442,8 @@
 template <unsigned int BITWIDTH>
 Gem5ToTlmBridge<BITWIDTH>::Gem5ToTlmBridge(
         Params *params, const sc_core::sc_module_name &mn) :
-    Gem5ToTlmBridgeBase(mn), bsp(std::string(name()) + ".gem5", *this),
+    Gem5ToTlmBridgeBase(mn),
+    bridgeResponsePort(std::string(name()) + ".gem5", *this),
     socket("tlm_socket"),
     wrapper(socket, std::string(name()) + ".tlm", InvalidPortID),
     system(params->system), blockingRequest(nullptr),
@@ -456,7 +457,7 @@
 Gem5ToTlmBridge<BITWIDTH>::gem5_getPort(const std::string &if_name, int idx)
 {
     if (if_name == "gem5")
-        return bsp;
+        return bridgeResponsePort;
     else if (if_name == "tlm")
         return wrapper;
 
@@ -467,7 +468,7 @@
 void
 Gem5ToTlmBridge<BITWIDTH>::before_end_of_elaboration()
 {
-    bsp.sendRangeChange();
+    bridgeResponsePort.sendRangeChange();
 
     socket.register_nb_transport_bw(this, &Gem5ToTlmBridge::nb_transport_bw);
     socket.register_invalidate_direct_mem_ptr(
diff --git a/src/systemc/tlm_bridge/gem5_to_tlm.hh b/src/systemc/tlm_bridge/gem5_to_tlm.hh
index 7e69e3c..1fe0840 100644
--- a/src/systemc/tlm_bridge/gem5_to_tlm.hh
+++ b/src/systemc/tlm_bridge/gem5_to_tlm.hh
@@ -85,7 +85,7 @@
 class Gem5ToTlmBridge : public Gem5ToTlmBridgeBase
 {
   private:
-    class BridgeSlavePort : public ResponsePort
+    class BridgeResponsePort : public ResponsePort
     {
       protected:
         Gem5ToTlmBridge<BITWIDTH> &bridge;
@@ -128,13 +128,13 @@
         void recvRespRetry() override { bridge.recvRespRetry(); }
 
       public:
-        BridgeSlavePort(const std::string &name_,
+        BridgeResponsePort(const std::string &name_,
                         Gem5ToTlmBridge<BITWIDTH> &bridge_) :
             ResponsePort(name_, nullptr), bridge(bridge_)
         {}
     };
 
-    BridgeSlavePort bsp;
+    BridgeResponsePort bridgeResponsePort;
     tlm_utils::simple_initiator_socket<
         Gem5ToTlmBridge<BITWIDTH>, BITWIDTH> socket;
     sc_gem5::TlmInitiatorWrapper<BITWIDTH> wrapper;
diff --git a/src/systemc/tlm_bridge/tlm_to_gem5.cc b/src/systemc/tlm_bridge/tlm_to_gem5.cc
index 2bfbcc4..3891f58 100644
--- a/src/systemc/tlm_bridge/tlm_to_gem5.cc
+++ b/src/systemc/tlm_bridge/tlm_to_gem5.cc
@@ -67,7 +67,7 @@
 {
 
 PacketPtr
-payload2packet(MasterID masterId, tlm::tlm_generic_payload &trans)
+payload2packet(RequestorID _id, tlm::tlm_generic_payload &trans)
 {
     MemCmd cmd;
 
@@ -87,7 +87,7 @@
 
     Request::Flags flags;
     auto req = std::make_shared<Request>(
-        trans.get_address(), trans.get_data_length(), flags, masterId);
+        trans.get_address(), trans.get_data_length(), flags, _id);
 
     /*
      * Allocate a new Packet. The packet will be deleted when it returns from
@@ -156,7 +156,7 @@
         extension->setPipeThrough();
         pkt = extension->getPacket();
     } else {
-        pkt = payload2packet(masterId, trans);
+        pkt = payload2packet(_id, trans);
     }
 
     auto tlmSenderState = new TlmSenderState(trans);
@@ -281,7 +281,7 @@
         extension->setPipeThrough();
         pkt = extension->getPacket();
     } else {
-        pkt = payload2packet(masterId, trans);
+        pkt = payload2packet(_id, trans);
     }
 
     MemBackdoorPtr backdoor = nullptr;
@@ -318,7 +318,7 @@
         extension->setPipeThrough();
         bmp.sendFunctional(extension->getPacket());
     } else {
-        auto pkt = payload2packet(masterId, trans);
+        auto pkt = payload2packet(_id, trans);
         if (pkt) {
             bmp.sendFunctional(pkt);
             destroyPacket(pkt);
@@ -344,7 +344,7 @@
         extension->setPipeThrough();
         pkt = extension->getPacket();
     } else {
-        pkt = payload2packet(masterId, trans);
+        pkt = payload2packet(_id, trans);
         pkt->req->setFlags(Request::NO_ACCESS);
     }
 
@@ -484,7 +484,7 @@
     bmp(std::string(name()) + "master", *this), socket("tlm_socket"),
     wrapper(socket, std::string(name()) + ".tlm", InvalidPortID),
     system(params->system),
-    masterId(params->system->getGlobalMasterId(
+    _id(params->system->getGlobalRequestorId(
                 std::string("[systemc].") + name()))
 {
 }
diff --git a/src/systemc/tlm_bridge/tlm_to_gem5.hh b/src/systemc/tlm_bridge/tlm_to_gem5.hh
index e2e7540..f1e3e08 100644
--- a/src/systemc/tlm_bridge/tlm_to_gem5.hh
+++ b/src/systemc/tlm_bridge/tlm_to_gem5.hh
@@ -89,7 +89,7 @@
         TlmSenderState(tlm::tlm_generic_payload &trans) : trans(trans) {}
     };
 
-    class BridgeMasterPort : public RequestPort
+    class BridgeRequestPort : public RequestPort
     {
       protected:
         TlmToGem5Bridge<BITWIDTH> &bridge;
@@ -103,7 +103,7 @@
         void recvRangeChange() override { bridge.recvRangeChange(); }
 
       public:
-        BridgeMasterPort(const std::string &name_,
+        BridgeRequestPort(const std::string &name_,
                          TlmToGem5Bridge<BITWIDTH> &bridge_) :
             RequestPort(name_, nullptr), bridge(bridge_)
         {}
@@ -119,7 +119,7 @@
 
     bool responseInProgress;
 
-    BridgeMasterPort bmp;
+    BridgeRequestPort bmp;
     tlm_utils::simple_target_socket<
         TlmToGem5Bridge<BITWIDTH>, BITWIDTH> socket;
     sc_gem5::TlmTargetWrapper<BITWIDTH> wrapper;
@@ -171,7 +171,7 @@
 
     void before_end_of_elaboration() override;
 
-    const MasterID masterId;
+    const RequestorID _id;
 };
 
 } // namespace sc_gem5
diff --git a/tests/gem5/x86-boot-tests/system/system.py b/tests/gem5/x86-boot-tests/system/system.py
index bffd08a..b050b6f 100755
--- a/tests/gem5/x86-boot-tests/system/system.py
+++ b/tests/gem5/x86-boot-tests/system/system.py
@@ -318,7 +318,7 @@
 class CowDisk(IdeDisk):
     def __init__(self, filename):
         super(CowDisk, self).__init__()
-        self.driveID = 'master'
+        self.driveID = 'device0'
         self.image = CowDiskImage(child=RawDiskImage(read_only=True),
                                   read_only=False)
         self.image.child.image_file = filename