| /* |
| * Copyright (c) 2010-2015 Advanced Micro Devices, Inc. |
| * All rights reserved. |
| * |
| * For use for simulation and test purposes only |
| * |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are met: |
| * |
| * 1. Redistributions of source code must retain the above copyright notice, |
| * this list of conditions and the following disclaimer. |
| * |
| * 2. Redistributions in binary form must reproduce the above copyright notice, |
| * this list of conditions and the following disclaimer in the documentation |
| * and/or other materials provided with the distribution. |
| * |
| * 3. Neither the name of the copyright holder nor the names of its |
| * contributors may be used to endorse or promote products derived from this |
| * software without specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
| * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
| * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE |
| * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
| * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
| * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
| * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
| * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
| * POSSIBILITY OF SUCH DAMAGE. |
| */ |
| |
| machine(MachineType:Directory, "AMD Baseline protocol") |
| : DirectoryMemory * directory; |
| CacheMemory * L3CacheMemory; |
| Cycles response_latency := 5; |
| Cycles l3_hit_latency := 50; |
| bool noTCCdir := "False"; |
| bool CPUonly := "False"; |
| bool GPUonly := "False"; |
| int TCC_select_num_bits; |
| bool useL3OnWT := "False"; |
| Cycles to_memory_controller_latency := 1; |
| |
| // DMA |
| MessageBuffer * requestFromDMA, network="From", virtual_network="1", vnet_type="request"; |
| MessageBuffer * responseToDMA, network="To", virtual_network="3", vnet_type="response"; |
| |
| // From the Cores |
| MessageBuffer * requestFromCores, network="From", virtual_network="0", vnet_type="request"; |
| MessageBuffer * responseFromCores, network="From", virtual_network="2", vnet_type="response"; |
| MessageBuffer * unblockFromCores, network="From", virtual_network="4", vnet_type="unblock"; |
| |
| MessageBuffer * probeToCore, network="To", virtual_network="0", vnet_type="request"; |
| MessageBuffer * responseToCore, network="To", virtual_network="2", vnet_type="response"; |
| |
| MessageBuffer * triggerQueue; |
| MessageBuffer * L3triggerQueue; |
| |
| MessageBuffer * requestToMemory; |
| MessageBuffer * responseFromMemory; |
| { |
| // STATES |
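| // Transient state naming: _M waits on memory, _PM waits on probes and memory, |
| // _Pm waits on probes with the memory data already in hand; BDR_*/BDW_* are the |
| // DMA read/write variants of those states. |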
| state_declaration(State, desc="Directory states", default="Directory_State_U") { |
| U, AccessPermission:Backing_Store, desc="unblocked"; |
| BL, AccessPermission:Busy, desc="got L3 WB request"; |
| // BL is Busy because the writeback data may exist only in the network: |
| // the L3 has already sent it and moved on, possibly to state I. |
| BDR_M, AccessPermission:Backing_Store, desc="DMA read, blocked waiting for memory"; |
| BS_M, AccessPermission:Backing_Store, desc="blocked waiting for memory"; |
| BM_M, AccessPermission:Backing_Store, desc="blocked waiting for memory"; |
| B_M, AccessPermission:Backing_Store, desc="blocked waiting for memory"; |
| BP, AccessPermission:Backing_Store, desc="blocked waiting for probes, no need for memory"; |
| BDR_PM, AccessPermission:Backing_Store, desc="DMA read, blocked waiting for probes and memory"; |
| BS_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory"; |
| BM_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory"; |
| B_PM, AccessPermission:Backing_Store, desc="blocked waiting for probes and Memory"; |
| BDW_P, AccessPermission:Backing_Store, desc="DMA write, blocked waiting for probes, no need for memory"; |
| BDR_Pm, AccessPermission:Backing_Store, desc="DMA read, blocked waiting for probes, already got memory"; |
| BS_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory"; |
| BM_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory"; |
| B_Pm, AccessPermission:Backing_Store, desc="blocked waiting for probes, already got memory"; |
| B, AccessPermission:Backing_Store, desc="sent response, blocked until ack"; |
| } |
| |
| // Events |
| enumeration(Event, desc="Directory events") { |
| // CPU requests |
| RdBlkS, desc="Read block, install Shared"; |
| RdBlkM, desc="Read block for write, install Modified"; |
| RdBlk, desc="Read block"; |
| CtoD, desc="Change to Dirty (upgrade, no data needed)"; |
| WriteThrough, desc="WriteThrough Message"; |
| Atomic, desc="Atomic Message"; |
| |
| // writebacks |
| VicDirty, desc="Victim writeback of a dirty block"; |
| VicClean, desc="Victim notification for a clean block"; |
| CPUData, desc="WB data from CPU"; |
| StaleWB, desc="Notification that WB has been superseded by a probe"; |
| |
| // probe responses |
| CPUPrbResp, desc="Probe Response Msg"; |
| |
| ProbeAcksComplete, desc="Probe Acks Complete"; |
| |
| L3Hit, desc="Hit in L3 return data to core"; |
| |
| // Memory Controller |
| MemData, desc="Fetched data from memory arrives"; |
| WBAck, desc="Writeback Ack from memory arrives"; |
| |
| CoreUnblock, desc="Core received data, unblock"; |
| UnblockWriteThrough, desc="Unblock because of writethrough request finishing"; |
| |
| StaleVicDirty, desc="Core invalidated before VicDirty processed"; |
| |
| // DMA |
| DmaRead, desc="DMA read"; |
| DmaWrite, desc="DMA write"; |
| } |
| |
| enumeration(RequestType, desc="To communicate stats from transitions to recordStats") { |
| L3DataArrayRead, desc="Read the data array"; |
| L3DataArrayWrite, desc="Write the data array"; |
| L3TagArrayRead, desc="Read the tag array"; |
| L3TagArrayWrite, desc="Write the tag array"; |
| } |
| |
| // TYPES |
| |
| // DirectoryEntry |
| structure(Entry, desc="...", interface="AbstractCacheEntry", main="false") { |
| State DirectoryState, desc="Directory state"; |
| DataBlock DataBlk, desc="data for the block"; |
| NetDest VicDirtyIgnore, desc="VicDirty coming from whom to ignore"; |
| } |
| |
| structure(CacheEntry, desc="...", interface="AbstractCacheEntry") { |
| DataBlock DataBlk, desc="data for the block"; |
| MachineID LastSender, desc="Mach which this block came from"; |
| } |
| |
| structure(TBE, desc="...") { |
| State TBEState, desc="Transient state"; |
| DataBlock DataBlk, desc="data for the block"; |
| bool Dirty, desc="Is the data dirty?"; |
| int NumPendingAcks, desc="num acks expected"; |
| MachineID OriginalRequestor, desc="Original Requestor"; |
| MachineID WTRequestor, desc="WT Requestor"; |
| bool Cached, desc="data hit in Cache"; |
| bool MemData, desc="Got MemData?",default="false"; |
| bool wtData, desc="Got write through data?",default="false"; |
| bool atomicData, desc="Got Atomic op?",default="false"; |
| Cycles InitialRequestTime, desc="Time the original request was issued"; |
| Cycles ForwardRequestTime, desc="Time the request was forwarded by the directory"; |
| Cycles ProbeRequestStartTime, desc="Time probes were issued"; |
| MachineID LastSender, desc="Mach which this block came from"; |
| bool L3Hit, default="false", desc="Was this an L3 hit?"; |
| uint64_t probe_id, desc="probe id for lifetime profiling"; |
| WriteMask writeMask, desc="outstanding write through mask"; |
| int Len, desc="Length of memory request for DMA"; |
| } |
| |
| structure(TBETable, external="yes") { |
| TBE lookup(Addr); |
| void allocate(Addr); |
| void deallocate(Addr); |
| bool isPresent(Addr); |
| } |
| |
| TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs"; |
| |
| int TCC_select_low_bit, default="RubySystem::getBlockSizeBits()"; |
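| // Together with TCC_select_num_bits, these address bits select which TCC/TCCdir |
| // bank a probe is routed to via mapAddressToRange. |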
| |
| Tick clockEdge(); |
| Tick cyclesToTicks(Cycles c); |
| |
| void set_tbe(TBE a); |
| void unset_tbe(); |
| void wakeUpAllBuffers(); |
| void wakeUpAllBuffers(Addr a); |
| void wakeUpBuffers(Addr a); |
| Cycles curCycle(); |
| |
| Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" { |
| Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr)); |
| |
| if (is_valid(dir_entry)) { |
| return dir_entry; |
| } |
| |
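| // Not present yet: allocate the directory entry on first touch. |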
| dir_entry := static_cast(Entry, "pointer", |
| directory.allocate(addr, new Entry)); |
| return dir_entry; |
| } |
| |
| DataBlock getDataBlock(Addr addr), return_by_ref="yes" { |
| TBE tbe := TBEs.lookup(addr); |
| if (is_valid(tbe) && tbe.MemData) { |
| DPRINTF(RubySlicc, "Returning DataBlk from TBE %s:%s\n", addr, tbe); |
| return tbe.DataBlk; |
| } |
| DPRINTF(RubySlicc, "Returning DataBlk from Dir %s:%s\n", addr, getDirectoryEntry(addr)); |
| return getDirectoryEntry(addr).DataBlk; |
| } |
| |
| State getState(TBE tbe, CacheEntry entry, Addr addr) { |
| return getDirectoryEntry(addr).DirectoryState; |
| } |
| |
| void setState(TBE tbe, CacheEntry entry, Addr addr, State state) { |
| getDirectoryEntry(addr).DirectoryState := state; |
| } |
| |
| void functionalRead(Addr addr, Packet *pkt) { |
| TBE tbe := TBEs.lookup(addr); |
| if(is_valid(tbe)) { |
| testAndRead(addr, tbe.DataBlk, pkt); |
| } else { |
| functionalMemoryRead(pkt); |
| } |
| } |
| |
| int functionalWrite(Addr addr, Packet *pkt) { |
| int num_functional_writes := 0; |
| |
| TBE tbe := TBEs.lookup(addr); |
| if(is_valid(tbe)) { |
| num_functional_writes := num_functional_writes + |
| testAndWrite(addr, tbe.DataBlk, pkt); |
| } |
| |
| num_functional_writes := num_functional_writes |
| + functionalMemoryWrite(pkt); |
| return num_functional_writes; |
| } |
| |
| AccessPermission getAccessPermission(Addr addr) { |
| // For this Directory, all permissions are tracked in the directory entry. |
| // Since a block cannot be in a TBE without also being in the directory, |
| // state is kept in one place. |
| if (directory.isPresent(addr)) { |
| return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState); |
| } |
| |
| return AccessPermission:NotPresent; |
| } |
| |
| void setAccessPermission(CacheEntry entry, Addr addr, State state) { |
| getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state)); |
| } |
| |
| void recordRequestType(RequestType request_type, Addr addr) { |
| if (request_type == RequestType:L3DataArrayRead) { |
| L3CacheMemory.recordRequestType(CacheRequestType:DataArrayRead, addr); |
| } else if (request_type == RequestType:L3DataArrayWrite) { |
| L3CacheMemory.recordRequestType(CacheRequestType:DataArrayWrite, addr); |
| } else if (request_type == RequestType:L3TagArrayRead) { |
| L3CacheMemory.recordRequestType(CacheRequestType:TagArrayRead, addr); |
| } else if (request_type == RequestType:L3TagArrayWrite) { |
| L3CacheMemory.recordRequestType(CacheRequestType:TagArrayWrite, addr); |
| } |
| } |
| |
| bool checkResourceAvailable(RequestType request_type, Addr addr) { |
| if (request_type == RequestType:L3DataArrayRead) { |
| return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr); |
| } else if (request_type == RequestType:L3DataArrayWrite) { |
| return L3CacheMemory.checkResourceAvailable(CacheResourceType:DataArray, addr); |
| } else if (request_type == RequestType:L3TagArrayRead) { |
| return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr); |
| } else if (request_type == RequestType:L3TagArrayWrite) { |
| return L3CacheMemory.checkResourceAvailable(CacheResourceType:TagArray, addr); |
| } else { |
| error("Invalid RequestType type in checkResourceAvailable"); |
| return true; |
| } |
| } |
| |
| // ** OUT_PORTS ** |
| out_port(dmaResponseQueue_out, DMAResponseMsg, responseToDMA); |
| |
| out_port(probeNetwork_out, NBProbeRequestMsg, probeToCore); |
| out_port(responseNetwork_out, ResponseMsg, responseToCore); |
| |
| out_port(triggerQueue_out, TriggerMsg, triggerQueue); |
| out_port(L3TriggerQueue_out, TriggerMsg, L3triggerQueue); |
| |
| out_port(memQueue_out, MemoryMsg, requestToMemory); |
| |
| // ** IN_PORTS ** |
| |
| // DMA Ports |
| in_port(dmaRequestQueue_in, DMARequestMsg, requestFromDMA, rank=6) { |
| if (dmaRequestQueue_in.isReady(clockEdge())) { |
| peek(dmaRequestQueue_in, DMARequestMsg) { |
| TBE tbe := TBEs.lookup(in_msg.LineAddress); |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.LineAddress)); |
| if (in_msg.Type == DMARequestType:READ) { |
| trigger(Event:DmaRead, in_msg.LineAddress, entry, tbe); |
| } else if (in_msg.Type == DMARequestType:WRITE) { |
| trigger(Event:DmaWrite, in_msg.LineAddress, entry, tbe); |
| } else { |
| error("Unknown DMA msg"); |
| } |
| } |
| } |
| } |
| |
| // Trigger Queue |
| in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) { |
| if (triggerQueue_in.isReady(clockEdge())) { |
| peek(triggerQueue_in, TriggerMsg) { |
| TBE tbe := TBEs.lookup(in_msg.addr); |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr)); |
| if (in_msg.Type == TriggerType:AcksComplete) { |
| trigger(Event:ProbeAcksComplete, in_msg.addr, entry, tbe); |
| } else if (in_msg.Type == TriggerType:UnblockWriteThrough) { |
| trigger(Event:UnblockWriteThrough, in_msg.addr, entry, tbe); |
| } else { |
| error("Unknown trigger msg"); |
| } |
| } |
| } |
| } |
| |
| in_port(L3TriggerQueue_in, TriggerMsg, L3triggerQueue, rank=4) { |
| if (L3TriggerQueue_in.isReady(clockEdge())) { |
| peek(L3TriggerQueue_in, TriggerMsg) { |
| TBE tbe := TBEs.lookup(in_msg.addr); |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr)); |
| if (in_msg.Type == TriggerType:L3Hit) { |
| trigger(Event:L3Hit, in_msg.addr, entry, tbe); |
| } else { |
| error("Unknown trigger msg"); |
| } |
| } |
| } |
| } |
| |
| // Unblock Network |
| in_port(unblockNetwork_in, UnblockMsg, unblockFromCores, rank=3) { |
| if (unblockNetwork_in.isReady(clockEdge())) { |
| peek(unblockNetwork_in, UnblockMsg) { |
| TBE tbe := TBEs.lookup(in_msg.addr); |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr)); |
| trigger(Event:CoreUnblock, in_msg.addr, entry, tbe); |
| } |
| } |
| } |
| |
| // Core response network |
| in_port(responseNetwork_in, ResponseMsg, responseFromCores, rank=2) { |
| if (responseNetwork_in.isReady(clockEdge())) { |
| peek(responseNetwork_in, ResponseMsg) { |
| TBE tbe := TBEs.lookup(in_msg.addr); |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr)); |
| if (in_msg.Type == CoherenceResponseType:CPUPrbResp) { |
| trigger(Event:CPUPrbResp, in_msg.addr, entry, tbe); |
| } else if (in_msg.Type == CoherenceResponseType:CPUData) { |
| trigger(Event:CPUData, in_msg.addr, entry, tbe); |
| } else if (in_msg.Type == CoherenceResponseType:StaleNotif) { |
| trigger(Event:StaleWB, in_msg.addr, entry, tbe); |
| } else { |
| error("Unexpected response type"); |
| } |
| } |
| } |
| } |
| |
| // off-chip memory request/response is done |
| in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=1) { |
| if (memQueue_in.isReady(clockEdge())) { |
| peek(memQueue_in, MemoryMsg) { |
| TBE tbe := TBEs.lookup(in_msg.addr); |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr)); |
| if (in_msg.Type == MemoryRequestType:MEMORY_READ) { |
| trigger(Event:MemData, in_msg.addr, entry, tbe); |
| DPRINTF(RubySlicc, "%s\n", in_msg); |
| } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) { |
| trigger(Event:WBAck, in_msg.addr, entry, tbe); // ignore WBAcks, don't care about them. |
| } else { |
| DPRINTF(RubySlicc, "%s\n", in_msg.Type); |
| error("Invalid message"); |
| } |
| } |
| } |
| } |
| |
| in_port(requestNetwork_in, CPURequestMsg, requestFromCores, rank=0) { |
| if (requestNetwork_in.isReady(clockEdge())) { |
| peek(requestNetwork_in, CPURequestMsg) { |
| TBE tbe := TBEs.lookup(in_msg.addr); |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(in_msg.addr)); |
| if (in_msg.Type == CoherenceRequestType:RdBlk) { |
| trigger(Event:RdBlk, in_msg.addr, entry, tbe); |
| } else if (in_msg.Type == CoherenceRequestType:RdBlkS) { |
| trigger(Event:RdBlkS, in_msg.addr, entry, tbe); |
| } else if (in_msg.Type == CoherenceRequestType:RdBlkM) { |
| trigger(Event:RdBlkM, in_msg.addr, entry, tbe); |
| } else if (in_msg.Type == CoherenceRequestType:WriteThrough) { |
| trigger(Event:WriteThrough, in_msg.addr, entry, tbe); |
| } else if (in_msg.Type == CoherenceRequestType:Atomic) { |
| trigger(Event:Atomic, in_msg.addr, entry, tbe); |
| } else if (in_msg.Type == CoherenceRequestType:VicDirty) { |
| if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) { |
| DPRINTF(RubySlicc, "Dropping VicDirty for address %s\n", in_msg.addr); |
| trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe); |
| } else { |
| DPRINTF(RubySlicc, "Got VicDirty from %s on %s\n", in_msg.Requestor, in_msg.addr); |
| trigger(Event:VicDirty, in_msg.addr, entry, tbe); |
| } |
| } else if (in_msg.Type == CoherenceRequestType:VicClean) { |
| if (getDirectoryEntry(in_msg.addr).VicDirtyIgnore.isElement(in_msg.Requestor)) { |
| DPRINTF(RubySlicc, "Dropping VicClean for address %s\n", in_msg.addr); |
| trigger(Event:StaleVicDirty, in_msg.addr, entry, tbe); |
| } else { |
| DPRINTF(RubySlicc, "Got VicClean from %s on %s\n", in_msg.Requestor, in_msg.addr); |
| trigger(Event:VicClean, in_msg.addr, entry, tbe); |
| } |
| } else { |
| error("Bad request message type"); |
| } |
| } |
| } |
| } |
| |
| // Actions |
| action(dd_sendResponseDmaData, "dd", desc="send DMA data response") { |
| enqueue(dmaResponseQueue_out, DMAResponseMsg, response_latency) { |
| out_msg.LineAddress := address; |
| out_msg.Type := DMAResponseType:DATA; |
| out_msg.Destination.add(tbe.OriginalRequestor); |
| out_msg.DataBlk := tbe.DataBlk; |
| out_msg.MessageSize := MessageSizeType:Response_Data; |
| } |
| } |
| |
| action(da_sendResponseDmaAck, "da", desc="send DMA ack") { |
| enqueue(dmaResponseQueue_out, DMAResponseMsg, response_latency) { |
| out_msg.LineAddress := address; |
| out_msg.Type := DMAResponseType:ACK; |
| out_msg.Destination.add(tbe.OriginalRequestor); |
| out_msg.MessageSize := MessageSizeType:Response_Control; |
| } |
| } |
| |
| action(s_sendResponseS, "s", desc="send Shared response") { |
| enqueue(responseNetwork_out, ResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceResponseType:NBSysResp; |
| if (tbe.L3Hit) { |
| out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0)); |
| } else { |
| out_msg.Sender := machineID; |
| } |
| out_msg.Destination.add(tbe.OriginalRequestor); |
| out_msg.DataBlk := tbe.DataBlk; |
| out_msg.MessageSize := MessageSizeType:Response_Data; |
| out_msg.Dirty := false; |
| out_msg.State := CoherenceState:Shared; |
| out_msg.InitialRequestTime := tbe.InitialRequestTime; |
| out_msg.ForwardRequestTime := tbe.ForwardRequestTime; |
| out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime; |
| out_msg.OriginalResponder := tbe.LastSender; |
| out_msg.L3Hit := tbe.L3Hit; |
| DPRINTF(RubySlicc, "%s\n", out_msg); |
| } |
| } |
| |
| action(es_sendResponseES, "es", desc="send Exclusive or Shared response") { |
| enqueue(responseNetwork_out, ResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceResponseType:NBSysResp; |
| if (tbe.L3Hit) { |
| out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0)); |
| } else { |
| out_msg.Sender := machineID; |
| } |
| out_msg.Destination.add(tbe.OriginalRequestor); |
| out_msg.DataBlk := tbe.DataBlk; |
| out_msg.MessageSize := MessageSizeType:Response_Data; |
| out_msg.Dirty := tbe.Dirty; |
| if (tbe.Cached) { |
| out_msg.State := CoherenceState:Shared; |
| } else { |
| out_msg.State := CoherenceState:Exclusive; |
| } |
| out_msg.InitialRequestTime := tbe.InitialRequestTime; |
| out_msg.ForwardRequestTime := tbe.ForwardRequestTime; |
| out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime; |
| out_msg.OriginalResponder := tbe.LastSender; |
| out_msg.L3Hit := tbe.L3Hit; |
| DPRINTF(RubySlicc, "%s\n", out_msg); |
| } |
| } |
| |
| action(m_sendResponseM, "m", desc="send Modified response") { |
| if (tbe.wtData) { |
| enqueue(triggerQueue_out, TriggerMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:UnblockWriteThrough; |
| } |
| } else { |
| enqueue(responseNetwork_out, ResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceResponseType:NBSysResp; |
| if (tbe.L3Hit) { |
| out_msg.Sender := createMachineID(MachineType:L3Cache, intToID(0)); |
| } else { |
| out_msg.Sender := machineID; |
| } |
| out_msg.Destination.add(tbe.OriginalRequestor); |
| out_msg.DataBlk := tbe.DataBlk; |
| out_msg.MessageSize := MessageSizeType:Response_Data; |
| out_msg.Dirty := tbe.Dirty; |
| out_msg.State := CoherenceState:Modified; |
| out_msg.CtoD := false; |
| out_msg.InitialRequestTime := tbe.InitialRequestTime; |
| out_msg.ForwardRequestTime := tbe.ForwardRequestTime; |
| out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime; |
| out_msg.OriginalResponder := tbe.LastSender; |
| if (tbe.atomicData) { |
| out_msg.WTRequestor := tbe.WTRequestor; |
| } |
| out_msg.L3Hit := tbe.L3Hit; |
| DPRINTF(RubySlicc, "%s\n", out_msg); |
| } |
| if (tbe.atomicData) { |
| enqueue(triggerQueue_out, TriggerMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:UnblockWriteThrough; |
| } |
| } |
| } |
| } |
| |
| action(c_sendResponseCtoD, "c", desc="send CtoD Ack") { |
| enqueue(responseNetwork_out, ResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceResponseType:NBSysResp; |
| out_msg.Sender := machineID; |
| out_msg.Destination.add(tbe.OriginalRequestor); |
| out_msg.MessageSize := MessageSizeType:Response_Control; |
| out_msg.Dirty := false; |
| out_msg.State := CoherenceState:Modified; |
| out_msg.CtoD := true; |
| out_msg.InitialRequestTime := tbe.InitialRequestTime; |
| out_msg.ForwardRequestTime := curCycle(); |
| out_msg.ProbeRequestStartTime := tbe.ProbeRequestStartTime; |
| DPRINTF(RubySlicc, "%s\n", out_msg); |
| } |
| } |
| |
| action(w_sendResponseWBAck, "w", desc="send WB Ack") { |
| peek(requestNetwork_in, CPURequestMsg) { |
| enqueue(responseNetwork_out, ResponseMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceResponseType:NBSysWBAck; |
| out_msg.Destination.add(in_msg.Requestor); |
| out_msg.WTRequestor := in_msg.WTRequestor; |
| out_msg.Sender := machineID; |
| out_msg.MessageSize := MessageSizeType:Writeback_Control; |
| out_msg.InitialRequestTime := in_msg.InitialRequestTime; |
| out_msg.ForwardRequestTime := curCycle(); |
| out_msg.ProbeRequestStartTime := curCycle(); |
| out_msg.instSeqNum := in_msg.instSeqNum; |
| } |
| } |
| } |
| |
| action(l_queueMemWBReq, "lq", desc="Write WB data to memory") { |
| peek(responseNetwork_in, ResponseMsg) { |
| enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) { |
| out_msg.addr := address; |
| out_msg.Type := MemoryRequestType:MEMORY_WB; |
| out_msg.Sender := machineID; |
| out_msg.MessageSize := MessageSizeType:Writeback_Data; |
| out_msg.DataBlk := in_msg.DataBlk; |
| } |
| } |
| } |
| |
| action(qdr_queueDmaRdReq, "qdr", desc="Read data from memory for DMA") { |
| peek(dmaRequestQueue_in, DMARequestMsg) { |
| if (L3CacheMemory.isTagPresent(address)) { |
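| // L3 hit: pull the block into the TBE for the DMA response and free the L3 entry. |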
| enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:L3Hit; |
| } |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address)); |
| tbe.DataBlk := entry.DataBlk; |
| tbe.L3Hit := true; |
| tbe.MemData := true; |
| L3CacheMemory.deallocate(address); |
| } else { |
| enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) { |
| out_msg.addr := address; |
| out_msg.Type := MemoryRequestType:MEMORY_READ; |
| out_msg.Sender := machineID; |
| out_msg.MessageSize := MessageSizeType:Request_Control; |
| } |
| } |
| } |
| } |
| |
| action(l_queueMemRdReq, "lr", desc="Read data from memory") { |
| peek(requestNetwork_in, CPURequestMsg) { |
| if (L3CacheMemory.isTagPresent(address)) { |
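| // L3 hit: service the request from the L3 copy after l3_hit_latency instead of going to memory. |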
| enqueue(L3TriggerQueue_out, TriggerMsg, l3_hit_latency) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:L3Hit; |
| DPRINTF(RubySlicc, "%s\n", out_msg); |
| } |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address)); |
| if (tbe.Dirty == false) { |
| tbe.DataBlk := entry.DataBlk; |
| } |
| tbe.LastSender := entry.LastSender; |
| tbe.L3Hit := true; |
| tbe.MemData := true; |
| L3CacheMemory.deallocate(address); |
| } else { |
| enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) { |
| out_msg.addr := address; |
| out_msg.Type := MemoryRequestType:MEMORY_READ; |
| out_msg.Sender := machineID; |
| out_msg.MessageSize := MessageSizeType:Request_Control; |
| } |
| } |
| } |
| } |
| |
| //This action profiles a hit or miss for a given request or writeback. |
| //It must be called after the l_queueMemRdReq, qdr_queueDmaRdReq, and al_allocateL3Block |
| //actions (where the tag has been checked and the L3Hit flag is set) and, for WBs, before |
| //the TBE is deallocated in dt_deallocateTBE, since it reads the L3Hit flag of the TBE entry. |
| action(pr_profileL3HitMiss, "pr_l3hm", desc="L3 Hit or Miss Profile") { |
| if (tbe.L3Hit) { |
| L3CacheMemory.profileDemandHit(); |
| } else { |
| L3CacheMemory.profileDemandMiss(); |
| } |
| } |
| |
| action(icd_probeInvCoreDataForDMA, "icd", desc="Probe inv cores, return data for DMA") { |
| peek(dmaRequestQueue_in, DMARequestMsg) { |
| enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := ProbeRequestType:PrbInv; |
| out_msg.ReturnData := true; |
| out_msg.MessageSize := MessageSizeType:Control; |
| if (!GPUonly) { |
| out_msg.Destination.broadcast(MachineType:CorePair); |
| } |
| |
| // Add relevant TCC node to list. This replaces all TCPs and SQCs |
| if (CPUonly) { |
| // CPU only has neither TCC nor TCC directory to add. |
| } else if (noTCCdir) { |
| out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC, |
| TCC_select_low_bit, TCC_select_num_bits)); |
| } else { |
| out_msg.Destination.add(mapAddressToRange(address, |
| MachineType:TCCdir, |
| TCC_select_low_bit, TCC_select_num_bits)); |
| } |
| out_msg.Destination.remove(in_msg.Requestor); |
| tbe.NumPendingAcks := out_msg.Destination.count(); |
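| // If nothing needs probing, signal completion immediately via the trigger queue. |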
| if (tbe.NumPendingAcks == 0) { |
| enqueue(triggerQueue_out, TriggerMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:AcksComplete; |
| } |
| } |
| DPRINTF(RubySlicc, "%s\n", out_msg); |
| APPEND_TRANSITION_COMMENT(" dc: Acks remaining: "); |
| APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks); |
| tbe.ProbeRequestStartTime := curCycle(); |
| assert(out_msg.Destination.count() > 0); |
| } |
| } |
| } |
| |
| action(dc_probeInvCoreData, "dc", desc="probe inv cores, return data") { |
| peek(requestNetwork_in, CPURequestMsg) { |
| enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := ProbeRequestType:PrbInv; |
| out_msg.ReturnData := true; |
| out_msg.MessageSize := MessageSizeType:Control; |
| if (!GPUonly) { |
| // won't be realistic for multisocket |
| out_msg.Destination.broadcast(MachineType:CorePair); |
| } |
| |
| // add relevant TCC node to list. This replaces all TCPs and SQCs |
| if (((in_msg.Type == CoherenceRequestType:WriteThrough || |
| in_msg.Type == CoherenceRequestType:Atomic) && |
| in_msg.NoWriteConflict) || |
| CPUonly) { |
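| // Non-conflicting write-through/atomic, or a CPU-only system: no TCC/TCCdir to probe. |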
| } else if (noTCCdir) { |
| out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC, |
| TCC_select_low_bit, TCC_select_num_bits)); |
| } else { |
| out_msg.Destination.add(mapAddressToRange(address, |
| MachineType:TCCdir, |
| TCC_select_low_bit, TCC_select_num_bits)); |
| } |
| out_msg.Destination.remove(in_msg.Requestor); |
| tbe.NumPendingAcks := out_msg.Destination.count(); |
| if (tbe.NumPendingAcks == 0) { |
| enqueue(triggerQueue_out, TriggerMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:AcksComplete; |
| } |
| } |
| DPRINTF(RubySlicc, "%s\n", out_msg); |
| APPEND_TRANSITION_COMMENT(" dc: Acks remaining: "); |
| APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks); |
| tbe.ProbeRequestStartTime := curCycle(); |
| assert(out_msg.Destination.count() > 0); |
| } |
| } |
| } |
| |
| action(scd_probeShrCoreDataForDma, "dsc", desc="probe shared cores, return data for DMA") { |
| peek(dmaRequestQueue_in, DMARequestMsg) { |
| enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := ProbeRequestType:PrbDowngrade; |
| out_msg.ReturnData := true; |
| out_msg.MessageSize := MessageSizeType:Control; |
| if (!GPUonly) { |
| out_msg.Destination.broadcast(MachineType:CorePair); |
| } |
| // add relevant TCC node to the list. This replaces all TCPs and SQCs |
| if (noTCCdir || CPUonly) { |
| //Don't need to notify TCC about reads |
| } else { |
| out_msg.Destination.add(mapAddressToRange(address, |
| MachineType:TCCdir, |
| TCC_select_low_bit, TCC_select_num_bits)); |
| } |
| if (noTCCdir && !CPUonly) { |
| out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC, |
| TCC_select_low_bit, TCC_select_num_bits)); |
| } |
| out_msg.Destination.remove(in_msg.Requestor); |
| tbe.NumPendingAcks := out_msg.Destination.count(); |
| if (tbe.NumPendingAcks == 0) { |
| enqueue(triggerQueue_out, TriggerMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:AcksComplete; |
| } |
| } |
| DPRINTF(RubySlicc, "%s\n", (out_msg)); |
| APPEND_TRANSITION_COMMENT(" sc: Acks remaining: "); |
| APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks); |
| tbe.ProbeRequestStartTime := curCycle(); |
| assert(out_msg.Destination.count() > 0); |
| } |
| } |
| } |
| |
| action(sc_probeShrCoreData, "sc", desc="probe shared cores, return data") { |
| peek(requestNetwork_in, CPURequestMsg) { // not the right network? |
| enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := ProbeRequestType:PrbDowngrade; |
| out_msg.ReturnData := true; |
| out_msg.MessageSize := MessageSizeType:Control; |
| if (!GPUonly) { |
| // won't be realistic for multisocket |
| out_msg.Destination.broadcast(MachineType:CorePair); |
| } |
| // add relevant TCC node to the list. This replaces all TCPs and SQCs |
| if (noTCCdir || CPUonly) { |
| //Don't need to notify TCC about reads |
| } else { |
| out_msg.Destination.add(mapAddressToRange(address, |
| MachineType:TCCdir, |
| TCC_select_low_bit, TCC_select_num_bits)); |
| tbe.NumPendingAcks := tbe.NumPendingAcks + 1; |
| } |
| if (noTCCdir && !CPUonly) { |
| out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC, |
| TCC_select_low_bit, TCC_select_num_bits)); |
| } |
| out_msg.Destination.remove(in_msg.Requestor); |
| tbe.NumPendingAcks := out_msg.Destination.count(); |
| if (tbe.NumPendingAcks == 0) { |
| enqueue(triggerQueue_out, TriggerMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:AcksComplete; |
| } |
| } |
| DPRINTF(RubySlicc, "%s\n", (out_msg)); |
| APPEND_TRANSITION_COMMENT(" sc: Acks remaining: "); |
| APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks); |
| tbe.ProbeRequestStartTime := curCycle(); |
| assert(out_msg.Destination.count() > 0); |
| } |
| } |
| } |
| |
| action(ic_probeInvCore, "ic", desc="probe invalidate core, no return data needed") { |
| peek(requestNetwork_in, CPURequestMsg) { // not the right network? |
| enqueue(probeNetwork_out, NBProbeRequestMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := ProbeRequestType:PrbInv; |
| out_msg.ReturnData := false; |
| out_msg.MessageSize := MessageSizeType:Control; |
| if (!GPUonly) { |
| // won't be realistic for multisocket |
| out_msg.Destination.broadcast(MachineType:CorePair); |
| } |
| |
| // add relevant TCC node to the list. This replaces all TCPs and SQCs |
| if (noTCCdir && !CPUonly) { |
| out_msg.Destination.add(mapAddressToRange(address,MachineType:TCC, |
| TCC_select_low_bit, TCC_select_num_bits)); |
| } else { |
| if (!noTCCdir) { |
| out_msg.Destination.add(mapAddressToRange(address, |
| MachineType:TCCdir, |
| TCC_select_low_bit, |
| TCC_select_num_bits)); |
| } |
| } |
| out_msg.Destination.remove(in_msg.Requestor); |
| tbe.NumPendingAcks := out_msg.Destination.count(); |
| if (tbe.NumPendingAcks == 0) { |
| enqueue(triggerQueue_out, TriggerMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:AcksComplete; |
| } |
| } |
| APPEND_TRANSITION_COMMENT(" ic: Acks remaining: "); |
| APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks); |
| DPRINTF(RubySlicc, "%s\n", out_msg); |
| tbe.ProbeRequestStartTime := curCycle(); |
| assert(out_msg.Destination.count() > 0); |
| } |
| } |
| } |
| |
| action(d_writeDataToMemory, "d", desc="Write data to memory") { |
| peek(responseNetwork_in, ResponseMsg) { |
| getDirectoryEntry(address).DataBlk := in_msg.DataBlk; |
| if (tbe.Dirty == false) { |
| // have to update the TBE, too, because of how this |
| // directory deals with functional writes |
| tbe.DataBlk := in_msg.DataBlk; |
| } |
| } |
| } |
| |
| action(atd_allocateTBEforDMA, "atd", desc="allocate TBE Entry for DMA") { |
| check_allocate(TBEs); |
| peek(dmaRequestQueue_in, DMARequestMsg) { |
| TBEs.allocate(address); |
| set_tbe(TBEs.lookup(address)); |
| tbe.OriginalRequestor := in_msg.Requestor; |
| tbe.NumPendingAcks := 0; |
| tbe.Dirty := false; |
| tbe.Len := in_msg.Len; |
| if (in_msg.Type == DMARequestType:WRITE) { |
| tbe.wtData := true; |
| tbe.Dirty := true; |
| tbe.DataBlk := in_msg.DataBlk; |
| tbe.writeMask.fillMask(); |
| } |
| } |
| } |
| |
| action(t_allocateTBE, "t", desc="allocate TBE Entry") { |
| check_allocate(TBEs); |
| peek(requestNetwork_in, CPURequestMsg) { |
| TBEs.allocate(address); |
| set_tbe(TBEs.lookup(address)); |
| if (in_msg.Type == CoherenceRequestType:WriteThrough) { |
| tbe.writeMask.clear(); |
| tbe.writeMask.orMask(in_msg.writeMask); |
| tbe.wtData := true; |
| tbe.WTRequestor := in_msg.WTRequestor; |
| tbe.LastSender := in_msg.Requestor; |
| } |
| if (in_msg.Type == CoherenceRequestType:Atomic) { |
| tbe.writeMask.clear(); |
| tbe.writeMask.orMask(in_msg.writeMask); |
| tbe.atomicData := true; |
| tbe.WTRequestor := in_msg.WTRequestor; |
| tbe.LastSender := in_msg.Requestor; |
| } |
| tbe.DataBlk := getDirectoryEntry(address).DataBlk; // Data only for WBs |
| tbe.Dirty := false; |
| if (in_msg.Type == CoherenceRequestType:WriteThrough) { |
| tbe.DataBlk.copyPartial(in_msg.DataBlk,in_msg.writeMask); |
| tbe.Dirty := true; |
| } |
| tbe.OriginalRequestor := in_msg.Requestor; |
| tbe.NumPendingAcks := 0; |
| tbe.Cached := in_msg.ForceShared; |
| tbe.InitialRequestTime := in_msg.InitialRequestTime; |
| } |
| } |
| |
| action(dt_deallocateTBE, "dt", desc="deallocate TBE Entry") { |
| if (tbe.Dirty == false) { |
| getDirectoryEntry(address).DataBlk := tbe.DataBlk; |
| } |
| TBEs.deallocate(address); |
| unset_tbe(); |
| } |
| |
| action(wd_writeBackData, "wd", desc="Write back data if needed") { |
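| // Write-through data merges into the directory copy under the write mask; atomics |
| // apply the operation against the directory copy; otherwise clean data from the |
| // TBE refreshes the directory copy. |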
| if (tbe.wtData) { |
| getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, tbe.writeMask); |
| } else if (tbe.atomicData) { |
| tbe.DataBlk.atomicPartial(getDirectoryEntry(address).DataBlk,tbe.writeMask); |
| getDirectoryEntry(address).DataBlk := tbe.DataBlk; |
| } else if (tbe.Dirty == false) { |
| getDirectoryEntry(address).DataBlk := tbe.DataBlk; |
| } |
| } |
| |
| action(mt_writeMemDataToTBE, "mt", desc="write Mem data to TBE") { |
| peek(memQueue_in, MemoryMsg) { |
| if (tbe.wtData == true) { |
| // do nothing |
| } else if (tbe.Dirty == false) { |
| tbe.DataBlk := getDirectoryEntry(address).DataBlk; |
| } |
| tbe.MemData := true; |
| } |
| } |
| |
| action(y_writeProbeDataToTBE, "y", desc="write Probe Data to TBE") { |
| peek(responseNetwork_in, ResponseMsg) { |
| if (in_msg.Dirty) { |
| if (tbe.wtData) { |
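| // Overlay the outstanding write-through bytes (writeMask) on top of the probed data. |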
| DataBlock tmp := in_msg.DataBlk; |
| tmp.copyPartial(tbe.DataBlk,tbe.writeMask); |
| tbe.DataBlk := tmp; |
| tbe.writeMask.fillMask(); |
| } else if (tbe.Dirty) { |
| if(tbe.atomicData == false && tbe.wtData == false) { |
| DPRINTF(RubySlicc, "Got double data for %s from %s\n", address, in_msg.Sender); |
| assert(tbe.DataBlk == in_msg.DataBlk); // in case of double data |
| } |
| } else { |
| tbe.DataBlk := in_msg.DataBlk; |
| tbe.Dirty := in_msg.Dirty; |
| tbe.LastSender := in_msg.Sender; |
| } |
| } |
| if (in_msg.Hit) { |
| tbe.Cached := true; |
| } |
| } |
| } |
| |
| action(mwc_markSinkWriteCancel, "mwc", desc="Mark to sink impending VicDirty") { |
| peek(responseNetwork_in, ResponseMsg) { |
| getDirectoryEntry(address).VicDirtyIgnore.add(in_msg.Sender); |
| APPEND_TRANSITION_COMMENT(" setting bit to sink VicDirty "); |
| } |
| } |
| |
| action(x_decrementAcks, "x", desc="decrement Acks pending") { |
| tbe.NumPendingAcks := tbe.NumPendingAcks - 1; |
| APPEND_TRANSITION_COMMENT(" Acks remaining: "); |
| APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks); |
| } |
| |
| action(o_checkForCompletion, "o", desc="check for ack completion") { |
| if (tbe.NumPendingAcks == 0) { |
| enqueue(triggerQueue_out, TriggerMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Type := TriggerType:AcksComplete; |
| } |
| } |
| APPEND_TRANSITION_COMMENT(" Check: Acks remaining: "); |
| APPEND_TRANSITION_COMMENT(tbe.NumPendingAcks); |
| } |
| |
| action(rv_removeVicDirtyIgnore, "rv", desc="Remove ignored core") { |
| peek(requestNetwork_in, CPURequestMsg) { |
| getDirectoryEntry(address).VicDirtyIgnore.remove(in_msg.Requestor); |
| } |
| } |
| |
| action(al_allocateL3Block, "al", desc="allocate the L3 block on WB") { |
| peek(responseNetwork_in, ResponseMsg) { |
| if (L3CacheMemory.isTagPresent(address)) { |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address)); |
| APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) "); |
| entry.DataBlk := in_msg.DataBlk; |
| entry.LastSender := in_msg.Sender; |
| assert(is_valid(tbe)); |
| //The controller always allocates a TBE entry upon receipt of a request from L2 caches. |
| //L3Hit flag is used by the hit profiling action pr_profileL3HitMiss to determine hit or miss. |
| //A TBE entry is not deallocated until a request is fully serviced and profiled. |
| tbe.L3Hit := true; |
| } else { |
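| // L3 miss: if the set is full, write the victim back to memory before allocating. |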
| if (L3CacheMemory.cacheAvail(address) == false) { |
| Addr victim := L3CacheMemory.cacheProbe(address); |
| CacheEntry victim_entry := static_cast(CacheEntry, "pointer", |
| L3CacheMemory.lookup(victim)); |
| enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) { |
| out_msg.addr := victim; |
| out_msg.Type := MemoryRequestType:MEMORY_WB; |
| out_msg.Sender := machineID; |
| out_msg.MessageSize := MessageSizeType:Writeback_Data; |
| out_msg.DataBlk := victim_entry.DataBlk; |
| } |
| L3CacheMemory.deallocate(victim); |
| } |
| assert(L3CacheMemory.cacheAvail(address)); |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry)); |
| APPEND_TRANSITION_COMMENT(" al wrote data to L3 "); |
| entry.DataBlk := in_msg.DataBlk; |
| |
| entry.LastSender := in_msg.Sender; |
| } |
| } |
| } |
| |
| action(alwt_allocateL3BlockOnWT, "alwt", desc="allocate the L3 block on WT") { |
| if ((tbe.wtData || tbe.atomicData) && useL3OnWT) { |
| //This tag check does not need to be counted as a hit or miss; it has already been recorded. |
| if (L3CacheMemory.isTagPresent(address)) { |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.lookup(address)); |
| APPEND_TRANSITION_COMMENT(" al wrote data to L3 (hit) "); |
| entry.DataBlk := tbe.DataBlk; |
| entry.LastSender := tbe.LastSender; |
| } else { |
| if (L3CacheMemory.cacheAvail(address) == false) { |
| Addr victim := L3CacheMemory.cacheProbe(address); |
| CacheEntry victim_entry := static_cast(CacheEntry, "pointer", |
| L3CacheMemory.lookup(victim)); |
| enqueue(memQueue_out, MemoryMsg, to_memory_controller_latency) { |
| out_msg.addr := victim; |
| out_msg.Type := MemoryRequestType:MEMORY_WB; |
| out_msg.Sender := machineID; |
| out_msg.MessageSize := MessageSizeType:Writeback_Data; |
| out_msg.DataBlk := victim_entry.DataBlk; |
| } |
| L3CacheMemory.deallocate(victim); |
| } |
| assert(L3CacheMemory.cacheAvail(address)); |
| CacheEntry entry := static_cast(CacheEntry, "pointer", L3CacheMemory.allocate(address, new CacheEntry)); |
| APPEND_TRANSITION_COMMENT(" al wrote data to L3 "); |
| entry.DataBlk := tbe.DataBlk; |
| entry.LastSender := tbe.LastSender; |
| } |
| } |
| } |
| |
| action(sf_setForwardReqTime, "sf", desc="set forward request time") { |
| tbe.ForwardRequestTime := curCycle(); |
| } |
| |
| action(dl_deallocateL3, "dl", desc="deallocate the L3 block") { |
| L3CacheMemory.deallocate(address); |
| } |
| |
| action(pd_popDmaRequestQueue, "pd", desc="Pop DMA request queue") { |
| dmaRequestQueue_in.dequeue(clockEdge()); |
| } |
| |
| action(p_popRequestQueue, "p", desc="pop request queue") { |
| requestNetwork_in.dequeue(clockEdge()); |
| } |
| |
| action(pr_popResponseQueue, "pr", desc="pop response queue") { |
| responseNetwork_in.dequeue(clockEdge()); |
| } |
| |
| action(pm_popMemQueue, "pm", desc="pop mem queue") { |
| memQueue_in.dequeue(clockEdge()); |
| } |
| |
| action(pt_popTriggerQueue, "pt", desc="pop trigger queue") { |
| triggerQueue_in.dequeue(clockEdge()); |
| } |
| |
| action(ptl_popTriggerQueue, "ptl", desc="pop L3 trigger queue") { |
| L3TriggerQueue_in.dequeue(clockEdge()); |
| } |
| |
| action(pu_popUnblockQueue, "pu", desc="pop unblock queue") { |
| unblockNetwork_in.dequeue(clockEdge()); |
| } |
| |
| action(zz_recycleRequestQueue, "zz", desc="recycle request queue") { |
| requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency)); |
| } |
| |
| action(yy_recycleResponseQueue, "yy", desc="recycle response queue") { |
| responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency)); |
| } |
| |
| action(st_stallAndWaitRequest, "st", desc="Stall and wait on the address") { |
| stall_and_wait(requestNetwork_in, address); |
| } |
| |
| action(sd_stallAndWaitRequest, "sd", desc="Stall and wait on the address") { |
| stall_and_wait(dmaRequestQueue_in, address); |
| } |
| |
| action(wad_wakeUpDependents, "wad", desc="Wake up any requests waiting for this address") { |
| wakeUpBuffers(address); |
| } |
| |
| action(wa_wakeUpAllDependents, "wa", desc="Wake up any requests waiting for this region") { |
| wakeUpAllBuffers(); |
| } |
| |
| action(wada_wakeUpAllDependentsAddr, "wada", desc="Wake up any requests waiting for this address") { |
| wakeUpAllBuffers(address); |
| } |
| |
| action(z_stall, "z", desc="stall") { |
| } |
| |
| // TRANSITIONS |
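| // Core requests leave U for a blocked state, collect probe responses and/or memory |
| // data, respond, and return to U when the requester unblocks; DMA requests return |
| // to U once their data or probe acks are complete, without a separate unblock. |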
| transition({BL, BDR_M, BS_M, BM_M, B_M, BP, BDR_PM, BDW_P, BS_PM, BM_PM, B_PM, BDR_Pm, BS_Pm, BM_Pm, B_Pm, B}, {RdBlkS, RdBlkM, RdBlk, CtoD}) { |
| st_stallAndWaitRequest; |
| } |
| |
| // It may be possible to save multiple invalidations here! |
| transition({BL, BS_M, BM_M, B_M, BP, BS_PM, BM_PM, B_PM, BS_Pm, BM_Pm, B_Pm, B}, {Atomic, WriteThrough}) { |
| st_stallAndWaitRequest; |
| } |
| |
| // The exit state is always U, so wakeUpDependents logic must be covered by |
| // every transition that flows into U. |
| transition({BL, BDR_M, BS_M, BM_M, B_M, BP, BDR_PM, BDW_P, BS_PM, BM_PM, B_PM, BDR_Pm, BS_Pm, BM_Pm, B_Pm, B}, {DmaRead, DmaWrite}) { |
| sd_stallAndWaitRequest; |
| } |
| |
| // transitions from U |
| transition(U, DmaRead, BDR_PM) {L3TagArrayRead} { |
| atd_allocateTBEforDMA; |
| qdr_queueDmaRdReq; |
| pr_profileL3HitMiss; //Must come after qdr_queueDmaRdReq |
| scd_probeShrCoreDataForDma; |
| pd_popDmaRequestQueue; |
| } |
| |
| transition(U, {RdBlkS}, BS_PM) {L3TagArrayRead} { |
| t_allocateTBE; |
| l_queueMemRdReq; |
| pr_profileL3HitMiss; //Must come after l_queueMemRdReq |
| sc_probeShrCoreData; |
| p_popRequestQueue; |
| } |
| |
| transition(U, DmaWrite, BDW_P) {L3TagArrayRead} { |
| atd_allocateTBEforDMA; |
| da_sendResponseDmaAck; |
| icd_probeInvCoreDataForDMA; |
| pd_popDmaRequestQueue; |
| } |
| |
| transition(U, WriteThrough, BM_PM) {L3TagArrayRead, L3TagArrayWrite} { |
| t_allocateTBE; |
| w_sendResponseWBAck; |
| l_queueMemRdReq; |
| pr_profileL3HitMiss; //Must come after l_queueMemRdReq |
| dc_probeInvCoreData; |
| p_popRequestQueue; |
| } |
| |
| transition(U, Atomic, BM_PM) {L3TagArrayRead, L3TagArrayWrite} { |
| t_allocateTBE; |
| l_queueMemRdReq; |
| pr_profileL3HitMiss; //Must come after l_queueMemRdReq |
| dc_probeInvCoreData; |
| p_popRequestQueue; |
| } |
| |
| transition(U, {RdBlkM}, BM_PM) {L3TagArrayRead} { |
| t_allocateTBE; |
| l_queueMemRdReq; |
| pr_profileL3HitMiss; //Must come after l_queueMemRdReq |
| dc_probeInvCoreData; |
| p_popRequestQueue; |
| } |
| |
| transition(U, RdBlk, B_PM) {L3TagArrayRead} { |
| t_allocateTBE; |
| l_queueMemRdReq; |
| pr_profileL3HitMiss; //Must come after l_queueMemRdReq |
| sc_probeShrCoreData; |
| p_popRequestQueue; |
| } |
| |
| transition(U, CtoD, BP) {L3TagArrayRead} { |
| t_allocateTBE; |
| ic_probeInvCore; |
| p_popRequestQueue; |
| } |
| |
| transition(U, VicDirty, BL) {L3TagArrayRead} { |
| t_allocateTBE; |
| w_sendResponseWBAck; |
| p_popRequestQueue; |
| } |
| |
| transition(U, VicClean, BL) {L3TagArrayRead} { |
| t_allocateTBE; |
| w_sendResponseWBAck; |
| p_popRequestQueue; |
| } |
| |
| transition(BL, {VicDirty, VicClean}) { |
| zz_recycleRequestQueue; |
| } |
| |
| transition(BL, CPUData, U) {L3TagArrayWrite, L3DataArrayWrite} { |
| d_writeDataToMemory; |
| al_allocateL3Block; |
| pr_profileL3HitMiss; //Must come after al_allocateL3Block and before dt_deallocateTBE |
| wad_wakeUpDependents; |
| dt_deallocateTBE; |
| pr_popResponseQueue; |
| } |
| |
| transition(BL, StaleWB, U) {L3TagArrayWrite} { |
| dt_deallocateTBE; |
| wa_wakeUpAllDependents; |
| pr_popResponseQueue; |
| } |
| |
| transition({B, BDR_M, BS_M, BM_M, B_M, BP, BDR_PM, BDW_P, BS_PM, BM_PM, B_PM, BDR_Pm, BS_Pm, BM_Pm, B_Pm}, {VicDirty, VicClean}) { |
| z_stall; |
| } |
| |
| transition({U, BL, BDR_M, BS_M, BM_M, B_M, BP, BDR_PM, BDW_P, BS_PM, BM_PM, B_PM, BDR_Pm, BS_Pm, BM_Pm, B_Pm, B}, WBAck) { |
| pm_popMemQueue; |
| } |
| |
| transition({U, BL, BDR_M, BS_M, BM_M, B_M, BP, BDR_PM, BDW_P, BS_PM, BM_PM, B_PM, BDR_Pm, BS_Pm, BM_Pm, B_Pm, B}, StaleVicDirty) { |
| rv_removeVicDirtyIgnore; |
| w_sendResponseWBAck; |
| p_popRequestQueue; |
| } |
| |
| transition({B}, CoreUnblock, U) { |
| wada_wakeUpAllDependentsAddr; |
| pu_popUnblockQueue; |
| } |
| |
| transition(B, UnblockWriteThrough, U) { |
| wada_wakeUpAllDependentsAddr; |
| pt_popTriggerQueue; |
| } |
| |
| transition(BDR_PM, MemData, BDR_Pm) { |
| mt_writeMemDataToTBE; |
| pm_popMemQueue; |
| } |
| |
| transition(BS_PM, MemData, BS_Pm) {} { |
| mt_writeMemDataToTBE; |
| pm_popMemQueue; |
| } |
| |
| transition(BM_PM, MemData, BM_Pm) {} { |
| mt_writeMemDataToTBE; |
| pm_popMemQueue; |
| } |
| |
| transition(B_PM, MemData, B_Pm) {} { |
| mt_writeMemDataToTBE; |
| pm_popMemQueue; |
| } |
| |
| transition(BDR_PM, L3Hit, BDR_Pm) { |
| ptl_popTriggerQueue; |
| } |
| |
| transition(BS_PM, L3Hit, BS_Pm) {} { |
| ptl_popTriggerQueue; |
| } |
| |
| transition(BM_PM, L3Hit, BM_Pm) {} { |
| ptl_popTriggerQueue; |
| } |
| |
| transition(B_PM, L3Hit, B_Pm) {} { |
| ptl_popTriggerQueue; |
| } |
| |
| transition(BDR_M, MemData, U) { |
| mt_writeMemDataToTBE; |
| dd_sendResponseDmaData; |
| wada_wakeUpAllDependentsAddr; |
| dt_deallocateTBE; |
| pm_popMemQueue; |
| } |
| |
| transition(BS_M, MemData, B) {L3TagArrayWrite, L3DataArrayWrite} { |
| mt_writeMemDataToTBE; |
| s_sendResponseS; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| pm_popMemQueue; |
| } |
| |
| transition(BM_M, MemData, B) {L3TagArrayWrite, L3DataArrayWrite} { |
| mt_writeMemDataToTBE; |
| m_sendResponseM; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| pm_popMemQueue; |
| } |
| |
| transition(B_M, MemData, B) {L3TagArrayWrite, L3DataArrayWrite} { |
| mt_writeMemDataToTBE; |
| es_sendResponseES; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| pm_popMemQueue; |
| } |
| |
| transition(BS_M, L3Hit, B) {L3TagArrayWrite, L3DataArrayWrite} { |
| s_sendResponseS; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| ptl_popTriggerQueue; |
| } |
| |
| transition(BM_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} { |
| m_sendResponseM; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| ptl_popTriggerQueue; |
| } |
| |
| transition(B_M, L3Hit, B) {L3DataArrayWrite, L3TagArrayWrite} { |
| es_sendResponseES; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| ptl_popTriggerQueue; |
| } |
| |
| transition({BDR_PM, BS_PM, BDW_P, BM_PM, B_PM, BDR_Pm, BS_Pm, BM_Pm, B_Pm, BP}, CPUPrbResp) { |
| y_writeProbeDataToTBE; |
| x_decrementAcks; |
| o_checkForCompletion; |
| pr_popResponseQueue; |
| } |
| |
| transition(BDR_PM, ProbeAcksComplete, BDR_M) { |
| pt_popTriggerQueue; |
| } |
| |
| transition(BS_PM, ProbeAcksComplete, BS_M) {} { |
| sf_setForwardReqTime; |
| pt_popTriggerQueue; |
| } |
| |
| transition(BM_PM, ProbeAcksComplete, BM_M) {} { |
| sf_setForwardReqTime; |
| pt_popTriggerQueue; |
| } |
| |
| transition(B_PM, ProbeAcksComplete, B_M) {} { |
| sf_setForwardReqTime; |
| pt_popTriggerQueue; |
| } |
| |
| transition(BDW_P, ProbeAcksComplete, U) { |
| // Check for pending requests from the core we put to sleep while waiting |
| // for a response |
| wada_wakeUpAllDependentsAddr; |
| dt_deallocateTBE; |
| pt_popTriggerQueue; |
| } |
| |
| transition(BDR_Pm, ProbeAcksComplete, U) { |
| dd_sendResponseDmaData; |
| // Check for pending requests from the core we put to sleep while waiting |
| // for a response |
| wada_wakeUpAllDependentsAddr; |
| dt_deallocateTBE; |
| pt_popTriggerQueue; |
| } |
| |
| transition(BS_Pm, ProbeAcksComplete, B) {L3DataArrayWrite, L3TagArrayWrite} { |
| sf_setForwardReqTime; |
| s_sendResponseS; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| pt_popTriggerQueue; |
| } |
| |
| transition(BM_Pm, ProbeAcksComplete, B) {L3DataArrayWrite, L3TagArrayWrite} { |
| sf_setForwardReqTime; |
| m_sendResponseM; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| pt_popTriggerQueue; |
| } |
| |
| transition(B_Pm, ProbeAcksComplete, B) {L3DataArrayWrite, L3TagArrayWrite} { |
| sf_setForwardReqTime; |
| es_sendResponseES; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| pt_popTriggerQueue; |
| } |
| |
| transition(BP, ProbeAcksComplete, B) {L3TagArrayWrite, L3TagArrayWrite} { |
| sf_setForwardReqTime; |
| c_sendResponseCtoD; |
| wd_writeBackData; |
| alwt_allocateL3BlockOnWT; |
| dt_deallocateTBE; |
| pt_popTriggerQueue; |
| } |
| } |