| /* |
| * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood |
| * All rights reserved. |
| * |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are |
| * met: redistributions of source code must retain the above copyright |
| * notice, this list of conditions and the following disclaimer; |
| * redistributions in binary form must reproduce the above copyright |
| * notice, this list of conditions and the following disclaimer in the |
| * documentation and/or other materials provided with the distribution; |
| * neither the name of the copyright holders nor the names of its |
| * contributors may be used to endorse or promote products derived from |
| * this software without specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| */ |
| |
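// Directory controller: tracks the owner and sharer sets for each memory
// block and coordinates requests from the L2 banks, DMA reads/writes, and
// accesses to the memory controller.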
| machine(MachineType:Directory, "Directory protocol") |
| : DirectoryMemory * directory; |
| Cycles directory_latency := 6; |
| Cycles to_memory_controller_latency := 1; |
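  // directory_latency is applied to messages the directory sends on-chip;
  // to_memory_controller_latency is applied to reads/writes queued to the
  // memory controller.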
| |
| // Message Queues |
| MessageBuffer * requestToDir, network="From", virtual_network="1", |
| vnet_type="request"; // a mod-L2 bank -> this Dir |
| MessageBuffer * responseToDir, network="From", virtual_network="2", |
| vnet_type="response"; // a mod-L2 bank -> this Dir |
| |
| MessageBuffer * forwardFromDir, network="To", virtual_network="1", |
| vnet_type="forward"; |
| MessageBuffer * responseFromDir, network="To", virtual_network="2", |
| vnet_type="response"; // Dir -> mod-L2 bank |
| |
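  // Read data and writeback acks returned by the memory controller.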
| MessageBuffer * responseFromMemory; |
| { |
| // STATES |
| state_declaration(State, desc="Directory states", default="Directory_State_I") { |
| // Base states |
| I, AccessPermission:Read_Write, desc="Invalid"; |
| S, AccessPermission:Read_Only, desc="Shared"; |
| O, AccessPermission:Maybe_Stale, desc="Owner"; |
| M, AccessPermission:Maybe_Stale, desc="Modified"; |
| |
| IS, AccessPermission:Busy, desc="Blocked, was in idle"; |
| SS, AccessPermission:Read_Only, desc="Blocked, was in shared"; |
| OO, AccessPermission:Busy, desc="Blocked, was in owned"; |
| MO, AccessPermission:Busy, desc="Blocked, going to owner or maybe modified"; |
| MM, AccessPermission:Busy, desc="Blocked, going to modified"; |
| MM_DMA, AccessPermission:Busy, desc="Blocked, going to I"; |
| |
| MI, AccessPermission:Busy, desc="Blocked on a writeback"; |
| MIS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received"; |
| OS, AccessPermission:Busy, desc="Blocked on a writeback"; |
| OSS, AccessPermission:Busy, desc="Blocked on a writeback, but don't remove from sharers when received"; |
| |
| XI_M, AccessPermission:Busy, desc="In a stable state, going to I, waiting for the memory controller"; |
| XI_U, AccessPermission:Busy, desc="In a stable state, going to I, waiting for an unblock"; |
| OI_D, AccessPermission:Busy, desc="In O, going to I, waiting for data"; |
| |
    OD, AccessPermission:Busy, desc="In O, waiting for DMA ack from L2";
    MD, AccessPermission:Busy, desc="In M, waiting for DMA ack from L2";
| } |
| |
| // Events |
| enumeration(Event, desc="Directory events") { |
| GETX, desc="A GETX arrives"; |
| GETS, desc="A GETS arrives"; |
| PUTX, desc="A PUTX arrives"; |
| PUTO, desc="A PUTO arrives"; |
| PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list"; |
| Unblock, desc="An unblock message arrives"; |
    Last_Unblock, desc="An unblock message arrives; no further unblocks are outstanding";
    Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
    Clean_Writeback, desc="The final message of a PUTX/PUTO writeback, no data";
    Dirty_Writeback, desc="The final message of a PUTX/PUTO writeback, contains data";
| Memory_Data, desc="Fetched data from memory arrives"; |
| Memory_Ack, desc="Writeback Ack from memory arrives"; |
| DMA_READ, desc="DMA Read"; |
| DMA_WRITE, desc="DMA Write"; |
| DMA_ACK, desc="DMA Ack"; |
| Data, desc="Data to directory"; |
| } |
| |
| // TYPES |
| |
| // DirectoryEntry |
| structure(Entry, desc="...", interface='AbstractEntry') { |
| State DirectoryState, desc="Directory state"; |
| NetDest Sharers, desc="Sharers for this block"; |
| NetDest Owner, desc="Owner of this block"; |
| int WaitingUnblocks, desc="Number of acks we're waiting for"; |
| } |
| |
| structure(TBE, desc="...") { |
| Addr PhysicalAddress, desc="Physical address for this entry"; |
| int Len, desc="Length of request"; |
| DataBlock DataBlk, desc="DataBlk"; |
| MachineID Requestor, desc="original requestor"; |
| } |
| |
| structure(TBETable, external = "yes") { |
| TBE lookup(Addr); |
| void allocate(Addr); |
| void deallocate(Addr); |
| bool isPresent(Addr); |
| } |
| |
| // ** OBJECTS ** |
| TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs"; |
| |
| Tick clockEdge(); |
| Tick cyclesToTicks(Cycles c); |
| void set_tbe(TBE b); |
| void unset_tbe(); |
| |
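  // Return the directory entry for addr, allocating a new entry on demand
  // if none exists yet.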
| Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" { |
| Entry dir_entry := static_cast(Entry, "pointer", directory[addr]); |
| |
| if (is_valid(dir_entry)) { |
| return dir_entry; |
| } |
| |
| dir_entry := static_cast(Entry, "pointer", |
| directory.allocate(addr, new Entry)); |
| return dir_entry; |
| } |
| |
| State getState(TBE tbe, Addr addr) { |
| return getDirectoryEntry(addr).DirectoryState; |
| } |
| |
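  // Check the per-state invariants on the Owner and Sharers sets (e.g.,
  // I and M allow no sharers, O and M require exactly one owner) before
  // recording the new state.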
| void setState(TBE tbe, Addr addr, State state) { |
| if (directory.isPresent(addr)) { |
| |
| if (state == State:I) { |
| assert(getDirectoryEntry(addr).Owner.count() == 0); |
| assert(getDirectoryEntry(addr).Sharers.count() == 0); |
| } |
| |
| if (state == State:S) { |
| assert(getDirectoryEntry(addr).Owner.count() == 0); |
| } |
| |
| if (state == State:O) { |
| assert(getDirectoryEntry(addr).Owner.count() == 1); |
| assert(getDirectoryEntry(addr).Sharers.isSuperset(getDirectoryEntry(addr).Owner) == false); |
| } |
| |
| if (state == State:M) { |
| assert(getDirectoryEntry(addr).Owner.count() == 1); |
| assert(getDirectoryEntry(addr).Sharers.count() == 0); |
| } |
| |
| if ((state != State:SS) && (state != State:OO)) { |
| assert(getDirectoryEntry(addr).WaitingUnblocks == 0); |
| } |
| |
| if ( (getDirectoryEntry(addr).DirectoryState != State:I) && (state == State:I) ) { |
| getDirectoryEntry(addr).DirectoryState := state; |
| // disable coherence checker |
| // sequencer.checkCoherence(addr); |
| } |
| else { |
| getDirectoryEntry(addr).DirectoryState := state; |
| } |
| } |
| } |
| |
| AccessPermission getAccessPermission(Addr addr) { |
| if (directory.isPresent(addr)) { |
| DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState)); |
| return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState); |
| } |
| |
| DPRINTF(RubySlicc, "AccessPermission_NotPresent\n"); |
| return AccessPermission:NotPresent; |
| } |
| |
| void setAccessPermission(Addr addr, State state) { |
| if (directory.isPresent(addr)) { |
| getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state)); |
| } |
| } |
| |
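  // Functional reads and writes are serviced directly by memory.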
| void functionalRead(Addr addr, Packet *pkt) { |
| functionalMemoryRead(pkt); |
| } |
| |
| int functionalWrite(Addr addr, Packet *pkt) { |
| int num_functional_writes := 0; |
| num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt); |
| return num_functional_writes; |
| } |
| |
| // if no sharers, then directory can be considered |
| // both a sharer and exclusive w.r.t. coherence checking |
| bool isBlockShared(Addr addr) { |
| if (directory.isPresent(addr)) { |
| if (getDirectoryEntry(addr).DirectoryState == State:I) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| bool isBlockExclusive(Addr addr) { |
| if (directory.isPresent(addr)) { |
| if (getDirectoryEntry(addr).DirectoryState == State:I) { |
| return true; |
| } |
| } |
| return false; |
| } |
| |
| // ** OUT_PORTS ** |
| out_port(forwardNetwork_out, RequestMsg, forwardFromDir); |
| out_port(responseNetwork_out, ResponseMsg, responseFromDir); |
| |
| // ** IN_PORTS ** |
| |
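  // Unblocks, writeback data/acks, exclusive data, and DMA acks arrive on
  // the response virtual network.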
| in_port(unblockNetwork_in, ResponseMsg, responseToDir) { |
| if (unblockNetwork_in.isReady(clockEdge())) { |
| peek(unblockNetwork_in, ResponseMsg) { |
| if (in_msg.Type == CoherenceResponseType:UNBLOCK) { |
| if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) { |
| trigger(Event:Last_Unblock, in_msg.addr, |
| TBEs[in_msg.addr]); |
| } else { |
| trigger(Event:Unblock, in_msg.addr, |
| TBEs[in_msg.addr]); |
| } |
| } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) { |
| trigger(Event:Exclusive_Unblock, in_msg.addr, |
| TBEs[in_msg.addr]); |
| } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) { |
| trigger(Event:Dirty_Writeback, in_msg.addr, |
| TBEs[in_msg.addr]); |
| } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) { |
| trigger(Event:Clean_Writeback, in_msg.addr, |
| TBEs[in_msg.addr]); |
| } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) { |
| trigger(Event:Data, in_msg.addr, |
| TBEs[in_msg.addr]); |
| } else if (in_msg.Type == CoherenceResponseType:DMA_ACK) { |
| trigger(Event:DMA_ACK, in_msg.addr, |
| TBEs[in_msg.addr]); |
| } else { |
| error("Invalid message"); |
| } |
| } |
| } |
| } |
| |
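  // GETS/GETX requests, PUTX/PUTO writeback requests, and DMA reads and
  // writes arrive here; DMA addresses are aligned to a line address first.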
| in_port(requestQueue_in, RequestMsg, requestToDir) { |
| if (requestQueue_in.isReady(clockEdge())) { |
| peek(requestQueue_in, RequestMsg) { |
| if (in_msg.Type == CoherenceRequestType:GETS) { |
| trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]); |
| } else if (in_msg.Type == CoherenceRequestType:GETX) { |
| trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]); |
| } else if (in_msg.Type == CoherenceRequestType:PUTX) { |
| trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]); |
| } else if (in_msg.Type == CoherenceRequestType:PUTO) { |
| trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]); |
| } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) { |
| trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]); |
| } else if (in_msg.Type == CoherenceRequestType:DMA_READ) { |
| trigger(Event:DMA_READ, makeLineAddress(in_msg.addr), |
| TBEs[makeLineAddress(in_msg.addr)]); |
| } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) { |
| trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr), |
| TBEs[makeLineAddress(in_msg.addr)]); |
| } else { |
| error("Invalid message"); |
| } |
| } |
| } |
| } |
| |
  // Responses from the off-chip memory controller: read data and writeback acks
| in_port(memQueue_in, MemoryMsg, responseFromMemory) { |
| if (memQueue_in.isReady(clockEdge())) { |
| peek(memQueue_in, MemoryMsg) { |
| if (in_msg.Type == MemoryRequestType:MEMORY_READ) { |
| trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]); |
| } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) { |
| trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]); |
| } else { |
| DPRINTF(RubySlicc, "%s\n", in_msg.Type); |
| error("Invalid message"); |
| } |
| } |
| } |
| } |
| |
| // Actions |
| |
| action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") { |
| peek(requestQueue_in, RequestMsg) { |
| enqueue(forwardNetwork_out, RequestMsg, directory_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceRequestType:WB_ACK; |
| out_msg.Requestor := in_msg.Requestor; |
| out_msg.RequestorMachine := MachineType:Directory; |
| out_msg.Destination.add(in_msg.Requestor); |
| out_msg.MessageSize := MessageSizeType:Writeback_Control; |
| } |
| } |
| } |
| |
| action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") { |
| peek(requestQueue_in, RequestMsg) { |
| enqueue(forwardNetwork_out, RequestMsg, directory_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceRequestType:WB_NACK; |
| out_msg.Requestor := in_msg.Requestor; |
| out_msg.RequestorMachine := MachineType:Directory; |
| out_msg.Destination.add(in_msg.Requestor); |
| out_msg.MessageSize := MessageSizeType:Writeback_Control; |
| } |
| } |
| } |
| |
| action(c_clearOwner, "c", desc="Clear the owner field") { |
| getDirectoryEntry(address).Owner.clear(); |
| } |
| |
| action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") { |
| getDirectoryEntry(address).Sharers.addNetDest(getDirectoryEntry(address).Owner); |
| getDirectoryEntry(address).Owner.clear(); |
| } |
| |
| action(cc_clearSharers, "\c", desc="Clear the sharers field") { |
| getDirectoryEntry(address).Sharers.clear(); |
| } |
| |
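  // Forward data fetched from memory to the original requestor; a fetch
  // triggered by a GETX (ReadX) returns exclusive data.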
| action(d_sendDataMsg, "d", desc="Send data to requestor") { |
| peek(memQueue_in, MemoryMsg) { |
| enqueue(responseNetwork_out, ResponseMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Sender := machineID; |
| out_msg.SenderMachine := MachineType:Directory; |
| out_msg.Destination.add(in_msg.OriginalRequestorMachId); |
| out_msg.DataBlk := in_msg.DataBlk; |
| out_msg.Dirty := false; // By definition, the block is now clean |
| out_msg.Acks := in_msg.Acks; |
| if (in_msg.ReadX) { |
| out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE; |
| } else { |
| out_msg.Type := CoherenceResponseType:DATA; |
| } |
| out_msg.MessageSize := MessageSizeType:Response_Data; |
| } |
| } |
| } |
| |
  action(p_fwdDataToDMA, "\d", desc="Send data to the DMA requestor") {
| peek(requestQueue_in, RequestMsg) { |
| enqueue(responseNetwork_out, ResponseMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Sender := machineID; |
| out_msg.SenderMachine := MachineType:Directory; |
| out_msg.Destination.add(in_msg.Requestor); |
| out_msg.Dirty := false; // By definition, the block is now clean |
| out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE; |
| out_msg.MessageSize := MessageSizeType:Response_Data; |
| } |
| } |
| } |
| |
| action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") { |
| peek(unblockNetwork_in, ResponseMsg) { |
| getDirectoryEntry(address).Owner.clear(); |
| getDirectoryEntry(address).Owner.add(in_msg.Sender); |
| } |
| } |
| |
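  // Forward the request to the current owner. The Acks count tells the
  // requestor how many invalidation acks to expect; a requestor that is
  // already a sharer does not ack itself.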
| action(f_forwardRequest, "f", desc="Forward request to owner") { |
| peek(requestQueue_in, RequestMsg) { |
| enqueue(forwardNetwork_out, RequestMsg, directory_latency) { |
| out_msg.addr := address; |
| out_msg.Type := in_msg.Type; |
| out_msg.Requestor := in_msg.Requestor; |
| out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor); |
| out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner); |
| out_msg.Acks := getDirectoryEntry(address).Sharers.count(); |
| if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) { |
| out_msg.Acks := out_msg.Acks - 1; |
| } |
| out_msg.MessageSize := MessageSizeType:Forwarded_Control; |
| } |
| } |
| } |
| |
  action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner, with the directory as requestor") {
| peek(requestQueue_in, RequestMsg) { |
| enqueue(forwardNetwork_out, RequestMsg, directory_latency) { |
| out_msg.addr := address; |
| out_msg.Type := in_msg.Type; |
| out_msg.Requestor := machineID; |
| out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor); |
| out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Owner); |
| out_msg.Acks := getDirectoryEntry(address).Sharers.count(); |
| if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) { |
| out_msg.Acks := out_msg.Acks - 1; |
| } |
| out_msg.MessageSize := MessageSizeType:Forwarded_Control; |
| } |
| } |
| } |
| |
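  // Invalidations are sent only when at least one sharer other than the
  // requestor exists.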
| action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") { |
| peek(requestQueue_in, RequestMsg) { |
| if ((getDirectoryEntry(in_msg.addr).Sharers.count() > 1) || |
| ((getDirectoryEntry(in_msg.addr).Sharers.count() > 0) && |
| (getDirectoryEntry(in_msg.addr).Sharers.isElement(in_msg.Requestor) == false))) { |
| enqueue(forwardNetwork_out, RequestMsg, directory_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceRequestType:INV; |
| out_msg.Requestor := in_msg.Requestor; |
| out_msg.RequestorMachine := machineIDToMachineType(in_msg.Requestor); |
| // out_msg.Destination := getDirectoryEntry(in_msg.addr).Sharers; |
| out_msg.Destination.addNetDest(getDirectoryEntry(in_msg.addr).Sharers); |
| out_msg.Destination.remove(in_msg.Requestor); |
| out_msg.MessageSize := MessageSizeType:Invalidate_Control; |
| } |
| } |
| } |
| } |
| |
| action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") { |
| requestQueue_in.dequeue(clockEdge()); |
| } |
| |
| action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") { |
| unblockNetwork_in.dequeue(clockEdge()); |
| } |
| |
| action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") { |
| peek(unblockNetwork_in, ResponseMsg) { |
| getDirectoryEntry(address).Sharers.add(in_msg.Sender); |
| } |
| } |
| |
| action(n_incrementOutstanding, "n", desc="Increment outstanding requests") { |
| getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks + 1; |
| } |
| |
| action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") { |
| getDirectoryEntry(address).WaitingUnblocks := getDirectoryEntry(address).WaitingUnblocks - 1; |
| assert(getDirectoryEntry(address).WaitingUnblocks >= 0); |
| } |
| |
| action(q_popMemQueue, "q", desc="Pop off-chip request queue") { |
| memQueue_in.dequeue(clockEdge()); |
| } |
| |
| action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") { |
| peek(requestQueue_in, RequestMsg) { |
| queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency); |
| } |
| } |
| |
| action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") { |
| peek(unblockNetwork_in, ResponseMsg) { |
| if (is_valid(tbe)) { |
| queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency, |
| in_msg.DataBlk); |
| } else { |
| queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency, |
| in_msg.DataBlk); |
| } |
| } |
| } |
| |
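  // Merge the partial DMA write data saved in the TBE into the incoming
  // writeback data before queueing the write to memory.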
| action(qw_queueMemoryWBRequestFromMessageAndTBE, "qwmt", |
| desc="Queue off-chip writeback request") { |
| peek(unblockNetwork_in, ResponseMsg) { |
| DataBlock DataBlk := in_msg.DataBlk; |
| DataBlk.copyPartial(tbe.DataBlk, getOffset(tbe.PhysicalAddress), |
| tbe.Len); |
| queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency, |
| DataBlk); |
| } |
| } |
| |
| action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") { |
| peek(requestQueue_in, RequestMsg) { |
| queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency, |
| in_msg.DataBlk); |
| } |
| } |
| |
| action(zz_recycleRequest, "\z", desc="Recycle the request queue") { |
| requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency)); |
| } |
| |
| action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") { |
| peek(requestQueue_in, RequestMsg) { |
| enqueue(responseNetwork_out, ResponseMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Sender := machineID; |
| out_msg.SenderMachine := MachineType:Directory; |
| out_msg.Destination.add(in_msg.Requestor); |
| out_msg.DataBlk := in_msg.DataBlk; |
| out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests |
| out_msg.Type := CoherenceResponseType:DMA_ACK; |
| out_msg.MessageSize := MessageSizeType:Writeback_Control; |
| } |
| } |
| } |
| |
| action(a_sendDMAAck2, "\aa", desc="Send DMA Ack that write completed, along with Inv Ack count") { |
| peek(unblockNetwork_in, ResponseMsg) { |
| enqueue(responseNetwork_out, ResponseMsg, 1) { |
| out_msg.addr := address; |
| out_msg.Sender := machineID; |
| out_msg.SenderMachine := MachineType:Directory; |
| if (is_valid(tbe)) { |
| out_msg.Destination.add(tbe.Requestor); |
| } |
| out_msg.DataBlk := in_msg.DataBlk; |
| out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests |
| out_msg.Type := CoherenceResponseType:DMA_ACK; |
| out_msg.MessageSize := MessageSizeType:Writeback_Control; |
| } |
| } |
| } |
| |
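  // The TBE records the DMA write's address, length, data, and requestor
  // while the directory waits for the owner's data (O/M DMA_WRITE path).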
| action(v_allocateTBE, "v", desc="Allocate TBE entry") { |
| peek (requestQueue_in, RequestMsg) { |
| TBEs.allocate(address); |
| set_tbe(TBEs[address]); |
| tbe.PhysicalAddress := in_msg.addr; |
| tbe.Len := in_msg.Len; |
| tbe.DataBlk := in_msg.DataBlk; |
| tbe.Requestor := in_msg.Requestor; |
| } |
| } |
| |
| action(w_deallocateTBE, "w", desc="Deallocate TBE entry") { |
| TBEs.deallocate(address); |
| unset_tbe(); |
| } |
| |
| |
| // TRANSITIONS |
| transition(I, GETX, MM) { |
| qf_queueMemoryFetchRequest; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(I, DMA_READ, XI_M) { |
| qf_queueMemoryFetchRequest; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(I, DMA_WRITE, XI_U) { |
| qw_queueMemoryWBRequest2; |
| a_sendDMAAck; // ack count may be zero |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(XI_M, Memory_Data, I) { |
| d_sendDataMsg; // ack count may be zero |
| q_popMemQueue; |
| } |
| |
| transition(XI_U, Exclusive_Unblock, I) { |
| cc_clearSharers; |
| c_clearOwner; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(S, GETX, MM) { |
| qf_queueMemoryFetchRequest; |
| g_sendInvalidations; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(S, DMA_READ) { |
| //qf_queueMemoryFetchRequest; |
| p_fwdDataToDMA; |
| //g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(S, DMA_WRITE, XI_U) { |
| qw_queueMemoryWBRequest2; |
| a_sendDMAAck; // ack count may be zero |
| g_sendInvalidations; // the DMA will collect invalidations |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(I, GETS, IS) { |
| qf_queueMemoryFetchRequest; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition({S, SS}, GETS, SS) { |
| qf_queueMemoryFetchRequest; |
| n_incrementOutstanding; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition({I, S}, PUTO) { |
| b_sendWriteBackNack; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition({I, S, O}, PUTX) { |
| b_sendWriteBackNack; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(O, GETX, MM) { |
| f_forwardRequest; |
| g_sendInvalidations; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(O, DMA_READ, OD) { |
| f_forwardRequest; // this will cause the data to go to DMA directly |
| //g_sendInvalidations; // this will cause acks to be sent to the DMA |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(OD, DMA_ACK, O) { |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition({O,M}, DMA_WRITE, OI_D) { |
| f_forwardRequestDirIsRequestor; // need the modified data before we can proceed |
| g_sendInvalidations; // these go to the DMA Controller |
| v_allocateTBE; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(OI_D, Data, XI_U) { |
| qw_queueMemoryWBRequestFromMessageAndTBE; |
| a_sendDMAAck2; // ack count may be zero |
| w_deallocateTBE; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition({O, OO}, GETS, OO) { |
| f_forwardRequest; |
| n_incrementOutstanding; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(M, GETX, MM) { |
| f_forwardRequest; |
| i_popIncomingRequestQueue; |
| } |
| |
| // no exclusive unblock will show up to the directory |
| transition(M, DMA_READ, MD) { |
| f_forwardRequest; // this will cause the data to go to DMA directly |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(MD, DMA_ACK, M) { |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(M, GETS, MO) { |
| f_forwardRequest; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(M, PUTX, MI) { |
| a_sendWriteBackAck; |
| i_popIncomingRequestQueue; |
| } |
| |
| // happens if M->O transition happens on-chip |
| transition(M, PUTO, MI) { |
| a_sendWriteBackAck; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(M, PUTO_SHARERS, MIS) { |
| a_sendWriteBackAck; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(O, PUTO, OS) { |
| a_sendWriteBackAck; |
| i_popIncomingRequestQueue; |
| } |
| |
| transition(O, PUTO_SHARERS, OSS) { |
| a_sendWriteBackAck; |
| i_popIncomingRequestQueue; |
| } |
| |
| |
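  // While a block is in a transient state, new requests for it are
  // recycled on the request queue until the current transaction completes.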
| transition({MM, MO, MI, MIS, OS, OSS, XI_M, XI_U, OI_D, OD, MD}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) { |
| zz_recycleRequest; |
| } |
| |
| transition({MM, MO}, Exclusive_Unblock, M) { |
| cc_clearSharers; |
| e_ownerIsUnblocker; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(MO, Unblock, O) { |
| m_addUnlockerToSharers; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ, DMA_WRITE}) { |
| zz_recycleRequest; |
| } |
| |
| transition(IS, GETS) { |
| zz_recycleRequest; |
| } |
| |
| transition(IS, Unblock, S) { |
| m_addUnlockerToSharers; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(IS, Exclusive_Unblock, M) { |
| cc_clearSharers; |
| e_ownerIsUnblocker; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(SS, Unblock) { |
| m_addUnlockerToSharers; |
| o_decrementOutstanding; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(SS, Last_Unblock, S) { |
| m_addUnlockerToSharers; |
| o_decrementOutstanding; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(OO, Unblock) { |
| m_addUnlockerToSharers; |
| o_decrementOutstanding; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(OO, Last_Unblock, O) { |
| m_addUnlockerToSharers; |
| o_decrementOutstanding; |
| j_popIncomingUnblockQueue; |
| } |
| |
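  // Writeback completion: dirty writebacks queue data to memory, clean
  // writebacks only update directory state.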
| transition(MI, Dirty_Writeback, I) { |
| c_clearOwner; |
| cc_clearSharers; |
| qw_queueMemoryWBRequest; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(MIS, Dirty_Writeback, S) { |
| c_moveOwnerToSharer; |
| qw_queueMemoryWBRequest; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(MIS, Clean_Writeback, S) { |
| c_moveOwnerToSharer; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(OS, Dirty_Writeback, S) { |
| c_clearOwner; |
| qw_queueMemoryWBRequest; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(OSS, Dirty_Writeback, S) { |
| c_moveOwnerToSharer; |
| qw_queueMemoryWBRequest; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(OSS, Clean_Writeback, S) { |
| c_moveOwnerToSharer; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(MI, Clean_Writeback, I) { |
| c_clearOwner; |
| cc_clearSharers; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition(OS, Clean_Writeback, S) { |
| c_clearOwner; |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition({MI, MIS}, Unblock, M) { |
| j_popIncomingUnblockQueue; |
| } |
| |
| transition({OS, OSS}, Unblock, O) { |
| j_popIncomingUnblockQueue; |
| } |
| |
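  // Data returning from memory is forwarded to the original requestor;
  // writeback acks from memory need no action beyond popping the queue.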
| transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) { |
| d_sendDataMsg; |
| q_popMemQueue; |
| } |
| |
| transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS, XI_U, XI_M}, Memory_Ack) { |
| //a_sendAck; |
| q_popMemQueue; |
| } |
| |
| } |