/*
 * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// This file is copied from Yasuko Watanabe's prefetch / memory protocol.
// Copied here by aep, 12/14/07.

machine(Directory, "MESI_CMP_filter_directory protocol")
 : DirectoryMemory * directory,
   MemoryControl * memBuffer,
   int to_mem_ctrl_latency = 1,
   int directory_latency = 6
{

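  // Message buffers: requests ride virtual network 0 and responses ride
  // virtual network 1. Buffers declared network="From" carry traffic into
  // the directory; network="To" buffers carry traffic out of it.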
  MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type="request";
  MessageBuffer responseToDir, network="From", virtual_network="1", ordered="false", vnet_type="response";

  MessageBuffer requestFromDir, network="To", virtual_network="0", ordered="false", vnet_type="request";
  MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false", vnet_type="response";

  // STATES
  state_declaration(State, desc="Directory states", default="Directory_State_I") {
    // Base states
    I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
    ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
    ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";

    M, AccessPermission:Maybe_Stale, desc="memory copy may be stale, i.e. other modified copies may exist";
    IM, AccessPermission:Busy, desc="Intermediate State I>M";
    MI, AccessPermission:Busy, desc="Intermediate State M>I";
    M_DRD, AccessPermission:Busy, desc="Intermediate State when there is a dma read";
    M_DRDI, AccessPermission:Busy, desc="Intermediate State M_DRD>I, waiting for the memory writeback ack";
    M_DWR, AccessPermission:Busy, desc="Intermediate State when there is a dma write";
    M_DWRI, AccessPermission:Busy, desc="Intermediate State M_DWR>I, waiting for the memory writeback ack";
  }
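
  // I and M are the only stable states; every other state is a Busy
  // transient within one of the fetch, writeback, or DMA flows spelled
  // out in the TRANSITIONS section below.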

  // Events
  enumeration(Event, desc="Directory events") {
    Fetch, desc="A memory fetch arrives";
    Data, desc="Writeback data arrives";
    Memory_Data, desc="Fetched data from memory arrives";
    Memory_Ack, desc="Writeback ack from memory arrives";
    // added by SS for dma
    DMA_READ, desc="A DMA read memory request";
    DMA_WRITE, desc="A DMA write memory request";
    CleanReplacement, desc="Clean replacement in L2 cache";
  }

  // TYPES

  // DirectoryEntry
  structure(Entry, desc="...", interface="AbstractEntry") {
    State DirectoryState, desc="Directory state";
    DataBlock DataBlk, desc="data for the block";
    NetDest Sharers, desc="Sharers for this block";
    NetDest Owner, desc="Owner of this block";
  }

  // TBE entries for DMA requests
  structure(TBE, desc="TBE entries for outstanding DMA requests") {
    Address PhysicalAddress, desc="physical address";
    State TBEState, desc="Transient State";
    DataBlock DataBlk, desc="Data to be written (DMA write only)";
    int Len, desc="Length of the DMA transfer, in bytes";
  }

  structure(TBETable, external="yes") {
    TBE lookup(Address);
    void allocate(Address);
    void deallocate(Address);
    bool isPresent(Address);
  }

  // ** OBJECTS **

  TBETable TBEs, template_hack="<Directory_TBE>";

  void set_tbe(TBE tbe);
  void unset_tbe();

  Entry getDirectoryEntry(Address addr), return_by_pointer="yes" {
    Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);

    if (is_valid(dir_entry)) {
      return dir_entry;
    }

    // No entry exists yet for this line: allocate one on first touch.
    dir_entry := static_cast(Entry, "pointer",
                             directory.allocate(addr, new Entry));
    return dir_entry;
  }

  State getState(TBE tbe, Address addr) {
    if (is_valid(tbe)) {
      return tbe.TBEState;
    } else if (directory.isPresent(addr)) {
      return getDirectoryEntry(addr).DirectoryState;
    } else {
      return State:I;
    }
  }

  void setState(TBE tbe, Address addr, State state) {
    if (is_valid(tbe)) {
      tbe.TBEState := state;
    }

    if (directory.isPresent(addr)) {
      if (state == State:I) {
        assert(getDirectoryEntry(addr).Owner.count() == 0);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      } else if (state == State:M) {
        assert(getDirectoryEntry(addr).Owner.count() == 1);
        assert(getDirectoryEntry(addr).Sharers.count() == 0);
      }

      getDirectoryEntry(addr).DirectoryState := state;
    }
  }

  AccessPermission getAccessPermission(Address addr) {
    TBE tbe := TBEs[addr];
    if (is_valid(tbe)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
      return Directory_State_to_permission(tbe.TBEState);
    }

    if (directory.isPresent(addr)) {
      DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
      return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
    }

    DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
    return AccessPermission:NotPresent;
  }

  DataBlock getDataBlock(Address addr), return_by_ref="yes" {
    return getDirectoryEntry(addr).DataBlk;
  }

  void setAccessPermission(Address addr, State state) {
    if (directory.isPresent(addr)) {
      getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
    }
  }

  bool isGETRequest(CoherenceRequestType type) {
    return (type == CoherenceRequestType:GETS) ||
           (type == CoherenceRequestType:GET_INSTR) ||
           (type == CoherenceRequestType:GETX);
  }

  // ** OUT_PORTS **
  out_port(responseNetwork_out, ResponseMsg, responseFromDir);
  out_port(memQueue_out, MemoryMsg, memBuffer);

  // ** IN_PORTS **

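  // Requests from the L2 and from the DMA controller both arrive on the
  // request virtual network; DMA addresses are normalized to line
  // addresses before the directory/TBE lookup.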
  in_port(requestNetwork_in, RequestMsg, requestToDir) {
    if (requestNetwork_in.isReady()) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (isGETRequest(in_msg.Type)) {
          trigger(Event:Fetch, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
          trigger(Event:DMA_READ, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
          trigger(Event:DMA_WRITE, makeLineAddress(in_msg.Address),
                  TBEs[makeLineAddress(in_msg.Address)]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg);
          error("Invalid message");
        }
      }
    }
  }

  in_port(responseNetwork_in, ResponseMsg, responseToDir) {
    if (responseNetwork_in.isReady()) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
          trigger(Event:Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == CoherenceResponseType:ACK) {
          trigger(Event:CleanReplacement, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // off-chip memory request/response is done
  in_port(memQueue_in, MemoryMsg, memBuffer) {
    if (memQueue_in.isReady()) {
      peek(memQueue_in, MemoryMsg) {
        if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
          trigger(Event:Memory_Data, in_msg.Address, TBEs[in_msg.Address]);
        } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
          trigger(Event:Memory_Ack, in_msg.Address, TBEs[in_msg.Address]);
        } else {
          DPRINTF(RubySlicc, "%s\n", in_msg.Type);
          error("Invalid message");
        }
      }
    }
  }

  // Actions
  action(a_sendAck, "a", desc="Send writeback ack to L2 (clean replacement)") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Sender);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(d_sendData, "d", desc="Send data to requestor") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_DATA;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(aa_sendAck, "aa", desc="Send ack to L2 (writeback to memory complete)") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:MEMORY_ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.OriginalRequestorMachId);
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
    requestNetwork_in.dequeue();
  }

  action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
    responseNetwork_in.dequeue();
  }

  action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
    memQueue_in.dequeue();
  }

  action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Requestor;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.Prefetch := in_msg.Prefetch;
        out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.MessageSize := in_msg.MessageSize;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(m_writeDataToMemory, "m", desc="Write dirty writeback to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
      DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
              in_msg.Address, in_msg.DataBlk);
    }
  }

  // added by SS for dma
  action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request for a DMA read") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_READ;
        out_msg.Sender := machineID;
        out_msg.OriginalRequestorMachId := machineID;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  // DMA requests arrive on the request network, so this pops the same
  // queue as j_popIncomingRequestQueue.
  action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
    requestNetwork_in.dequeue();
  }

  action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
    peek(memQueue_in, MemoryMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // We send the entire data block and rely on the DMA controller
        // to split it up if need be.
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(dw_writeDMAData, "dw", desc="DMA Write data to memory") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
    }
  }

  action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request for a partial (DMA) write") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := machineID;
        // Use the offset of the original (possibly unaligned) DMA address,
        // not the line address, so the partial data lands on the right bytes.
        out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
        out_msg.MessageSize := in_msg.MessageSize;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
    enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
      out_msg.Address := address;
      out_msg.Type := CoherenceResponseType:ACK;
      out_msg.Destination.add(map_Address_to_DMA(address));
      out_msg.MessageSize := MessageSizeType:Writeback_Control;
    }
  }

  action(z_recycleRequestQueue, "z", desc="recycle request queue") {
    requestNetwork_in.recycle();
  }

  // DMA requests share the request network, so recycling the "DMA queue"
  // recycles the same request queue.
  action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
    requestNetwork_in.recycle();
  }

  action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
    peek(requestNetwork_in, RequestMsg) {
      getDirectoryEntry(address).Owner.clear();
      getDirectoryEntry(address).Owner.add(in_msg.Requestor);
    }
  }

  action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=directory_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:INV;
        out_msg.Sender := machineID;
        out_msg.Destination := getDirectoryEntry(address).Owner;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }

  action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(responseNetwork_out, ResponseMsg, latency=to_mem_ctrl_latency) {
        out_msg.Address := address;
        out_msg.Type := CoherenceResponseType:DATA;
        // We send the entire data block and rely on the DMA controller
        // to split it up if need be.
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Destination.add(map_Address_to_DMA(address));
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
  }

  action(c_clearOwner, "c", desc="Clear the owner field") {
    getDirectoryEntry(address).Owner.clear();
  }

  action(v_allocateTBE, "v", desc="Allocate TBE") {
    peek(requestNetwork_in, RequestMsg) {
      TBEs.allocate(address);
      set_tbe(TBEs[address]);
      tbe.DataBlk := in_msg.DataBlk;
      tbe.PhysicalAddress := in_msg.Address;
      tbe.Len := in_msg.Len;
    }
  }

  action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
    assert(is_valid(tbe));
    getDirectoryEntry(address).DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
  }

  action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request from TBE") {
    peek(responseNetwork_in, ResponseMsg) {
      enqueue(memQueue_out, MemoryMsg, latency=to_mem_ctrl_latency) {
        assert(is_valid(tbe));
        out_msg.Address := address;
        out_msg.Type := MemoryRequestType:MEMORY_WB;
        out_msg.OriginalRequestorMachId := in_msg.Sender;
        out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
        out_msg.MessageSize := in_msg.MessageSize;

        DPRINTF(RubySlicc, "%s\n", out_msg);
      }
    }
  }

  action(w_deallocateTBE, "w", desc="Deallocate TBE") {
    TBEs.deallocate(address);
    unset_tbe();
  }

  // TRANSITIONS

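  // Protocol flows:
  //   L2 fetch:            I -> IM    -> M   (memory read, data to L2)
  //   L2 dirty writeback:  M -> MI    -> I   (data to memory, ack to L2)
  //   DMA read  (clean):   I -> ID    -> I   (data served from memory)
  //   DMA write (clean):   I -> ID_W  -> I   (partial write to memory)
  //   DMA read  (dirty):   M -> M_DRD -> M_DRDI -> I (owner invalidated first)
  //   DMA write (dirty):   M -> M_DWR -> M_DWRI -> I (owner invalidated first)
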
  // L2 fetch: queue a memory read and make the requestor the owner.
  transition(I, Fetch, IM) {
    qf_queueMemoryFetchRequest;
    e_ownerIsRequestor;
    j_popIncomingRequestQueue;
  }

  transition(IM, Memory_Data, M) {
    d_sendData;
    l_popMemQueue;
  }

  // added by SS
  // Clean replacement from L2: nothing to write back, just ack and go to I.
  transition(M, CleanReplacement, I) {
    c_clearOwner;
    a_sendAck;
    k_popIncomingResponseQueue;
  }

  // Dirty writeback from L2: update memory, then wait for the memory ack.
  transition(M, Data, MI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(MI, Memory_Ack, I) {
    c_clearOwner;
    aa_sendAck;
    l_popMemQueue;
  }

  // added by SS for dma support
  // DMA read of a clean block: fetch from memory, forward to the DMA controller.
  transition(I, DMA_READ, ID) {
    qf_queueMemoryFetchRequestDMA;
    j_popIncomingRequestQueue;
  }

  transition(ID, Memory_Data, I) {
    dr_sendDMAData;
    l_popMemQueue;
  }

  // DMA write to a clean block: merge the partial data and write it back.
  transition(I, DMA_WRITE, ID_W) {
    dw_writeDMAData;
    qw_queueMemoryWBRequest_partial;
    j_popIncomingRequestQueue;
  }

  transition(ID_W, Memory_Ack, I) {
    da_sendDMAAck;
    l_popMemQueue;
  }

  // While a transaction is in flight, stall conflicting requests by
  // recycling them on their input queues.
  transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data}) {
    z_recycleRequestQueue;
  }

  transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ}) {
    zz_recycleDMAQueue;
  }

  // DMA read of a dirty block: invalidate the owner and wait for its data.
  transition(M, DMA_READ, M_DRD) {
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DRD, Data, M_DRDI) {
    drp_sendDMAData;
    m_writeDataToMemory;
    qw_queueMemoryWBRequest;
    k_popIncomingResponseQueue;
  }

  transition(M_DRDI, Memory_Ack, I) {
    aa_sendAck;
    c_clearOwner;
    l_popMemQueue;
  }

  // DMA write to a dirty block: stash the DMA data in a TBE, invalidate
  // the owner, and merge once the owner's data arrives.
  transition(M, DMA_WRITE, M_DWR) {
    v_allocateTBE;
    inv_sendCacheInvalidate;
    j_popIncomingRequestQueue;
  }

  transition(M_DWR, Data, M_DWRI) {
    m_writeDataToMemory;
    qw_queueMemoryWBRequest_partialTBE;
    k_popIncomingResponseQueue;
  }

  transition(M_DWRI, Memory_Ack, I) {
    dwt_writeDMADataFromTBE;
    aa_sendAck;
    c_clearOwner;
    da_sendDMAAck;
    w_deallocateTBE;
    l_popMemQueue;
  }

}