| /* |
| * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood |
| * All rights reserved. |
| * |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are |
| * met: redistributions of source code must retain the above copyright |
| * notice, this list of conditions and the following disclaimer; |
| * redistributions in binary form must reproduce the above copyright |
| * notice, this list of conditions and the following disclaimer in the |
| * documentation and/or other materials provided with the distribution; |
| * neither the name of the copyright holders nor the names of its |
| * contributors may be used to endorse or promote products derived from |
| * this software without specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| */ |
| |
// L2 cache bank controller for the Token coherence protocol.
machine(MachineType:L2Cache, "Token protocol")
 : CacheMemory * L2cache;               // data/tag array backing this bank
   int N_tokens;                        // token batch size handed out on a shared-data reply
   Cycles l2_request_latency := 5;      // latency for requests issued by this bank
   Cycles l2_response_latency := 5;     // latency for responses issued by this bank
   // When true, external transient requests are NOT forwarded to local L1s
   // unless the local directory filter records at least one sharer.
   bool filtering_enabled := "True";

   // L2 BANK QUEUES
   // From local bank of L2 cache TO the network

   // this L2 bank -> a local L1 || mod-directory
   MessageBuffer * responseFromL2Cache, network="To", virtual_network="4",
        vnet_type="response";
   // this L2 bank -> mod-directory
   MessageBuffer * GlobalRequestFromL2Cache, network="To", virtual_network="2",
        vnet_type="request";
   // this L2 bank -> a local L1
   MessageBuffer * L1RequestFromL2Cache, network="To", virtual_network="1",
        vnet_type="request";


   // FROM the network to this local bank of L2 cache

   // a local L1 || mod-directory -> this L2 bank
   MessageBuffer * responseToL2Cache, network="From", virtual_network="4",
        vnet_type="response";
   // persistent (starvation-avoidance) requests from all processors
   MessageBuffer * persistentToL2Cache, network="From", virtual_network="3",
        vnet_type="persistent";
   // mod-directory -> this L2 bank
   MessageBuffer * GlobalRequestToL2Cache, network="From", virtual_network="2",
        vnet_type="request";
   // a local L1 -> this L2 bank
   MessageBuffer * L1RequestToL2Cache, network="From", virtual_network="1",
        vnet_type="request";
| { |
| // STATES |
  // STATES
  // Token-counting states: permission is derived from how many of the
  // max_tokens() tokens this bank holds (owner token = majority share).
  state_declaration(State, desc="L2 Cache states", default="L2Cache_State_I") {
    // Base states
    NP, AccessPermission:Invalid, desc="Not Present";
    I, AccessPermission:Invalid, desc="Idle";
    S, AccessPermission:Read_Only, desc="Shared, not present in any local L1s";
    O, AccessPermission:Read_Only, desc="Owned, not present in any L1s";
    M, AccessPermission:Read_Write, desc="Modified, not present in any L1s";

    // Locked states: a persistent request from another node holds the
    // address locked, so this bank must forward tokens it receives.
    I_L, AccessPermission:Busy, "I^L", desc="Invalid, Locked";
    S_L, AccessPermission:Busy, "S^L", desc="Shared, Locked";
  }
| |
| // EVENTS |
| enumeration(Event, desc="Cache events") { |
| |
| // Requests |
| L1_GETS, desc="local L1 GETS request"; |
| L1_GETS_Last_Token, desc="local L1 GETS request"; |
| L1_GETX, desc="local L1 GETX request"; |
| L1_INV, desc="L1 no longer has tokens"; |
| Transient_GETX, desc="A GetX from another processor"; |
| Transient_GETS, desc="A GetS from another processor"; |
| Transient_GETS_Last_Token, desc="A GetS from another processor"; |
| |
| // events initiated by this L2 |
| L2_Replacement, desc="L2 Replacement", format="!r"; |
| |
| // events of external L2 responses |
| |
| // Responses |
| Writeback_Tokens, desc="Received a writeback from L1 with only tokens (no data)"; |
| Writeback_Shared_Data, desc="Received a writeback from L1 that includes clean data"; |
| Writeback_All_Tokens, desc="Received a writeback from L1"; |
| Writeback_Owned, desc="Received a writeback from L1"; |
| |
| |
| Data_Shared, desc="Received a data message, we are now a sharer"; |
| Data_Owner, desc="Received a data message, we are now the owner"; |
| Data_All_Tokens, desc="Received a data message, we are now the owner, we now have all the tokens"; |
| Ack, desc="Received an ack message"; |
| Ack_All_Tokens, desc="Received an ack message, we now have all the tokens"; |
| |
| // Lock/Unlock |
| Persistent_GETX, desc="Another processor has priority to read/write"; |
| Persistent_GETS, desc="Another processor has priority to read"; |
| Persistent_GETS_Last_Token, desc="Another processor has priority to read"; |
| Own_Lock_or_Unlock, desc="This processor now has priority"; |
| } |
| |
| // TYPES |
| |
| // CacheEntry |
  // CacheEntry: per-line L2 state, including the token count that encodes
  // this bank's coherence permission for the line.
  structure(Entry, desc="...", interface="AbstractCacheEntry") {
    State CacheState, desc="cache state";
    bool Dirty, desc="Is the data dirty (different than memory)?";
    int Tokens, desc="The number of tokens we're holding for the line";
    DataBlk DataBlk, desc="data for the block";
  }
| |
  // Local-directory filter entry: tracks which local L1s likely hold the
  // block, used to filter/direct forwarded requests.
  structure(DirEntry, desc="...", interface="AbstractEntry") {
    Set Sharers, desc="Set of the internal processors that want the block in shared state";
    bool exclusive, default="false", desc="if local exclusive is likely";
  }
| |
  // Unbounded (no-conflict) map from address to DirEntry, implemented in C++.
  structure(PerfectCacheMemory, external="yes") {
    void allocate(Addr);
    void deallocate(Addr);
    DirEntry lookup(Addr);
    bool isTagPresent(Addr);
  }
| |
  // Table of outstanding persistent (starvation-avoidance) requests,
  // implemented in C++; findSmallest() picks the highest-priority starver.
  structure(PersistentTable, external="yes") {
    void persistentRequestLock(Addr, MachineID, AccessType);
    void persistentRequestUnlock(Addr, MachineID);
    MachineID findSmallest(Addr);
    AccessType typeOfSmallest(Addr);
    void markEntries(Addr);
    bool isLocked(Addr);
    int countStarvingForAddress(Addr);
    int countReadStarvingForAddress(Addr);
  }
| |
  // Controller-private state.
  PersistentTable persistentTable;   // active persistent requests per address
  PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";  // local L1 sharer filter

  // Hooks provided by the generated controller base class.
  Tick clockEdge();
  void set_cache_entry(AbstractCacheEntry b);
  void unset_cache_entry();
  MachineID mapAddressToMachine(Addr addr, MachineType mtype);
| |
| Entry getCacheEntry(Addr address), return_by_pointer="yes" { |
| Entry cache_entry := static_cast(Entry, "pointer", L2cache.lookup(address)); |
| return cache_entry; |
| } |
| |
  // Fetch the local-directory filter entry; caller must ensure the tag is
  // present (see isTagPresent checks at all call sites).
  DirEntry getDirEntry(Addr address), return_by_pointer="yes" {
    return localDirectory.lookup(address);
  }
| |
  // Functional (debug/backdoor) read of the block's data.
  // NOTE(review): assumes the entry is valid — getCacheEntry may return an
  // invalid pointer for absent tags; confirm callers guarantee presence.
  void functionalRead(Addr addr, Packet *pkt) {
    testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
  }
| |
| int functionalWrite(Addr addr, Packet *pkt) { |
| int num_functional_writes := 0; |
| num_functional_writes := num_functional_writes + |
| testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt); |
| return num_functional_writes; |
| } |
| |
| int getTokens(Entry cache_entry) { |
| if (is_valid(cache_entry)) { |
| return cache_entry.Tokens; |
| } else { |
| return 0; |
| } |
| } |
| |
  // Current protocol state for the address. Order matters: a valid entry's
  // recorded state wins; otherwise a persistently-locked address is I_L
  // (we must bounce tokens), else the line is simply Not Present.
  State getState(Entry cache_entry, Addr addr) {
    if (is_valid(cache_entry)) {
      return cache_entry.CacheState;
    } else if (persistentTable.isLocked(addr)) {
      return State:I_L;
    } else {
      return State:NP;
    }
  }
| |
| void setState(Entry cache_entry, Addr addr, State state) { |
| |
| if (is_valid(cache_entry)) { |
| // Make sure the token count is in range |
| assert(cache_entry.Tokens >= 0); |
| assert(cache_entry.Tokens <= max_tokens()); |
| assert(cache_entry.Tokens != (max_tokens() / 2)); |
| |
| // Make sure we have no tokens in L |
| if ((state == State:I_L) ) { |
| assert(cache_entry.Tokens == 0); |
| } |
| |
| // in M and E you have all the tokens |
| if (state == State:M ) { |
| assert(cache_entry.Tokens == max_tokens()); |
| } |
| |
| // in NP you have no tokens |
| if (state == State:NP) { |
| assert(cache_entry.Tokens == 0); |
| } |
| |
| // You have at least one token in S-like states |
| if (state == State:S ) { |
| assert(cache_entry.Tokens > 0); |
| } |
| |
| // You have at least half the token in O-like states |
| if (state == State:O ) { |
| assert(cache_entry.Tokens > (max_tokens() / 2)); |
| } |
| |
| cache_entry.CacheState := state; |
| } |
| } |
| |
| AccessPermission getAccessPermission(Addr addr) { |
| Entry cache_entry := getCacheEntry(addr); |
| if(is_valid(cache_entry)) { |
| return L2Cache_State_to_permission(cache_entry.CacheState); |
| } |
| |
| return AccessPermission:NotPresent; |
| } |
| |
  // Keep the entry's permission bits in sync with its new protocol state.
  void setAccessPermission(Entry cache_entry, Addr addr, State state) {
    if (is_valid(cache_entry)) {
      cache_entry.changePermission(L2Cache_State_to_permission(state));
    }
  }
| |
  // Drop one local L1 from the sharer filter; deallocate the filter entry
  // once its sharer set becomes empty.
  void removeSharer(Addr addr, NodeID id) {

    if (localDirectory.isTagPresent(addr)) {
      DirEntry dir_entry := getDirEntry(addr);
      dir_entry.Sharers.remove(id);
      if (dir_entry.Sharers.count() == 0) {
        localDirectory.deallocate(addr);
      }
    }
  }
| |
| bool sharersExist(Addr addr) { |
| if (localDirectory.isTagPresent(addr)) { |
| DirEntry dir_entry := getDirEntry(addr); |
| if (dir_entry.Sharers.count() > 0) { |
| return true; |
| } |
| else { |
| return false; |
| } |
| } |
| else { |
| return false; |
| } |
| } |
| |
| bool exclusiveExists(Addr addr) { |
| if (localDirectory.isTagPresent(addr)) { |
| DirEntry dir_entry := getDirEntry(addr); |
| if (dir_entry.exclusive) { |
| return true; |
| } |
| else { |
| return false; |
| } |
| } |
| else { |
| return false; |
| } |
| } |
| |
  // Return the recorded local sharer set.
  // assumes that caller will check to make sure tag is present
  Set getSharers(Addr addr) {
    DirEntry dir_entry := getDirEntry(addr);
    return dir_entry.Sharers;
  }
| |
  // A local L1 issued GETX: it becomes the sole recorded sharer and is
  // marked likely-exclusive (allocating a filter entry if needed).
  void setNewWriter(Addr addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    DirEntry dir_entry := getDirEntry(addr);
    dir_entry.Sharers.clear();
    dir_entry.Sharers.add(id);
    dir_entry.exclusive := true;
  }
| |
  // A local L1 issued GETS: add it to the recorded sharer set
  // (allocating a filter entry if needed); exclusive bit left untouched.
  void addNewSharer(Addr addr, NodeID id) {
    if (localDirectory.isTagPresent(addr) == false) {
      localDirectory.allocate(addr);
    }
    DirEntry dir_entry := getDirEntry(addr);
    dir_entry.Sharers.add(id);
    // dir_entry.exclusive := false;
  }
| |
  // Clear the likely-exclusive hint for addr, if a filter entry exists.
  void clearExclusiveBitIfExists(Addr addr) {
    if (localDirectory.isTagPresent(addr)) {
      DirEntry dir_entry := getDirEntry(addr);
      dir_entry.exclusive := false;
    }
  }
| |
  // ** OUT_PORTS **
  out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);  // requests to directory
  out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);       // forwarded requests to local L1s
  out_port(responseNetwork_out, ResponseMsg, responseFromL2Cache);           // data/token/ack responses
| |
| |
| |
| // ** IN_PORTS ** |
| |
| // Persistent Network |
  // Persistent Network: lock/unlock notifications for starvation avoidance.
  // First update the persistent table, then trigger an event reflecting
  // whether someone else now holds the address locked.
  in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
    if (persistentNetwork_in.isReady(clockEdge())) {
      peek(persistentNetwork_in, PersistentMsg) {
        assert(in_msg.Destination.isElement(machineID));

        // Apply the table update before deciding how to react.
        if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Write);
        } else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
          persistentTable.persistentRequestLock(in_msg.addr, in_msg.Requestor, AccessType:Read);
        } else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
          persistentTable.persistentRequestUnlock(in_msg.addr, in_msg.Requestor);
        } else {
          error("Unexpected message");
        }

        Entry cache_entry := getCacheEntry(in_msg.addr);
        // React to the message based on the current state of the table
        if (persistentTable.isLocked(in_msg.addr)) {

          if (persistentTable.typeOfSmallest(in_msg.addr) == AccessType:Read) {
            // A reader-starver only needs tokens beyond our last simple
            // token (or beyond the owner token + 1).
            if (getTokens(cache_entry) == 1 ||
                getTokens(cache_entry) == (max_tokens() / 2) + 1) {
              trigger(Event:Persistent_GETS_Last_Token, in_msg.addr,
                      cache_entry);
            } else {
              trigger(Event:Persistent_GETS, in_msg.addr, cache_entry);
            }
          } else {
            trigger(Event:Persistent_GETX, in_msg.addr, cache_entry);
          }
        }
        else {
          trigger(Event:Own_Lock_or_Unlock, in_msg.addr, cache_entry);
        }
      }
    }
  }
| |
| |
| // Request Network |
  // Request Network: transient GETS/GETX from other processors (via the
  // global network).
  in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
    if (requestNetwork_in.isReady(clockEdge())) {
      peek(requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));

        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:Transient_GETX, in_msg.addr, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          // NOTE(review): unlike the L1 port below, only the ==1 case is
          // treated as last-token here (the owner-token case is not);
          // presumably intentional for external GETS — confirm.
          if (getTokens(cache_entry) == 1) {
            trigger(Event:Transient_GETS_Last_Token, in_msg.addr,
                    cache_entry);
          }
          else {
            trigger(Event:Transient_GETS, in_msg.addr, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }
| |
  // Local L1 request network: GETS/GETX from L1s attached to this bank.
  in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
    if (L1requestNetwork_in.isReady(clockEdge())) {
      peek(L1requestNetwork_in, RequestMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.addr);
        if (in_msg.Type == CoherenceRequestType:GETX) {
          trigger(Event:L1_GETX, in_msg.addr, cache_entry);
        } else if (in_msg.Type == CoherenceRequestType:GETS) {
          // Last-token cases: exactly one plain token, or exactly the
          // owner token plus one.
          if (getTokens(cache_entry) == 1 ||
              getTokens(cache_entry) == (max_tokens() / 2) + 1) {
            trigger(Event:L1_GETS_Last_Token, in_msg.addr, cache_entry);
          }
          else {
            trigger(Event:L1_GETS, in_msg.addr, cache_entry);
          }
        } else {
          error("Unexpected message");
        }
      }
    }
  }
| |
| |
| // Response Network |
  // Response Network: data/token/ack/writeback messages. The outer split
  // is on whether this message would complete our token set (current
  // tokens + incoming tokens == max_tokens()); the *_All_Tokens events
  // drive transitions to fully-owned states.
  in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
    if (responseNetwork_in.isReady(clockEdge())) {
      peek(responseNetwork_in, ResponseMsg) {
        assert(in_msg.Destination.isElement(machineID));
        Entry cache_entry := getCacheEntry(in_msg.addr);

        // Case 1: the message does NOT complete our token set.
        if (getTokens(cache_entry) + in_msg.Tokens != max_tokens()) {
          if (in_msg.Type == CoherenceResponseType:ACK) {
            // A bare ack never carries the owner token.
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
            trigger(Event:Data_Owner, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_Shared, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {

            if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Tokens, in_msg.addr, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Shared_Data, in_msg.addr, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                //assert(in_msg.Dirty == false);
                trigger(Event:Writeback_Owned, in_msg.addr, cache_entry);
              }
            }
            else {
              // No room: evict the victim chosen by cacheProbe first; the
              // writeback message stays queued and is re-handled later.
              trigger(Event:L2_Replacement,
                      L2cache.cacheProbe(in_msg.addr),
                      getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.addr, cache_entry);
          } else {
            error("Unexpected message");
          }
        } else {
          // Case 2: this message gives us ALL the tokens.
          if (in_msg.Type == CoherenceResponseType:ACK) {
            assert(in_msg.Tokens < (max_tokens() / 2));
            trigger(Event:Ack_All_Tokens, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:DATA_OWNER ||
                     in_msg.Type == CoherenceResponseType:DATA_SHARED) {
            trigger(Event:Data_All_Tokens, in_msg.addr, cache_entry);
          } else if (in_msg.Type == CoherenceResponseType:WB_TOKENS ||
                     in_msg.Type == CoherenceResponseType:WB_OWNED ||
                     in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
            if (L2cache.cacheAvail(in_msg.addr) || is_valid(cache_entry)) {

              // either room is available or the block is already present

              if (in_msg.Type == CoherenceResponseType:WB_TOKENS) {
                assert(in_msg.Dirty == false);
                // A token-only writeback completing the set implies we
                // already hold the block in a valid state.
                assert( (getState(cache_entry, in_msg.addr) != State:NP)
                        && (getState(cache_entry, in_msg.addr) != State:I) );
                trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
              } else if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
                assert(in_msg.Dirty == false);
                trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
              }
              else if (in_msg.Type == CoherenceResponseType:WB_OWNED) {
                trigger(Event:Writeback_All_Tokens, in_msg.addr, cache_entry);
              }
            }
            else {
              // No room: make space before accepting the writeback.
              trigger(Event:L2_Replacement,
                      L2cache.cacheProbe(in_msg.addr),
                      getCacheEntry(L2cache.cacheProbe(in_msg.addr)));
            }
          } else if (in_msg.Type == CoherenceResponseType:INV) {
            trigger(Event:L1_INV, in_msg.addr, cache_entry);
          } else {
            DPRINTF(RubySlicc, "%s\n", in_msg.Type);
            error("Unexpected message");
          }
        }
      }
    }
  }
| |
| |
| // ACTIONS |
| |
  // ACTIONS

  // Re-issue a local L1 request to the directory, preserving the
  // requestor identity and retry count.
  action(a_broadcastLocalRequest, "a", desc="broadcast local request globally") {

    peek(L1requestNetwork_in, RequestMsg) {

      // if this is a retry or no local sharers, broadcast normally
      enqueue(globalRequestNetwork_out, RequestMsg, l2_request_latency) {
        out_msg.addr := in_msg.addr;
        out_msg.Type := in_msg.Type;
        out_msg.Requestor := in_msg.Requestor;
        out_msg.RetryNum := in_msg.RetryNum;

        //
        // If a statically shared L2 cache, then no other L2 caches can
        // store the block
        //
        //out_msg.Destination.broadcast(MachineType:L2Cache);
        //out_msg.Destination.addNetDest(getAllPertinentL2Banks(address));
        //out_msg.Destination.remove(map_L1CacheMachId_to_L2Cache(address, in_msg.Requestor));

        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Request_Control;
        out_msg.AccessMode := in_msg.AccessMode;
        out_msg.Prefetch := in_msg.Prefetch;
      } //enqueue
      // } // if

      //profile_filter_action(0);
    } // peek
  } //action
| |
| |
  // Forward an incoming response (tokens and possibly data) unchanged to
  // the directory/memory, since this bank cannot keep it.
  action(bb_bounceResponse, "\b", desc="Bounce tokens and data to memory") {
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.MessageSize := in_msg.MessageSize;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
      }
    }
  }
| |
  // Evict a clean line: return any held tokens to the directory as a
  // control-only ack (no data needed).
  action(c_cleanReplacement, "c", desc="Issue clean writeback") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
      }
      cache_entry.Tokens := 0;
    }
  }
| |
  // Evict an owned line: send tokens plus data (if dirty) or an
  // owner-ack (if clean) back to the directory.
  action(cc_dirtyReplacement, "\c", desc="Issue dirty writeback") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.addr := address;
      out_msg.Sender := machineID;
      out_msg.Destination.add(mapAddressToMachine(address, MachineType:Directory));
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;

      if (cache_entry.Dirty) {
        out_msg.MessageSize := MessageSizeType:Writeback_Data;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
      } else {
        out_msg.MessageSize := MessageSizeType:Writeback_Control;
        out_msg.Type := CoherenceResponseType:ACK_OWNER;
      }
    }
    cache_entry.Tokens := 0;
  }
| |
  // Answer a transient GETS with shared data: hand out a batch of
  // N_tokens when we hold plenty (still keeping more than the owner
  // majority), otherwise just a single token.
  action(d_sendDataWithTokens, "d", desc="Send data and a token from cache to requestor") {
    peek(requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      if (cache_entry.Tokens > (N_tokens + (max_tokens() / 2))) {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := N_tokens;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - N_tokens;
      }
      else {
        enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
          out_msg.addr := address;
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
          out_msg.Sender := machineID;
          out_msg.Destination.add(in_msg.Requestor);
          out_msg.Tokens := 1;
          out_msg.DataBlk := cache_entry.DataBlk;
          out_msg.Dirty := false;
          out_msg.MessageSize := MessageSizeType:Response_Data;
        }
        cache_entry.Tokens := cache_entry.Tokens - 1;
      }
    }
  }
| |
  // Answer a transient GETX: hand over the data and every token we hold
  // (making the requestor the new owner).
  action(dd_sendDataWithAllTokens, "\d", desc="Send data and all tokens from cache to requestor") {
    assert(is_valid(cache_entry));
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := cache_entry.Dirty;
        out_msg.MessageSize := MessageSizeType:Response_Data;
      }
    }
    cache_entry.Tokens := 0;
  }
| |
  // Surrender all held tokens (no data) to the current persistent-request
  // winner; no-op message-wise when we hold none, but always zero the count.
  action(e_sendAckWithCollectedTokens, "e", desc="Send ack with the tokens we've collected thus far.") {
    assert(is_valid(cache_entry));
    if (cache_entry.Tokens > 0) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 0;
  }
| |
  // Surrender data plus all held tokens to the persistent-request winner
  // (we are the owner being starved out).
  action(ee_sendDataWithAllTokens, "\e", desc="Send data and all tokens from cache to starver") {
    assert(is_valid(cache_entry));
    enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
      out_msg.addr := address;
      out_msg.Type := CoherenceResponseType:DATA_OWNER;
      out_msg.Sender := machineID;
      out_msg.Destination.add(persistentTable.findSmallest(address));
      assert(cache_entry.Tokens >= 1);
      out_msg.Tokens := cache_entry.Tokens;
      out_msg.DataBlk := cache_entry.DataBlk;
      out_msg.Dirty := cache_entry.Dirty;
      out_msg.MessageSize := MessageSizeType:Response_Data;
    }
    cache_entry.Tokens := 0;
  }
| |
  // Persistent GETS from a reader: give up all tokens except one so we
  // can keep read permission; sends nothing when we only hold one.
  action(f_sendAckWithAllButOneTokens, "f", desc="Send ack with all our tokens but one to starver.") {
    //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself
    assert(is_valid(cache_entry));
    assert(cache_entry.Tokens > 0);
    if (cache_entry.Tokens > 1) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens - 1;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
    cache_entry.Tokens := 1;
  }
| |
| action(ff_sendDataWithAllButOneTokens, "\f", desc="Send data and out tokens but one to starver") { |
| //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself |
| assert(is_valid(cache_entry)); |
| assert(cache_entry.Tokens > (max_tokens() / 2) + 1); |
| enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceResponseType:DATA_OWNER; |
| out_msg.Sender := machineID; |
| out_msg.Destination.add(persistentTable.findSmallest(address)); |
| out_msg.Tokens := cache_entry.Tokens - 1; |
| out_msg.DataBlk := cache_entry.DataBlk; |
| out_msg.Dirty := cache_entry.Dirty; |
| out_msg.MessageSize := MessageSizeType:Response_Data; |
| } |
| cache_entry.Tokens := 1; |
| } |
| |
| action(fa_sendDataWithAllTokens, "fa", desc="Send data and out tokens but one to starver") { |
| //assert(persistentTable.findSmallest(address) != id); // Make sure we never bounce tokens to ourself |
| assert(is_valid(cache_entry)); |
| assert(cache_entry.Tokens == (max_tokens() / 2) + 1); |
| enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceResponseType:DATA_OWNER; |
| out_msg.Sender := machineID; |
| out_msg.Destination.add(persistentTable.findSmallest(address)); |
| out_msg.Tokens := cache_entry.Tokens; |
| out_msg.DataBlk := cache_entry.DataBlk; |
| out_msg.Dirty := cache_entry.Dirty; |
| out_msg.MessageSize := MessageSizeType:Response_Data; |
| } |
| cache_entry.Tokens := 0; |
| } |
| |
| |
| |
  // The address is persistently locked by someone else: redirect an
  // incoming response, unchanged, to the persistent-request winner.
  action(gg_bounceResponseToStarver, "\g", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := in_msg.Type;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }
| |
  // Redirect an L1 writeback to the starver, rewriting the writeback type
  // into the equivalent response type (shared data -> DATA_SHARED,
  // token-only -> ACK).
  action(gg_bounceWBSharedToStarver, "\gg", desc="Redirect response to starving processor") {
    //assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        if (in_msg.Type == CoherenceResponseType:WB_SHARED_DATA) {
          out_msg.Type := CoherenceResponseType:DATA_SHARED;
        } else {
          // Token-only writeback: must not carry the owner token.
          assert(in_msg.Tokens < (max_tokens() / 2));
          out_msg.Type := CoherenceResponseType:ACK;
        }
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }
| |
  // Redirect an owned L1 writeback to the starver as a DATA_OWNER
  // response (ownership transfers to the persistent-request winner).
  action(gg_bounceWBOwnedToStarver, "\ggg", desc="Redirect response to starving processor") {
    // assert(persistentTable.isLocked(address));
    peek(responseNetwork_in, ResponseMsg) {
      // FIXME, should use a 3rd vnet in some cases
      enqueue(responseNetwork_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_OWNER;
        out_msg.Sender := machineID;
        out_msg.Destination.add(persistentTable.findSmallest(address));
        out_msg.Tokens := in_msg.Tokens;
        out_msg.DataBlk := in_msg.DataBlk;
        out_msg.Dirty := in_msg.Dirty;
        out_msg.MessageSize := in_msg.MessageSize;
      }
    }
  }
| |
| |
  // An L1 wrote the block back (or hinted): drop it from the local
  // sharer filter.
  action(h_updateFilterFromL1HintOrWB, "h", desc="update filter from received writeback") {
    peek(responseNetwork_in, ResponseMsg) {
      removeSharer(in_msg.addr, machineIDToNodeID(in_msg.Sender));
    }
  }
| |
  // Forward an external transient request to local L1s — unless filtering
  // is on, this is the first attempt (RetryNum == 0), and our filter says
  // no local L1 holds the block, in which case the forward is suppressed.
  action(j_forwardTransientRequestToLocalSharers, "j", desc="Forward external transient request to local sharers") {
    peek(requestNetwork_in, RequestMsg) {
      if (filtering_enabled && in_msg.RetryNum == 0 && sharersExist(in_msg.addr) == false) {
        //profile_filter_action(1);
        DPRINTF(RubySlicc, "filtered message, Retry Num: %d\n",
                in_msg.RetryNum);
      }
      else {
        enqueue(localRequestNetwork_out, RequestMsg, l2_response_latency ) {
          out_msg.addr := in_msg.addr;
          out_msg.Requestor := in_msg.Requestor;

          //
          // Currently assuming only one chip so all L1s are local
          //
          //out_msg.Destination := getLocalL1IDs(machineID);
          out_msg.Destination.broadcast(MachineType:L1Cache);
          out_msg.Destination.remove(in_msg.Requestor);

          out_msg.Type := in_msg.Type;
          out_msg.isLocal := false;
          out_msg.MessageSize := MessageSizeType:Broadcast_Control;
          out_msg.AccessMode := in_msg.AccessMode;
          out_msg.Prefetch := in_msg.Prefetch;
        }
        //profile_filter_action(0);
      }
    }
  }
| |
  // L2 hit on a local L1 GETS: send shared (clean) data plus exactly one
  // token to the requesting L1.
  action(k_dataFromL2CacheToL1Requestor, "k", desc="Send data and a token from cache to L1 requestor") {
    peek(L1requestNetwork_in, RequestMsg) {
      assert(is_valid(cache_entry));
      assert(cache_entry.Tokens > 0);
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:DATA_SHARED;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.Dirty := false;
        out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data;
        out_msg.Tokens := 1;
      }
      cache_entry.Tokens := cache_entry.Tokens - 1;
    }
  }
| |
| action(k_dataOwnerFromL2CacheToL1Requestor, "\k", desc="Send data and a token from cache to L1 requestor") { |
| peek(L1requestNetwork_in, RequestMsg) { |
| assert(is_valid(cache_entry)); |
| assert(cache_entry.Tokens == (max_tokens() / 2) + 1); |
| enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceResponseType:DATA_OWNER; |
| out_msg.Sender := machineID; |
| out_msg.Destination.add(in_msg.Requestor); |
| out_msg.DataBlk := cache_entry.DataBlk; |
| out_msg.Dirty := cache_entry.Dirty; |
| out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data; |
| out_msg.Tokens := cache_entry.Tokens; |
| } |
| cache_entry.Tokens := 0; |
| } |
| } |
| |
| action(k_dataAndAllTokensFromL2CacheToL1Requestor, "\kk", desc="Send data and a token from cache to L1 requestor") { |
| peek(L1requestNetwork_in, RequestMsg) { |
| assert(is_valid(cache_entry)); |
| // assert(cache_entry.Tokens == max_tokens()); |
| enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) { |
| out_msg.addr := address; |
| out_msg.Type := CoherenceResponseType:DATA_OWNER; |
| out_msg.Sender := machineID; |
| out_msg.Destination.add(in_msg.Requestor); |
| out_msg.DataBlk := cache_entry.DataBlk; |
| out_msg.Dirty := cache_entry.Dirty; |
| out_msg.MessageSize := MessageSizeType:ResponseL2hit_Data; |
| //out_msg.Tokens := max_tokens(); |
| out_msg.Tokens := cache_entry.Tokens; |
| } |
| cache_entry.Tokens := 0; |
| } |
| } |
| |
  // Queue-pop actions: consume the head message of each in_port after it
  // has been handled by a transition.
  action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
    persistentNetwork_in.dequeue(clockEdge());
  }

  action(m_popRequestQueue, "m", desc="Pop request queue.") {
    requestNetwork_in.dequeue(clockEdge());
  }

  action(n_popResponseQueue, "n", desc="Pop response queue") {
    responseNetwork_in.dequeue(clockEdge());
  }

  action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
    L1requestNetwork_in.dequeue(clockEdge());
  }
| |
| |
// Absorb the tokens carried by the response at the head of the response
// queue into this entry's token count. Also raises the dirty bit when
// dirty owner data (DATA_OWNER / WB_OWNED) arrives.
action(q_updateTokensFromResponse, "q", desc="Update the token count based on the incoming response message") {
  peek(responseNetwork_in, ResponseMsg) {
    assert(is_valid(cache_entry));
    // A token-bearing response must carry at least one token.
    assert(in_msg.Tokens != 0);
    cache_entry.Tokens := cache_entry.Tokens + in_msg.Tokens;

    // this should ideally be in u_writeDataToCache, but Writeback_All_Tokens
    // may not trigger this action.
    if ( (in_msg.Type == CoherenceResponseType:DATA_OWNER || in_msg.Type == CoherenceResponseType:WB_OWNED) && in_msg.Dirty) {
      cache_entry.Dirty := true;
    }
  }
}
| |
// Update the local sharing filter from the L1 request at the head of the
// queue: a GETX records the requestor as the new writer, a GETS adds it as
// a sharer. Requests from machines other than local L1s are ignored.
action(r_markNewSharer, "r", desc="Mark the new local sharer from local request message") {
  peek(L1requestNetwork_in, RequestMsg) {
    if (machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) {
      if (in_msg.Type == CoherenceRequestType:GETX) {
        setNewWriter(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
      } else if (in_msg.Type == CoherenceRequestType:GETS) {
        addNewSharer(in_msg.addr, machineIDToNodeID(in_msg.Requestor));
      }
    }
  }
}
| |
// Clear any exclusive-ownership marker recorded for this address
// (helper defined elsewhere in this machine).
action(r_clearExclusive, "\rrr", desc="clear exclusive bit") {
  clearExclusiveBitIfExists(address);
}
| |
// Touch replacement state: promote this block to MRU, but only when the
// request comes from a local L1 and the entry is actually valid (a hit).
action(r_setMRU, "\rr", desc="manually set the MRU bit for cache line" ) {
  peek(L1requestNetwork_in, RequestMsg) {
    if ((machineIDToMachineType(in_msg.Requestor) == MachineType:L1Cache) &&
        (is_valid(cache_entry))) {
      L2cache.setMRU(address);
    }
  }
}
| |
// Send every token collected so far (no data) as an ACK to the requestor
// of the message at the head of the global request queue. No message is
// sent when we hold no tokens; the entry's count ends at zero either way.
action(t_sendAckWithCollectedTokens, "t", desc="Send ack with the tokens we've collected thus far.") {
  assert(is_valid(cache_entry));
  if (cache_entry.Tokens > 0) {
    peek(requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }
  // No-op when no message was sent (count already zero).
  cache_entry.Tokens := 0;
}
| |
// Local-request variant of t_sendAckWithCollectedTokens: send every token
// collected so far (no data) as an ACK to the requestor of the message at
// the head of the LOCAL L1 request queue. No message when we hold no
// tokens; the entry's count ends at zero either way.
action(tt_sendLocalAckWithCollectedTokens, "tt", desc="Send ack with the tokens we've collected thus far.") {
  assert(is_valid(cache_entry));
  if (cache_entry.Tokens > 0) {
    peek(L1requestNetwork_in, RequestMsg) {
      enqueue(responseNetwork_out, ResponseMsg, l2_response_latency) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:ACK;
        out_msg.Sender := machineID;
        out_msg.Destination.add(in_msg.Requestor);
        assert(cache_entry.Tokens >= 1);
        out_msg.Tokens := cache_entry.Tokens;
        out_msg.MessageSize := MessageSizeType:Response_Control;
      }
    }
  }
  // No-op when no message was sent (count already zero).
  cache_entry.Tokens := 0;
}
| |
// Copy the incoming response's data block into the cache entry. The dirty
// bit can only be raised here, never cleared: a clean incoming copy does
// not wash out previously-dirty cached data.
action(u_writeDataToCache, "u", desc="Write data to cache") {
  peek(responseNetwork_in, ResponseMsg) {
    assert(is_valid(cache_entry));
    cache_entry.DataBlk := in_msg.DataBlk;
    if ((cache_entry.Dirty == false) && in_msg.Dirty) {
      cache_entry.Dirty := in_msg.Dirty;
    }
  }
}
| |
// Allocate a fresh L2 entry for this address and make it the active
// cache_entry for subsequent actions in the same transition.
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
  set_cache_entry(L2cache.allocate(address, new Entry));
}
| |
// Free this address's L2 entry and clear the active cache_entry handle.
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
  L2cache.deallocate(address);
  unset_cache_entry();
}
| |
// Bump the L2 demand-miss statistic.
action(uu_profileMiss, "\um", desc="Profile the demand miss") {
  ++L2cache.demand_misses;
}
| |
// Bump the L2 demand-hit statistic.
action(uu_profileHit, "\uh", desc="Profile the demand hit") {
  ++L2cache.demand_hits;
}
| |
// Sanity check: any data-bearing response (i.e. anything other than a
// bare ACK or a token-only writeback) must carry the same data block we
// already have cached.
action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
  peek(responseNetwork_in, ResponseMsg) {
    if (in_msg.Type != CoherenceResponseType:ACK &&
        in_msg.Type != CoherenceResponseType:WB_TOKENS) {
      assert(is_valid(cache_entry));
      assert(cache_entry.DataBlk == in_msg.DataBlk);
    }
  }
}
| |
| |
| //***************************************************** |
| // TRANSITIONS |
| //***************************************************** |
| |
// An L1 invalidation hint can arrive in any state; a notification of this
// node's own persistent lock/unlock can arrive in any base state.

// L1 tells us it dropped the block: just update the local sharing filter.
transition({NP, I, S, O, M, I_L, S_L}, L1_INV) {

  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

// Our own persistent lock/unlock: nothing extra to do in a base state.
transition({NP, I, S, O, M}, Own_Lock_or_Unlock) {
  l_popPersistentQueue;
}
| |
| |
// Transitions from NP (block not present in this L2 bank)

// External transient request: we hold nothing, but local L1s might, so
// relay the request to any local sharers recorded in the filter.
transition(NP, {Transient_GETX, Transient_GETS}) {
  // forward message to local sharers
  r_clearExclusive;
  j_forwardTransientRequestToLocalSharers;
  m_popRequestQueue;
}


// Local L1 miss with nothing cached: broadcast on behalf of the L1 and
// record the requestor in the sharing filter.
transition(NP, {L1_GETS, L1_GETX}) {
  a_broadcastLocalRequest;
  r_markNewSharer;
  uu_profileMiss;
  o_popL1RequestQueue;
}

// Responses arriving with no entry allocated cannot be kept here; bounce
// them onward (tokens must never be dropped).
transition(NP, {Ack, Data_Shared, Data_Owner, Data_All_Tokens}) {
  bb_bounceResponse;
  n_popResponseQueue;
}

// L1 writebacks allocate an entry and absorb the tokens/data they carry;
// the resulting state mirrors what the L1 gave up.
transition(NP, Writeback_Shared_Data, S) {
  vv_allocateL2CacheBlock;
  u_writeDataToCache;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

// Token-only writeback carries no data, so the entry lands in I.
transition(NP, Writeback_Tokens, I) {
  vv_allocateL2CacheBlock;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

transition(NP, Writeback_All_Tokens, M) {
  vv_allocateL2CacheBlock;
  u_writeDataToCache;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

transition(NP, Writeback_Owned, O) {
  vv_allocateL2CacheBlock;
  u_writeDataToCache;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}


// A persistent request locks the (absent) block: move to locked-idle so
// later responses get bounced to the starver.
transition(NP,
           {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
           I_L) {
  l_popPersistentQueue;
}
| |
// Transitions from Idle (entry allocated; may hold tokens but no valid data)

// Local L1 read miss: broadcast for the L1 and hand over any tokens we
// have already collected.
transition(I, {L1_GETS, L1_GETS_Last_Token}) {
  a_broadcastLocalRequest;
  tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
  r_markNewSharer;
  uu_profileMiss;
  o_popL1RequestQueue;
}

transition(I, L1_GETX) {
  a_broadcastLocalRequest;
  tt_sendLocalAckWithCollectedTokens; // send any tokens we have collected
  r_markNewSharer;
  uu_profileMiss;
  o_popL1RequestQueue;
}

transition(I, L2_Replacement) {
  c_cleanReplacement; // Only needed in some cases
  rr_deallocateL2CacheBlock;
}

// Remote transient request: give up the collected tokens and also relay
// the request to any local sharers.
transition(I, {Transient_GETX, Transient_GETS, Transient_GETS_Last_Token}) {
  r_clearExclusive;
  t_sendAckWithCollectedTokens;
  j_forwardTransientRequestToLocalSharers;
  m_popRequestQueue;
}

// Persistent request: surrender tokens and lock the block.
transition(I,
           {Persistent_GETX, Persistent_GETS, Persistent_GETS_Last_Token},
           I_L) {
  e_sendAckWithCollectedTokens;
  l_popPersistentQueue;
}


transition(I, Ack) {
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

// Incoming data upgrades the entry: shared data -> S, owner data -> O,
// all tokens -> M.
transition(I, Data_Shared, S) {
  u_writeDataToCache;
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

transition(I, Writeback_Shared_Data, S) {
  u_writeDataToCache;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

// Token-only writeback: collect the tokens, stay in I (still no data).
transition(I, Writeback_Tokens) {
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

transition(I, Data_Owner, O) {
  u_writeDataToCache;
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

transition(I, Writeback_Owned, O) {
  u_writeDataToCache;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

transition(I, Data_All_Tokens, M) {
  u_writeDataToCache;
  q_updateTokensFromResponse;
  n_popResponseQueue;
}


transition(I, Writeback_All_Tokens, M) {
  u_writeDataToCache;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}
| |
// Transitions from Shared (valid read-only data plus some tokens)

transition(S, L2_Replacement, I) {
  c_cleanReplacement;
  rr_deallocateL2CacheBlock;
}

// Remote writer takes our tokens; the entry drops to I.
transition(S, Transient_GETX, I) {
  r_clearExclusive;
  t_sendAckWithCollectedTokens;
  j_forwardTransientRequestToLocalSharers;
  m_popRequestQueue;
}

// Remote reader doesn't need our tokens; just relay to local sharers.
transition(S, {Transient_GETS, Transient_GETS_Last_Token}) {
  j_forwardTransientRequestToLocalSharers;
  r_clearExclusive;
  m_popRequestQueue;
}

transition(S, Persistent_GETX, I_L) {
  e_sendAckWithCollectedTokens;
  l_popPersistentQueue;
}


// Persistent reader: keep one token so we remain a (locked) sharer.
transition(S, {Persistent_GETS, Persistent_GETS_Last_Token}, S_L) {
  f_sendAckWithAllButOneTokens;
  l_popPersistentQueue;
}


transition(S, Ack) {
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

// We already hold the data, so incoming copies must match ours.
transition(S, Data_Shared) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

transition(S, Writeback_Tokens) {
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

transition(S, Writeback_Shared_Data) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}


transition(S, Data_Owner, O) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

transition(S, Writeback_Owned, O) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

transition(S, Data_All_Tokens, M) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

transition(S, Writeback_All_Tokens, M) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

// Local write miss: give the L1 our tokens and broadcast for the rest.
transition(S, L1_GETX, I) {
  a_broadcastLocalRequest;
  tt_sendLocalAckWithCollectedTokens;
  r_markNewSharer;
  r_setMRU;
  uu_profileMiss;
  o_popL1RequestQueue;
}


// Local read hit: supply data plus a token, stay shared.
transition(S, L1_GETS) {
  k_dataFromL2CacheToL1Requestor;
  r_markNewSharer;
  r_setMRU;
  uu_profileHit;
  o_popL1RequestQueue;
}

// Handing over our last token leaves the entry in I.
transition(S, L1_GETS_Last_Token, I) {

  k_dataFromL2CacheToL1Requestor;
  r_markNewSharer;
  r_setMRU;
  uu_profileHit;
  o_popL1RequestQueue;
}
| |
// Transitions from Owned (data + owner token share; responsible for writeback)

transition(O, L2_Replacement, I) {
  cc_dirtyReplacement;
  rr_deallocateL2CacheBlock;
}

// Remote writer: hand over data and every token, and relay locally.
transition(O, Transient_GETX, I) {
  r_clearExclusive;
  dd_sendDataWithAllTokens;
  j_forwardTransientRequestToLocalSharers;
  m_popRequestQueue;
}

transition(O, Persistent_GETX, I_L) {
  ee_sendDataWithAllTokens;
  l_popPersistentQueue;
}

// Persistent reader: keep one token and stay a locked sharer...
transition(O, Persistent_GETS, S_L) {
  ff_sendDataWithAllButOneTokens;
  l_popPersistentQueue;
}

// ...unless the starver needs the last token; then give up everything.
transition(O, Persistent_GETS_Last_Token, I_L) {
  fa_sendDataWithAllTokens;
  l_popPersistentQueue;
}

transition(O, Transient_GETS) {
  // send multiple tokens
  r_clearExclusive;
  d_sendDataWithTokens;
  m_popRequestQueue;
}

// Transient request for the last token is deliberately not honored; the
// requestor is expected to escalate to a persistent request.
transition(O, Transient_GETS_Last_Token) {
  // WAIT FOR IT TO GO PERSISTENT
  r_clearExclusive;
  m_popRequestQueue;
}

transition(O, Ack) {
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

// Collecting the final token upgrades owner -> modified.
transition(O, Ack_All_Tokens, M) {
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

transition(O, Data_Shared) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  n_popResponseQueue;
}


transition(O, {Writeback_Tokens, Writeback_Shared_Data}) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

transition(O, Data_All_Tokens, M) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  n_popResponseQueue;
}

transition(O, Writeback_All_Tokens, M) {
  w_assertIncomingDataAndCacheDataMatch;
  q_updateTokensFromResponse;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

// Local read hit: share data plus a token, stay owner.
transition(O, L1_GETS) {
  k_dataFromL2CacheToL1Requestor;
  r_markNewSharer;
  r_setMRU;
  uu_profileHit;
  o_popL1RequestQueue;
}

// Only the owner share remains: pass ownership (data + tokens) to the L1.
transition(O, L1_GETS_Last_Token, I) {
  k_dataOwnerFromL2CacheToL1Requestor;
  r_markNewSharer;
  r_setMRU;
  uu_profileHit;
  o_popL1RequestQueue;
}

// Local write: give the L1 everything we hold and broadcast for the rest.
transition(O, L1_GETX, I) {
  a_broadcastLocalRequest;
  k_dataAndAllTokensFromL2CacheToL1Requestor;
  r_markNewSharer;
  r_setMRU;
  uu_profileMiss;
  o_popL1RequestQueue;
}
| |
// Transitions from M (this bank holds all tokens; sole up-to-date copy)

transition(M, L2_Replacement, I) {
  cc_dirtyReplacement;
  rr_deallocateL2CacheBlock;
}

// MRM_DEBUG: Give up all tokens even for GETS? ???
transition(M, {Transient_GETX, Transient_GETS}, I) {
  r_clearExclusive;
  dd_sendDataWithAllTokens;
  m_popRequestQueue;
}

transition(M, {Persistent_GETS, Persistent_GETX}, I_L) {
  ee_sendDataWithAllTokens;
  l_popPersistentQueue;
}


// Local read: share data plus a token, drop to owner.
transition(M, L1_GETS, O) {
  k_dataFromL2CacheToL1Requestor;
  r_markNewSharer;
  r_setMRU;
  uu_profileHit;
  o_popL1RequestQueue;
}

// Local write: move the data and all tokens up to the L1.
transition(M, L1_GETX, I) {
  k_dataAndAllTokensFromL2CacheToL1Requestor;
  r_markNewSharer;
  r_setMRU;
  uu_profileHit;
  o_popL1RequestQueue;
}
| |
| |
// Transitions from locked states (another node's persistent request holds
// this block; most token traffic is redirected to the starver)

transition({I_L, S_L}, Ack) {
  gg_bounceResponseToStarver;
  n_popResponseQueue;
}

transition({I_L, S_L}, {Data_Shared, Data_Owner, Data_All_Tokens}) {
  gg_bounceResponseToStarver;
  n_popResponseQueue;
}

// L1 writebacks cannot be absorbed while locked; forward them to the
// starver but still update the local sharing filter.
transition({I_L, S_L}, {Writeback_Tokens, Writeback_Shared_Data}) {
  gg_bounceWBSharedToStarver;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

transition({I_L, S_L}, {Writeback_Owned, Writeback_All_Tokens}) {
  gg_bounceWBOwnedToStarver;
  h_updateFilterFromL1HintOrWB;
  n_popResponseQueue;
}

transition(S_L, L2_Replacement, I) {
  c_cleanReplacement;
  rr_deallocateL2CacheBlock;
}

transition(I_L, L2_Replacement, I) {
  rr_deallocateL2CacheBlock;
}

// This node's own persistent activity unlocks the block back to its base
// state.
transition(I_L, Own_Lock_or_Unlock, I) {
  l_popPersistentQueue;
}

transition(S_L, Own_Lock_or_Unlock, S) {
  l_popPersistentQueue;
}

// Transient requests while locked are dropped after bookkeeping; the
// requestor will retry or escalate to a persistent request.
transition({I_L, S_L}, {Transient_GETS_Last_Token, Transient_GETS, Transient_GETX}) {
  r_clearExclusive;
  m_popRequestQueue;
}

transition(I_L, {L1_GETX, L1_GETS}) {
  a_broadcastLocalRequest;
  r_markNewSharer;
  uu_profileMiss;
  o_popL1RequestQueue;
}

transition(S_L, L1_GETX, I_L) {
  a_broadcastLocalRequest;
  tt_sendLocalAckWithCollectedTokens;
  r_markNewSharer;
  r_setMRU;
  uu_profileMiss;
  o_popL1RequestQueue;
}

// A locked sharer can still satisfy local reads with its retained tokens.
transition(S_L, L1_GETS) {
  k_dataFromL2CacheToL1Requestor;
  r_markNewSharer;
  r_setMRU;
  uu_profileHit;
  o_popL1RequestQueue;
}

transition(S_L, L1_GETS_Last_Token, I_L) {
  k_dataFromL2CacheToL1Requestor;
  r_markNewSharer;
  r_setMRU;
  uu_profileHit;
  o_popL1RequestQueue;
}

transition(S_L, Persistent_GETX, I_L) {
  e_sendAckWithCollectedTokens;
  l_popPersistentQueue;
}

// Already locked for a reader; additional persistent reads change nothing.
transition(S_L, {Persistent_GETS, Persistent_GETS_Last_Token}) {
  l_popPersistentQueue;
}

transition(I_L, {Persistent_GETX, Persistent_GETS}) {
  l_popPersistentQueue;
}
| } |