/*
* Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
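// L1 cache controller for the MESI Two-Level CMP protocol: each core has
// private, split L1 I/D caches backed by a shared, inclusive, banked L2.
// The controller services demand requests from the sequencer's mandatory
// queue and, optionally, prefetch requests from the attached prefetcher.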
machine(MachineType:L1Cache, "MESI Directory L1 Cache CMP")
: Sequencer * sequencer;
CacheMemory * L1Icache;
CacheMemory * L1Dcache;
Prefetcher * prefetcher;
int l2_select_num_bits;
Cycles l1_request_latency := 2;
Cycles l1_response_latency := 2;
Cycles to_l2_latency := 1;
bool send_evictions;
bool enable_prefetch := "False";
// Message Queues
// From this node's L1 cache TO the network
// a local L1 -> this L2 bank, currently ordered with directory forwarded requests
MessageBuffer * requestFromL1Cache, network="To", virtual_network="0",
vnet_type="request";
// a local L1 -> this L2 bank
MessageBuffer * responseFromL1Cache, network="To", virtual_network="1",
vnet_type="response";
MessageBuffer * unblockFromL1Cache, network="To", virtual_network="2",
vnet_type="unblock";
// To this node's L1 cache FROM the network
// an L2 bank -> this L1
MessageBuffer * requestToL1Cache, network="From", virtual_network="2",
vnet_type="request";
// an L2 bank -> this L1
MessageBuffer * responseToL1Cache, network="From", virtual_network="1",
vnet_type="response";
// Request Buffer for prefetches
MessageBuffer * optionalQueue;
// Buffer for requests generated by the processor core.
MessageBuffer * mandatoryQueue;
{
// STATES
state_declaration(State, desc="Cache states", default="L1Cache_State_I") {
// Base states
NP, AccessPermission:Invalid, desc="Not present in either cache";
I, AccessPermission:Invalid, desc="an L1 cache entry in Idle";
S, AccessPermission:Read_Only, desc="an L1 cache entry in Shared";
E, AccessPermission:Read_Only, desc="an L1 cache entry in Exclusive";
M, AccessPermission:Read_Write, desc="an L1 cache entry in Modified", format="!b";
// Transient States
IS, AccessPermission:Busy, desc="L1 idle, issued GETS, have not seen response yet";
IM, AccessPermission:Busy, desc="L1 idle, issued GETX, have not seen response yet";
SM, AccessPermission:Read_Only, desc="L1 has block in S, issued UPGRADE/GETX, waiting for data and acks";
IS_I, AccessPermission:Busy, desc="L1 idle, issued GETS, saw Inv before data because directory doesn't block on GETS hit";
M_I, AccessPermission:Busy, desc="L1 replacing, waiting for ACK";
SINK_WB_ACK, AccessPermission:Busy, desc="This is to sink WB_Acks from L2";
// Transient States in which block is being prefetched
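// (each PF_* state mirrors its demand counterpart above; a demand access
// that arrives while the prefetch is still in flight converts the PF_*
// state back to the matching demand transient state -- see the PF_IS/PF_IM
// transitions below)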
PF_IS, AccessPermission:Busy, desc="Issued GETS, have not seen response yet";
PF_IM, AccessPermission:Busy, desc="Issued GETX, have not seen response yet";
PF_SM, AccessPermission:Busy, desc="Issued GETX, received data, waiting for acks";
PF_IS_I, AccessPermission:Busy, desc="Issued GETS, saw Inv before data";
}
// EVENTS
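// Demand events (Load/Ifetch/Store) are decoded from the mandatory queue,
// PF_* events from the optional (prefetch) queue, and the remaining events
// from network request/response messages by the in_ports below.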
enumeration(Event, desc="Cache events") {
// L1 events
Load, desc="Load request from the home processor";
Ifetch, desc="I-fetch request from the home processor";
Store, desc="Store request from the home processor";
Inv, desc="Invalidate request from L2 bank";
// internally generated requests
L1_Replacement, desc="L1 Replacement", format="!r";
PF_L1_Replacement, desc="Prefetch L1 Replacement", format="!pr";
// other requests
Fwd_GETX, desc="GETX from other processor";
Fwd_GETS, desc="GETS from other processor";
Fwd_GET_INSTR, desc="GET_INSTR from other processor";
Data, desc="Data for processor";
Data_Exclusive, desc="Exclusive data for processor";
DataS_fromL1, desc="data for GETS request, need to unblock directory";
Data_all_Acks, desc="Data for processor, all acks";
Ack, desc="Ack for processor";
Ack_all, desc="Last ack for processor";
WB_Ack, desc="Ack for replacement";
PF_Load, desc="load request from prefetcher";
PF_Ifetch, desc="instruction fetch request from prefetcher";
PF_Store, desc="exclusive load request from prefetcher";
}
// TYPES
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry" ) {
State CacheState, desc="cache state";
DataBlock DataBlk, desc="data for the block";
bool Dirty, default="false", desc="data is dirty";
bool isPrefetch, desc="Set if this block was prefetched and not yet accessed";
}
// TBE fields
structure(TBE, desc="...") {
Addr addr, desc="Physical address for this TBE";
State TBEState, desc="Transient state";
DataBlock DataBlk, desc="Buffer for the data block";
bool Dirty, default="false", desc="data is dirty";
bool isPrefetch, desc="Set if this was caused by a prefetch";
int pendingAcks, default="0", desc="number of pending acks";
}
structure(TBETable, external="yes") {
TBE lookup(Addr);
void allocate(Addr);
void deallocate(Addr);
bool isPresent(Addr);
}
TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
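// mapAddressToRange() (used throughout the actions below) selects the
// destination L2 bank from l2_select_num_bits address bits starting at
// l2_select_low_bit, i.e. blocks are interleaved across banks just above
// the block offset. E.g. with 64B blocks and l2_select_num_bits = 1,
// address bit 6 picks one of two banks.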
Tick clockEdge();
Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
void unset_tbe();
void wakeUpBuffers(Addr a);
void profileMsgDelay(int virtualNetworkType, Cycles c);
// inclusive cache returns L1 entries only
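// The D-cache is checked first; the asserts in getState()/setState()
// guarantee that a block is never resident in both L1s at once.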
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
if(is_valid(L1Dcache_entry)) {
return L1Dcache_entry;
}
Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
return L1Dcache_entry;
}
Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
State getState(TBE tbe, Entry cache_entry, Addr addr) {
assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
if(is_valid(tbe)) {
return tbe.TBEState;
} else if (is_valid(cache_entry)) {
return cache_entry.CacheState;
}
return State:NP;
}
void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
// MUST CHANGE
if(is_valid(tbe)) {
tbe.TBEState := state;
}
if (is_valid(cache_entry)) {
cache_entry.CacheState := state;
}
}
AccessPermission getAccessPermission(Addr addr) {
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
}
Entry cache_entry := getCacheEntry(addr);
if(is_valid(cache_entry)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(cache_entry.CacheState));
return L1Cache_State_to_permission(cache_entry.CacheState);
}
DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
return AccessPermission:NotPresent;
}
void functionalRead(Addr addr, Packet *pkt) {
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
}
}
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
return num_functional_writes;
}
num_functional_writes := num_functional_writes +
testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt);
return num_functional_writes;
}
void setAccessPermission(Entry cache_entry, Addr addr, State state) {
if (is_valid(cache_entry)) {
cache_entry.changePermission(L1Cache_State_to_permission(state));
}
}
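// ATOMIC is treated as a store because a read-modify-write needs exclusive
// (Read_Write) permission before it can complete.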
Event mandatory_request_type_to_event(RubyRequestType type) {
if (type == RubyRequestType:LD) {
return Event:Load;
} else if (type == RubyRequestType:IFETCH) {
return Event:Ifetch;
} else if ((type == RubyRequestType:ST) || (type == RubyRequestType:ATOMIC)) {
return Event:Store;
} else {
error("Invalid RubyRequestType");
}
}
Event prefetch_request_type_to_event(RubyRequestType type) {
if (type == RubyRequestType:LD) {
return Event:PF_Load;
} else if (type == RubyRequestType:IFETCH) {
return Event:PF_Ifetch;
} else if ((type == RubyRequestType:ST) ||
(type == RubyRequestType:ATOMIC)) {
return Event:PF_Store;
} else {
error("Invalid RubyRequestType");
}
}
int getPendingAcks(TBE tbe) {
return tbe.pendingAcks;
}
out_port(requestL1Network_out, RequestMsg, requestFromL1Cache);
out_port(responseL1Network_out, ResponseMsg, responseFromL1Cache);
out_port(unblockNetwork_out, ResponseMsg, unblockFromL1Cache);
out_port(optionalQueue_out, RubyRequest, optionalQueue);
// Prefetch queue between the controller and the prefetcher
// As per Spracklen et al. (HPCA 2005), the prefetch queue should be
// implemented as a LIFO structure. The structure would allow for fast
// searches of all entries in the queue, not just the head msg. All
// msgs in the structure can be invalidated if a demand miss matches.
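// Note on the trigger() calls below: a successful trigger() ends the
// current evaluation of this port (the generated wakeup() restarts its
// scan), so at most one of the candidate triggers fires per message.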
in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
if (optionalQueue_in.isReady(clockEdge())) {
peek(optionalQueue_in, RubyRequest) {
// Instruction Prefetch
if (in_msg.Type == RubyRequestType:IFETCH) {
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
// The block to be prefetched is already present in the
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
L1Icache_entry, TBEs[in_msg.LineAddress]);
}
// Check to see if it is in the OTHER L1
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1 cache. We should drop
// this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1, so let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so evict a victim to make room
Addr victim := L1Icache.cacheProbe(in_msg.LineAddress);
trigger(Event:PF_L1_Replacement,
victim, getL1ICacheEntry(victim), TBEs[victim]);
}
} else {
// Data prefetch
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
// The block to be prefetched is already present in the
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
// Check to see if it is in the OTHER L1
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1. Just drop the prefetch
// request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in
// the L1, so let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so evict a victim to make room
Addr victim := L1Dcache.cacheProbe(in_msg.LineAddress);
trigger(Event:PF_L1_Replacement,
victim, getL1DCacheEntry(victim), TBEs[victim]);
}
}
}
}
}
// Response L1 Network - response msg to this L1 cache
in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
if (responseL1Network_in.isReady(clockEdge())) {
peek(responseL1Network_in, ResponseMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
} else if(in_msg.Type == CoherenceResponseType:DATA) {
if ((getState(tbe, cache_entry, in_msg.addr) == State:IS ||
getState(tbe, cache_entry, in_msg.addr) == State:IS_I ||
getState(tbe, cache_entry, in_msg.addr) == State:PF_IS ||
getState(tbe, cache_entry, in_msg.addr) == State:PF_IS_I) &&
machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
trigger(Event:DataS_fromL1, in_msg.addr, cache_entry, tbe);
} else if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
trigger(Event:Data_all_Acks, in_msg.addr, cache_entry, tbe);
} else {
trigger(Event:Data, in_msg.addr, cache_entry, tbe);
}
} else if (in_msg.Type == CoherenceResponseType:ACK) {
if ( (getPendingAcks(tbe) - in_msg.AckCount) == 0 ) {
trigger(Event:Ack_all, in_msg.addr, cache_entry, tbe);
} else {
trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
}
} else if (in_msg.Type == CoherenceResponseType:WB_ACK) {
trigger(Event:WB_Ack, in_msg.addr, cache_entry, tbe);
} else {
error("Invalid L1 response type");
}
}
}
}
// Request Network - incoming requests to this L1 (Inv and forwarded GETX/GETS/GET_INSTR from the L2)
in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
if(requestL1Network_in.isReady(clockEdge())) {
peek(requestL1Network_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETX ||
in_msg.Type == CoherenceRequestType:UPGRADE) {
// an UPGRADE is handled as a GETX: a racing request may have already invalidated our shared copy
trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.addr, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:GET_INSTR) {
trigger(Event:Fwd_GET_INSTR, in_msg.addr, cache_entry, tbe);
} else {
error("Invalid forwarded request type");
}
}
}
}
// Mandatory Queue between the node's CPU and its L1 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data accesses to blocks in the I-cache and ifetches to blocks in the D-cache
if (in_msg.Type == RubyRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
// The I-cache has the block (possibly in a transient state); trigger the demand event.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1 cache; evict it there so the request can proceed
trigger(Event:L1_Replacement, in_msg.LineAddress,
L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1, so let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so evict a victim to make room.
// Check that the line we want to evict is not locked.
Addr addr := L1Icache.cacheProbe(in_msg.LineAddress);
check_on_cache_probe(mandatoryQueue_in, addr);
trigger(Event:L1_Replacement, addr,
getL1ICacheEntry(addr),
TBEs[addr]);
}
}
} else {
// *** DATA ACCESS ***
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
// The D-cache has the block (possibly in a transient state); trigger the demand event.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1 cache; evict it there so the request can proceed
trigger(Event:L1_Replacement, in_msg.LineAddress,
L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1, so let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so evict a victim to make room.
// Check that the line we want to evict is not locked.
Addr addr := L1Dcache.cacheProbe(in_msg.LineAddress);
check_on_cache_probe(mandatoryQueue_in, addr);
trigger(Event:L1_Replacement, addr,
getL1DCacheEntry(addr),
TBEs[addr]);
}
}
}
}
}
}
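// Hook invoked by the attached prefetcher to inject a prefetch request;
// the request is queued on optionalQueue and decoded by optionalQueue_in.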
void enqueuePrefetch(Addr address, RubyRequestType type) {
enqueue(optionalQueue_out, RubyRequest, 1) {
out_msg.LineAddress := address;
out_msg.Type := type;
out_msg.AccessMode := RubyAccessMode:Supervisor;
}
}
// ACTIONS
action(a_issueGETS, "a", desc="Issue GETS") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
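// The remaining issue actions follow the same template: peek the
// triggering message, enqueue a RequestMsg after l1_request_latency
// cycles, and address it to the owning L2 bank via mapAddressToRange().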
action(pa_issuePfGETS, "pa", desc="Issue prefetch GETS") {
peek(optionalQueue_in, RubyRequest) {
enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(ai_issueGETINSTR, "ai", desc="Issue GETINSTR") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GET_INSTR;
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(pai_issuePfGETINSTR, "pai",
desc="Issue GETINSTR for prefetch request") {
peek(optionalQueue_in, RubyRequest) {
enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GET_INSTR;
out_msg.Requestor := machineID;
out_msg.Destination.add(
mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
address, out_msg.Destination);
}
}
}
action(b_issueGETX, "b", desc="Issue GETX") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
DPRINTF(RubySlicc, "%s\n", machineID);
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(pb_issuePfGETX, "pb", desc="Issue prefetch GETX") {
peek(optionalQueue_in, RubyRequest) {
enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
DPRINTF(RubySlicc, "%s\n", machineID);
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(c_issueUPGRADE, "c", desc="Issue UPGRADE") {
peek(mandatoryQueue_in, RubyRequest) {
enqueue(requestL1Network_out, RequestMsg, l1_request_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:UPGRADE;
out_msg.Requestor := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
DPRINTF(RubySlicc, "address: %#x, destination: %s\n",
address, out_msg.Destination);
out_msg.MessageSize := MessageSizeType:Control;
out_msg.Prefetch := in_msg.Prefetch;
out_msg.AccessMode := in_msg.AccessMode;
}
}
}
action(d_sendDataToRequestor, "d", desc="send data to requestor") {
peek(requestL1Network_in, RequestMsg) {
enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
assert(is_valid(cache_entry));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := cache_entry.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(d2_sendDataToL2, "d2", desc="send data to the L2 cache because of M downgrade") {
enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
assert(is_valid(cache_entry));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := cache_entry.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
action(dt_sendDataToRequestor_fromTBE, "dt", desc="send data to requestor") {
peek(requestL1Network_in, RequestMsg) {
enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
assert(is_valid(tbe));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := tbe.DataBlk;
out_msg.Dirty := tbe.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(d2t_sendDataToL2_fromTBE, "d2t", desc="send data to the L2 cache") {
enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
assert(is_valid(tbe));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := tbe.DataBlk;
out_msg.Dirty := tbe.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
action(e_sendAckToRequestor, "e", desc="send invalidate ack to requestor (could be L2 or L1)") {
peek(requestL1Network_in, RequestMsg) {
enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(f_sendDataToL2, "f", desc="send data to the L2 cache") {
enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
assert(is_valid(cache_entry));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := cache_entry.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
out_msg.MessageSize := MessageSizeType:Writeback_Data;
}
}
action(ft_sendDataToL2_fromTBE, "ft", desc="send data to the L2 cache") {
enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
assert(is_valid(tbe));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := tbe.DataBlk;
out_msg.Dirty := tbe.Dirty;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
out_msg.MessageSize := MessageSizeType:Writeback_Data;
}
}
action(fi_sendInvAck, "fi", desc="send an invalidation ack to the requestor") {
peek(requestL1Network_in, RequestMsg) {
enqueue(responseL1Network_out, ResponseMsg, l1_response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Control;
out_msg.AckCount := 1;
}
}
}
action(forward_eviction_to_cpu, "\cc", desc="sends eviction information to the processor") {
if (send_evictions) {
DPRINTF(RubySlicc, "Sending invalidation for %#x to the CPU\n", address);
sequencer.evictionCallback(address);
}
}
action(g_issuePUTX, "g", desc="issue PUTX writeback to the L2 cache") {
enqueue(requestL1Network_out, RequestMsg, l1_response_latency) {
assert(is_valid(cache_entry));
out_msg.addr := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.DataBlk := cache_entry.DataBlk;
out_msg.Dirty := cache_entry.Dirty;
out_msg.Requestor:= machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
if (cache_entry.Dirty) {
out_msg.MessageSize := MessageSizeType:Writeback_Data;
} else {
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
}
action(j_sendUnblock, "j", desc="send unblock to the L2 cache") {
enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
out_msg.MessageSize := MessageSizeType:Response_Control;
DPRINTF(RubySlicc, "%#x\n", address);
}
}
action(jj_sendExclusiveUnblock, "\j", desc="send unblock to the L2 cache") {
enqueue(unblockNetwork_out, ResponseMsg, to_l2_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:EXCLUSIVE_UNBLOCK;
out_msg.Sender := machineID;
out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
l2_select_low_bit, l2_select_num_bits, intToID(0)));
out_msg.MessageSize := MessageSizeType:Response_Control;
DPRINTF(RubySlicc, "%#x\n", address);
}
}
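// A store-conditional must fail if the line is lost between the LL and
// the SC; invalidateSC() clears any load-linked reservation the sequencer
// is tracking for this address.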
action(dg_invalidate_sc, "dg",
desc="Invalidate store conditional as the cache lost permissions") {
sequencer.invalidateSC(address);
}
action(h_load_hit, "hd",
desc="Notify sequencer the load completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
L1Dcache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
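// The hx_/hhx_ variants pass a third callback argument marking an
// "external hit", so the sequencer accounts the access as a miss that was
// completed by an external response; setMRU(address) is applied to both
// caches since the block may reside in either one.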
action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
L1Icache.setMRU(address);
L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
L1Icache.setMRU(address);
L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
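// The TBE snapshots the block's data and dirty bit so a writeback or a
// forwarded request can still be answered after ff_deallocateL1CacheBlock
// frees the cache entry (see the M_I transitions).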
action(i_allocateTBE, "i", desc="Allocate TBE (isPrefetch=0, number of invalidates=0)") {
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
set_tbe(TBEs[address]);
tbe.isPrefetch := false;
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popRequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
Tick delay := requestL1Network_in.dequeue(clockEdge());
profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
Tick delay := responseL1Network_in.dequeue(clockEdge());
profileMsgDelay(1, ticksToCycles(delay));
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
TBEs.deallocate(address);
unset_tbe();
}
action(u_writeDataToL1Cache, "u", desc="Write data to cache") {
peek(responseL1Network_in, ResponseMsg) {
assert(is_valid(cache_entry));
cache_entry.DataBlk := in_msg.DataBlk;
cache_entry.Dirty := in_msg.Dirty;
}
}
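// Ack bookkeeping: data responses carry the number of expected
// invalidation acks as a negative AckCount (the L2 sends
// AckCount = -#sharers), while each ack carries +1. pendingAcks therefore
// counts up when data arrives and drains back down as acks come in;
// Data_all_Acks/Ack_all fire when the difference reaches zero.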
action(q_updateAckCount, "q", desc="Update ack count") {
peek(responseL1Network_in, ResponseMsg) {
assert(is_valid(tbe));
tbe.pendingAcks := tbe.pendingAcks - in_msg.AckCount;
APPEND_TRANSITION_COMMENT(in_msg.AckCount);
APPEND_TRANSITION_COMMENT(" p: ");
APPEND_TRANSITION_COMMENT(tbe.pendingAcks);
}
}
action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
if (L1Dcache.isTagPresent(address)) {
L1Dcache.deallocate(address);
} else {
L1Icache.deallocate(address);
}
unset_cache_entry();
}
action(oo_allocateL1DCacheBlock, "\o", desc="Allocate an L1 D-cache block for this address.") {
if (is_invalid(cache_entry)) {
set_cache_entry(L1Dcache.allocate(address, new Entry));
}
}
action(pp_allocateL1ICacheBlock, "\p", desc="Allocate an L1 I-cache block for this address.") {
if (is_invalid(cache_entry)) {
set_cache_entry(L1Icache.allocate(address, new Entry));
}
}
action(z_stallAndWaitMandatoryQueue, "\z", desc="Stall and wait the L1 mandatory request queue") {
stall_and_wait(mandatoryQueue_in, address);
}
action(z_stallAndWaitOptionalQueue, "\pz", desc="Stall and wait the L1 prefetch request queue") {
stall_and_wait(optionalQueue_in, address);
}
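// Pairs with z_stallAndWait*/check_on_cache_probe: messages parked on
// this address are returned to their queues once the blocking transition
// completes.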
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
wakeUpBuffers(address);
}
action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
++L1Icache.demand_misses;
}
action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
++L1Icache.demand_hits;
}
action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
++L1Dcache.demand_misses;
}
action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
++L1Dcache.demand_hits;
}
action(po_observeHit, "\ph", desc="Inform the prefetcher about the hit") {
peek(mandatoryQueue_in, RubyRequest) {
if (cache_entry.isPrefetch) {
prefetcher.observePfHit(in_msg.LineAddress);
cache_entry.isPrefetch := false;
}
}
}
action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
peek(mandatoryQueue_in, RubyRequest) {
if (enable_prefetch) {
prefetcher.observeMiss(in_msg.LineAddress, in_msg.Type);
}
}
}
action(ppm_observePfMiss, "\ppm",
desc="Inform the prefetcher about the partial miss") {
peek(mandatoryQueue_in, RubyRequest) {
prefetcher.observePfMiss(in_msg.LineAddress);
}
}
action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
optionalQueue_in.dequeue(clockEdge());
}
action(mp_markPrefetched, "mp", desc="Set the isPrefetch flag") {
assert(is_valid(cache_entry));
cache_entry.isPrefetch := true;
}
//*****************************************************
// TRANSITIONS
//*****************************************************
// Transitions for Load/Store/Replacement/WriteBack from transient states
transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK}, {Load, Ifetch, Store, L1_Replacement}) {
z_stallAndWaitMandatoryQueue;
}
transition({PF_IS, PF_IS_I}, {Store, L1_Replacement}) {
z_stallAndWaitMandatoryQueue;
}
transition({PF_IM, PF_SM}, {Load, Ifetch, L1_Replacement}) {
z_stallAndWaitMandatoryQueue;
}
transition({IS, IM, IS_I, M_I, SM, SINK_WB_ACK, PF_IS, PF_IS_I, PF_IM, PF_SM}, PF_L1_Replacement) {
z_stallAndWaitOptionalQueue;
}
// Transitions from Idle
transition({NP,I}, {L1_Replacement, PF_L1_Replacement}) {
ff_deallocateL1CacheBlock;
}
transition({S,E,M,IS,IM,SM,IS_I,PF_IS_I,M_I,SINK_WB_ACK,PF_IS,PF_IM},
{PF_Load, PF_Store, PF_Ifetch}) {
pq_popPrefetchQueue;
}
transition({NP,I}, Load, IS) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
a_issueGETS;
uu_profileDataMiss;
po_observeMiss;
k_popMandatoryQueue;
}
transition({NP,I}, PF_Load, PF_IS) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
pa_issuePfGETS;
pq_popPrefetchQueue;
}
transition(PF_IS, Load, IS) {
uu_profileDataMiss;
ppm_observePfMiss;
k_popMandatoryQueue;
}
transition(PF_IS_I, Load, IS_I) {
uu_profileDataMiss;
ppm_observePfMiss;
k_popMandatoryQueue;
}
transition(PF_IS_I, Ifetch, IS_I) {
uu_profileInstMiss;
ppm_observePfMiss;
k_popMandatoryQueue;
}
transition({NP,I}, Ifetch, IS) {
pp_allocateL1ICacheBlock;
i_allocateTBE;
ai_issueGETINSTR;
uu_profileInstMiss;
po_observeMiss;
k_popMandatoryQueue;
}
transition({NP,I}, PF_Ifetch, PF_IS) {
pp_allocateL1ICacheBlock;
i_allocateTBE;
pai_issuePfGETINSTR;
pq_popPrefetchQueue;
}
// An Ifetch may arrive while a prefetch for this line is still in
// flight; we proactively assume the prefetch was into the instruction
// cache and account it as an instruction miss.
transition(PF_IS, Ifetch, IS) {
uu_profileInstMiss;
ppm_observePfMiss;
k_popMandatoryQueue;
}
transition({NP,I}, Store, IM) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
b_issueGETX;
uu_profileDataMiss;
po_observeMiss;
k_popMandatoryQueue;
}
transition({NP,I}, PF_Store, PF_IM) {
oo_allocateL1DCacheBlock;
i_allocateTBE;
pb_issuePfGETX;
pq_popPrefetchQueue;
}
transition(PF_IM, Store, IM) {
uu_profileDataMiss;
ppm_observePfMiss;
k_popMandatoryQueue;
}
transition(PF_SM, Store, SM) {
uu_profileDataMiss;
ppm_observePfMiss;
k_popMandatoryQueue;
}
transition({NP, I}, Inv) {
fi_sendInvAck;
l_popRequestQueue;
}
// Transitions from Shared
transition({S,E,M}, Load) {
h_load_hit;
uu_profileDataHit;
po_observeHit;
k_popMandatoryQueue;
}
transition({S,E,M}, Ifetch) {
h_ifetch_hit;
uu_profileInstHit;
po_observeHit;
k_popMandatoryQueue;
}
transition(S, Store, SM) {
i_allocateTBE;
c_issueUPGRADE;
uu_profileDataMiss;
k_popMandatoryQueue;
}
transition(S, {L1_Replacement, PF_L1_Replacement}, I) {
forward_eviction_to_cpu;
ff_deallocateL1CacheBlock;
}
transition(S, Inv, I) {
forward_eviction_to_cpu;
fi_sendInvAck;
l_popRequestQueue;
}
// Transitions from Exclusive
transition({E,M}, Store, M) {
hh_store_hit;
uu_profileDataHit;
po_observeHit;
k_popMandatoryQueue;
}
transition(E, {L1_Replacement, PF_L1_Replacement}, M_I) {
// silent E replacement?? (clean E data is written back via PUTX rather than silently dropped)
forward_eviction_to_cpu;
i_allocateTBE;
g_issuePUTX; // send data, but hold in case forwarded request
ff_deallocateL1CacheBlock;
}
transition(E, Inv, I) {
// don't send data
forward_eviction_to_cpu;
fi_sendInvAck;
l_popRequestQueue;
}
transition(E, Fwd_GETX, I) {
forward_eviction_to_cpu;
d_sendDataToRequestor;
l_popRequestQueue;
}
transition(E, {Fwd_GETS, Fwd_GET_INSTR}, S) {
d_sendDataToRequestor;
d2_sendDataToL2;
l_popRequestQueue;
}
// Transitions from Modified
transition(M, {L1_Replacement, PF_L1_Replacement}, M_I) {
forward_eviction_to_cpu;
i_allocateTBE;
g_issuePUTX; // send data, but hold in case forwarded request
ff_deallocateL1CacheBlock;
}
transition(M_I, WB_Ack, I) {
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(M, Inv, I) {
forward_eviction_to_cpu;
f_sendDataToL2;
l_popRequestQueue;
}
transition(M_I, Inv, SINK_WB_ACK) {
ft_sendDataToL2_fromTBE;
l_popRequestQueue;
}
transition(M, Fwd_GETX, I) {
forward_eviction_to_cpu;
d_sendDataToRequestor;
l_popRequestQueue;
}
transition(M, {Fwd_GETS, Fwd_GET_INSTR}, S) {
d_sendDataToRequestor;
d2_sendDataToL2;
l_popRequestQueue;
}
transition(M_I, Fwd_GETX, SINK_WB_ACK) {
dt_sendDataToRequestor_fromTBE;
l_popRequestQueue;
}
transition(M_I, {Fwd_GETS, Fwd_GET_INSTR}, SINK_WB_ACK) {
dt_sendDataToRequestor_fromTBE;
d2t_sendDataToL2_fromTBE;
l_popRequestQueue;
}
// Transitions from IS
transition({IS, IS_I}, Inv, IS_I) {
fi_sendInvAck;
l_popRequestQueue;
}
transition({PF_IS, PF_IS_I}, Inv, PF_IS_I) {
fi_sendInvAck;
l_popRequestQueue;
}
transition(IS, Data_all_Acks, S) {
u_writeDataToL1Cache;
hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(PF_IS, Data_all_Acks, S) {
u_writeDataToL1Cache;
s_deallocateTBE;
mp_markPrefetched;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(IS_I, Data_all_Acks, I) {
u_writeDataToL1Cache;
hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(PF_IS_I, Data_all_Acks, I) {
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(IS, DataS_fromL1, S) {
u_writeDataToL1Cache;
j_sendUnblock;
hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(PF_IS, DataS_fromL1, S) {
u_writeDataToL1Cache;
j_sendUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(IS_I, DataS_fromL1, I) {
u_writeDataToL1Cache;
j_sendUnblock;
hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(PF_IS_I, DataS_fromL1, I) {
j_sendUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
// directory is blocked when sending exclusive data
transition(IS_I, Data_Exclusive, E) {
u_writeDataToL1Cache;
hx_load_hit;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
// directory is blocked when sending exclusive data
transition(PF_IS_I, Data_Exclusive, E) {
u_writeDataToL1Cache;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(IS, Data_Exclusive, E) {
u_writeDataToL1Cache;
hx_load_hit;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(PF_IS, Data_Exclusive, E) {
u_writeDataToL1Cache;
jj_sendExclusiveUnblock;
s_deallocateTBE;
mp_markPrefetched;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
// Transitions from IM
transition(IM, Inv, IM) {
fi_sendInvAck;
l_popRequestQueue;
}
transition({PF_IM, PF_SM}, Inv, PF_IM) {
fi_sendInvAck;
l_popRequestQueue;
}
transition(IM, Data, SM) {
u_writeDataToL1Cache;
q_updateAckCount;
o_popIncomingResponseQueue;
}
transition(PF_IM, Data, PF_SM) {
u_writeDataToL1Cache;
q_updateAckCount;
o_popIncomingResponseQueue;
}
transition(IM, Data_all_Acks, M) {
u_writeDataToL1Cache;
hhx_store_hit;
jj_sendExclusiveUnblock;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(PF_IM, Data_all_Acks, M) {
u_writeDataToL1Cache;
jj_sendExclusiveUnblock;
s_deallocateTBE;
mp_markPrefetched;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
// transitions from SM
transition(SM, Inv, IM) {
forward_eviction_to_cpu;
fi_sendInvAck;
dg_invalidate_sc;
l_popRequestQueue;
}
transition({SM, IM, PF_SM, PF_IM}, Ack) {
q_updateAckCount;
o_popIncomingResponseQueue;
}
transition(SM, Ack_all, M) {
jj_sendExclusiveUnblock;
hhx_store_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(PF_SM, Ack_all, M) {
jj_sendExclusiveUnblock;
s_deallocateTBE;
mp_markPrefetched;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(SINK_WB_ACK, Inv) {
fi_sendInvAck;
l_popRequestQueue;
}
transition(SINK_WB_ACK, WB_Ack, I) {
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
}
}