/*
* Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
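// Directory controller for the MESI Two Level protocol. The directory backs
// the L2 with off-chip memory: to_mem_ctrl_latency and directory_latency are
// configuration parameters, and requests/responses travel on virtual
// networks 0 and 1 respectively.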
machine(MachineType:Directory, "MESI Two Level directory protocol")
: DirectoryMemory * directory;
Cycles to_mem_ctrl_latency := 1;
Cycles directory_latency := 6;
MessageBuffer * requestToDir, network="From", virtual_network="0",
vnet_type="request";
MessageBuffer * responseToDir, network="From", virtual_network="1",
vnet_type="response";
MessageBuffer * responseFromDir, network="To", virtual_network="1",
vnet_type="response";
MessageBuffer * responseFromMemory;
{
// STATES
state_declaration(State, desc="Directory states", default="Directory_State_I") {
// Base states
I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
M, AccessPermission:Maybe_Stale, desc="memory copy may be stale, i.e. other modified copies may exist";
IM, AccessPermission:Busy, desc="Intermediate State I>M";
MI, AccessPermission:Busy, desc="Intermediate State M>I";
M_DRD, AccessPermission:Busy, desc="Intermediate state for a DMA read of a Modified block, waiting for the owner's data";
M_DRDI, AccessPermission:Busy, desc="Intermediate state for a DMA read, writing the owner's data back to memory before returning to I";
M_DWR, AccessPermission:Busy, desc="Intermediate state for a DMA write to a Modified block, waiting for the owner's data";
M_DWRI, AccessPermission:Busy, desc="Intermediate state for a DMA write, writing the merged data back to memory before returning to I";
}
// Events
enumeration(Event, desc="Directory events") {
Fetch, desc="A memory fetch arrives";
Data, desc="writeback data arrives";
Memory_Data, desc="Fetched data from memory arrives";
Memory_Ack, desc="Writeback Ack from memory arrives";
//added by SS for dma
DMA_READ, desc="A DMA Read memory request";
DMA_WRITE, desc="A DMA Write memory request";
CleanReplacement, desc="Clean Replacement in L2 cache";
}
// TYPES
// DirectoryEntry
structure(Entry, desc="Directory entry", interface="AbstractCacheEntry", main="false") {
State DirectoryState, desc="Directory state";
MachineID Owner, desc="Owner of this block";
}
// TBE entries for DMA requests
structure(TBE, desc="TBE entries for outstanding DMA requests") {
Addr PhysicalAddress, desc="physical address";
State TBEState, desc="Transient State";
DataBlock DataBlk, desc="Data to be written (DMA write only)";
int Len, desc="Length of the DMA transfer";
MachineID Requestor, desc="The DMA engine that sent the request";
}
structure(TBETable, external="yes") {
TBE lookup(Addr);
void allocate(Addr);
void deallocate(Addr);
bool isPresent(Addr);
bool functionalRead(Packet *pkt);
int functionalWrite(Packet *pkt);
}
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
Tick clockEdge();
Tick cyclesToTicks(Cycles c);
void set_tbe(TBE tbe);
void unset_tbe();
void wakeUpBuffers(Addr a);
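// Look up the directory entry for an address, allocating one on demand;
// blocks with no entry are treated as Invalid (see getState below).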
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
}
dir_entry := static_cast(Entry, "pointer",
directory.allocate(addr, new Entry));
return dir_entry;
}
State getState(TBE tbe, Addr addr) {
if (is_valid(tbe)) {
return tbe.TBEState;
} else if (directory.isPresent(addr)) {
return getDirectoryEntry(addr).DirectoryState;
} else {
return State:I;
}
}
void setState(TBE tbe, Addr addr, State state) {
if (is_valid(tbe)) {
tbe.TBEState := state;
}
if (directory.isPresent(addr)) {
getDirectoryEntry(addr).DirectoryState := state;
}
}
AccessPermission getAccessPermission(Addr addr) {
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
return Directory_State_to_permission(tbe.TBEState);
}
if(directory.isPresent(addr)) {
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState));
return Directory_State_to_permission(getDirectoryEntry(addr).DirectoryState);
}
DPRINTF(RubySlicc, "%s\n", AccessPermission:NotPresent);
return AccessPermission:NotPresent;
}
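// Functional accesses use the in-flight TBE copy of the block when one
// exists, and fall back to backing memory otherwise.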
void functionalRead(Addr addr, Packet *pkt) {
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
functionalMemoryRead(pkt);
}
}
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
}
num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
return num_functional_writes;
}
void setAccessPermission(Addr addr, State state) {
if (directory.isPresent(addr)) {
getDirectoryEntry(addr).changePermission(Directory_State_to_permission(state));
}
}
bool isGETRequest(CoherenceRequestType type) {
return (type == CoherenceRequestType:GETS) ||
(type == CoherenceRequestType:GET_INSTR) ||
(type == CoherenceRequestType:GETX);
}
// ** OUT_PORTS **
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// ** IN_PORTS **
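// Demand requests from the L2 (GETS/GET_INSTR/GETX) and DMA requests share
// the request network; each incoming message is mapped to a directory Event.
// The rank values fix the order in which the generated controller polls
// these ports.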
in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
TBEs[makeLineAddress(in_msg.addr)]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg);
error("Invalid message");
}
}
}
}
in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
}
}
// Responses from off-chip memory: fetched data or writeback acknowledgements
in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
}
}
}
}
// Actions
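// The short mnemonic given with each action is the label used in the
// generated protocol tables.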
action(a_sendAck, "a", desc="Send ack to L2") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:MEMORY_ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Sender);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(d_sendData, "d", desc="Send data to requestor") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:MEMORY_DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := false;
out_msg.MessageSize := MessageSizeType:Response_Data;
Entry e := getDirectoryEntry(in_msg.addr);
e.Owner := in_msg.OriginalRequestorMachId;
}
}
}
action(aa_sendAck, "aa", desc="Send ack to L2") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:MEMORY_ACK;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.OriginalRequestorMachId);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
requestNetwork_in.dequeue(clockEdge());
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
responseNetwork_in.dequeue(clockEdge());
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
memQueue_in.dequeue(clockEdge());
}
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
wakeUpBuffers(address);
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
}
}
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
peek(responseNetwork_in, ResponseMsg) {
queueMemoryWrite(in_msg.Sender, address, to_mem_ctrl_latency,
in_msg.DataBlk);
}
}
//added by SS for dma
action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request for DMA") {
peek(requestNetwork_in, RequestMsg) {
queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
}
}
action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
requestNetwork_in.dequeue(clockEdge());
}
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
peek(memQueue_in, MemoryMsg) {
enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
assert(is_valid(tbe));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
out_msg.Destination.add(tbe.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
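// Partial writebacks pass in_msg.Len to memory so that only the bytes
// supplied by the DMA write are updated.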
action(qw_queueMemoryWBRequest_partial, "qwp",
desc="Queue off-chip writeback request") {
peek(requestNetwork_in, RequestMsg) {
queueMemoryWritePartial(machineID, address, to_mem_ctrl_latency,
in_msg.DataBlk, in_msg.Len);
}
}
action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
assert(is_valid(tbe));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Destination.add(tbe.Requestor);
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(z_stallAndWaitRequest, "z", desc="Stall and wait on the request queue") {
stall_and_wait(requestNetwork_in, address);
}
action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
peek(requestNetwork_in, RequestMsg) {
enqueue(responseNetwork_out, ResponseMsg, directory_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:INV;
out_msg.Sender := machineID;
out_msg.Destination.add(getDirectoryEntry(address).Owner);
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
}
action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
peek(responseNetwork_in, ResponseMsg) {
enqueue(responseNetwork_out, ResponseMsg, to_mem_ctrl_latency) {
assert(is_valid(tbe));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.DataBlk := in_msg.DataBlk; // we send the entire data block and rely on the dma controller to split it up if need be
out_msg.Destination.add(tbe.Requestor);
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
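// The TBE records the DMA requestor, physical address, length and (for
// writes) data while the memory operation is outstanding.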
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(requestNetwork_in, RequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.addr;
tbe.Len := in_msg.Len;
tbe.Requestor := in_msg.Requestor;
}
}
action(qw_queueMemoryWBRequest_partialTBE, "qwt",
desc="Queue off-chip writeback request") {
peek(responseNetwork_in, ResponseMsg) {
queueMemoryWritePartial(in_msg.Sender, tbe.PhysicalAddress,
to_mem_ctrl_latency, tbe.DataBlk, tbe.Len);
}
}
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
TBEs.deallocate(address);
unset_tbe();
}
// TRANSITIONS
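// Demand flow: a Fetch in I queues a memory read (I -> IM -> M); writeback
// data in M is sent to memory (M -> MI -> I); a clean replacement in M
// returns the block to I without touching memory.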
transition(I, Fetch, IM) {
qf_queueMemoryFetchRequest;
j_popIncomingRequestQueue;
}
transition(M, Fetch) {
inv_sendCacheInvalidate;
z_stallAndWaitRequest;
}
transition(IM, Memory_Data, M) {
d_sendData;
l_popMemQueue;
kd_wakeUpDependents;
}
//added by SS
transition(M, CleanReplacement, I) {
a_sendAck;
k_popIncomingResponseQueue;
kd_wakeUpDependents;
}
transition(M, Data, MI) {
qw_queueMemoryWBRequest;
k_popIncomingResponseQueue;
}
transition(MI, Memory_Ack, I) {
aa_sendAck;
l_popMemQueue;
kd_wakeUpDependents;
}
//added by SS for dma support
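// DMA to an Invalid block goes straight to memory (ID / ID_W). When the
// block is Modified, the owner is invalidated first and its data is
// forwarded to the DMA engine and/or written back (M_DRD*, M_DWR*).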
transition(I, DMA_READ, ID) {
v_allocateTBE;
qf_queueMemoryFetchRequestDMA;
j_popIncomingRequestQueue;
}
transition(ID, Memory_Data, I) {
dr_sendDMAData;
w_deallocateTBE;
l_popMemQueue;
kd_wakeUpDependents;
}
transition(I, DMA_WRITE, ID_W) {
v_allocateTBE;
qw_queueMemoryWBRequest_partial;
j_popIncomingRequestQueue;
}
transition(ID_W, Memory_Ack, I) {
da_sendDMAAck;
w_deallocateTBE;
l_popMemQueue;
kd_wakeUpDependents;
}
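// Demand requests and writeback data that race with an in-flight DMA or
// memory operation are stalled; DMA requests that arrive during a transient
// state are recycled until the block is stable.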
transition({ID, ID_W, M_DRDI, M_DWRI, IM, MI}, {Fetch, Data} ) {
z_stallAndWaitRequest;
}
transition({ID, ID_W, M_DRD, M_DRDI, M_DWR, M_DWRI, IM, MI}, {DMA_WRITE, DMA_READ} ) {
zz_recycleDMAQueue;
}
transition(M, DMA_READ, M_DRD) {
v_allocateTBE;
inv_sendCacheInvalidate;
j_popIncomingRequestQueue;
}
transition(M_DRD, Data, M_DRDI) {
drp_sendDMAData;
w_deallocateTBE;
qw_queueMemoryWBRequest;
k_popIncomingResponseQueue;
}
transition(M_DRDI, Memory_Ack, I) {
aa_sendAck;
l_popMemQueue;
kd_wakeUpDependents;
}
transition(M, DMA_WRITE, M_DWR) {
v_allocateTBE;
inv_sendCacheInvalidate;
j_popIncomingRequestQueue;
}
transition(M_DWR, Data, M_DWRI) {
qw_queueMemoryWBRequest_partialTBE;
k_popIncomingResponseQueue;
}
transition(M_DWRI, Memory_Ack, I) {
aa_sendAck;
da_sendDMAAck;
w_deallocateTBE;
l_popMemQueue;
kd_wakeUpDependents;
}
}