| /* |
| * Copyright (c) 2021 ARM Limited |
| * All rights reserved |
| * |
| * The license below extends only to copyright in the software and shall |
| * not be construed as granting a license to any other intellectual |
| * property including but not limited to intellectual property relating |
| * to a hardware implementation of the functionality of the software |
| * licensed hereunder. You may use the software subject to the license |
| * terms below provided that you ensure that this notice is replicated |
| * unmodified and in its entirety in all distributions of the software, |
| * modified or unmodified, in source code or in binary form. |
| * |
| * Redistribution and use in source and binary forms, with or without |
| * modification, are permitted provided that the following conditions are |
| * met: redistributions of source code must retain the above copyright |
| * notice, this list of conditions and the following disclaimer; |
| * redistributions in binary form must reproduce the above copyright |
| * notice, this list of conditions and the following disclaimer in the |
| * documentation and/or other materials provided with the distribution; |
| * neither the name of the copyright holders nor the names of its |
| * contributors may be used to endorse or promote products derived from |
| * this software without specific prior written permission. |
| * |
| * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| */ |
| |
| |
| //////////////////////////////////////////////////////////////////////////// |
| // CHI-cache actions definitions |
| //////////////////////////////////////////////////////////////////////////// |
| |
| action(AllocateTBE_Request, desc="") { |
| if (storTBEs.areNSlotsAvailable(1)) { |
| // reserve a slot for this request |
| storTBEs.incrementReserved(); |
| |
| // Move request to rdy queue |
| peek(reqInPort, CHIRequestMsg) { |
| enqueue(reqRdyOutPort, CHIRequestMsg, allocation_latency) { |
| assert(in_msg.addr == address); |
| assert(in_msg.is_local_pf == false); |
| out_msg := in_msg; |
| } |
| } |
| |
| } else { |
| // we don't have resources to track this request; enqueue a retry |
| peek(reqInPort, CHIRequestMsg) { |
| assert(in_msg.allowRetry); |
| enqueue(retryTriggerOutPort, RetryTriggerMsg, 0) { |
| out_msg.addr := in_msg.addr; |
| out_msg.event := Event:SendRetryAck; |
| out_msg.retryDest := in_msg.requestor; |
| retryQueue.emplace(in_msg.addr,in_msg.requestor); |
| } |
| } |
| } |
| |
| reqInPort.dequeue(clockEdge()); |
| } |
| |
| action(AllocateTBE_Request_WithCredit, desc="") { |
| // TBE slot already reserved |
| // Move request to rdy queue |
| peek(reqInPort, CHIRequestMsg) { |
| assert(in_msg.allowRetry == false); |
| enqueue(reqRdyOutPort, CHIRequestMsg, allocation_latency) { |
| assert(in_msg.addr == address); |
| out_msg := in_msg; |
| } |
| } |
| reqInPort.dequeue(clockEdge()); |
| } |
| |
| action(AllocateTBE_Snoop, desc="") { |
| // No retry for snoop requests; just create resource stall |
| check_allocate(storSnpTBEs); |
| |
| storSnpTBEs.incrementReserved(); |
| |
| // Move request to rdy queue |
| peek(snpInPort, CHIRequestMsg) { |
| enqueue(snpRdyOutPort, CHIRequestMsg, allocation_latency) { |
| assert(in_msg.addr == address); |
| out_msg := in_msg; |
| } |
| |
    // Also remove the snoop's source from the set of destinations waiting
    // for a retry. This prevents a deadlock in which this snoop is blocked
    // by a transaction that must send a request to the snoop's source before
    // going to BUSY_INTR, while that source needs this snoop to complete
    // before it sends us a retry credit.
| destsWaitingRetry.remove(in_msg.requestor); |
| } |
| snpInPort.dequeue(clockEdge()); |
| } |
| |
| action(AllocateTBE_SeqRequest, desc="") { |
| // No retry for sequencer requests; just create resource stall |
| check_allocate(storTBEs); |
| |
| // reserve a slot for this request |
| storTBEs.incrementReserved(); |
| |
| // Move request to rdy queue |
| peek(seqInPort, RubyRequest) { |
| enqueue(reqRdyOutPort, CHIRequestMsg, allocation_latency) { |
| out_msg.addr := in_msg.LineAddress; |
| assert((in_msg.Size > 0) && (in_msg.Size <= blockSize)); |
| out_msg.accAddr := in_msg.PhysicalAddress; |
| out_msg.accSize := in_msg.Size; |
| out_msg.requestor := machineID; |
| out_msg.fwdRequestor := machineID; |
| out_msg.seqReq := in_msg.getRequestPtr(); |
| out_msg.isSeqReqValid := true; |
| assert(in_msg.Prefetch == PrefetchBit:No); |
| out_msg.is_local_pf := false; |
| out_msg.is_remote_pf := false; |
| |
| if ((in_msg.Type == RubyRequestType:LD) || |
| (in_msg.Type == RubyRequestType:IFETCH)) { |
| out_msg.type := CHIRequestType:Load; |
| } else if (in_msg.Type == RubyRequestType:ST) { |
| if (in_msg.Size == blockSize) { |
| out_msg.type := CHIRequestType:StoreLine; |
| } else { |
| out_msg.type := CHIRequestType:Store; |
| } |
| } else { |
| error("Invalid RubyRequestType"); |
| } |
| } |
| } |
| seqInPort.dequeue(clockEdge()); |
| } |
| |
| action(AllocateTBE_PfRequest, desc="Allocate TBE for prefetch request") { |
| // No retry for prefetch requests; just create resource stall |
| check_allocate(storTBEs); |
| |
| // reserve a slot for this request |
| storTBEs.incrementReserved(); |
| |
| // Move request to rdy queue |
| peek(pfInPort, RubyRequest) { |
| enqueue(reqRdyOutPort, CHIRequestMsg, 0) { |
| out_msg.addr := in_msg.LineAddress; |
| assert((in_msg.Size > 0) && (in_msg.Size <= blockSize)); |
| out_msg.accAddr := in_msg.PhysicalAddress; |
| out_msg.accSize := in_msg.Size; |
| out_msg.requestor := machineID; |
| out_msg.fwdRequestor := machineID; |
| out_msg.seqReq := in_msg.getRequestPtr(); |
| out_msg.isSeqReqValid := true; |
| assert(in_msg.Prefetch != PrefetchBit:No); |
| out_msg.is_local_pf := true; |
| out_msg.is_remote_pf := false; |
| |
| if (in_msg.Type == RubyRequestType:LD) { |
| out_msg.type := CHIRequestType:Load; |
| } else if (in_msg.Type == RubyRequestType:ST) { |
| error("CHI is not supporting prefetch store requests"); |
| } else { |
| error("Invalid RubyRequestType"); |
| } |
| } |
| } |
| pfInPort.dequeue(clockEdge()); |
| } |
| |
| action(Initiate_Request, desc="") { |
| State initial := getState(tbe, cache_entry, address); |
| bool was_retried := false; |
| peek(reqRdyPort, CHIRequestMsg) { |
| set_tbe(allocateRequestTBE(address, in_msg)); |
| // only a msg that was already retried doesn't allow a retry |
| was_retried := in_msg.allowRetry == false; |
| } |
| DirEntry dir_entry := getDirEntry(address); |
| copyCacheAndDir(cache_entry, dir_entry, tbe, initial); |
| |
| tbe.use_DMT := is_HN && enable_DMT; |
| tbe.use_DCT := enable_DCT; |
| |
| bool alloc_entry := needCacheEntry(tbe.reqType, |
| cache_entry, dir_entry, |
| tbe.is_local_pf); |
| bool dealloc_entry := needDeallocCacheEntry(tbe.reqType); |
| assert((alloc_entry && dealloc_entry) == false); |
| |
  // always drop the data when we are not caching it or when this transaction
  // requires deallocation
| tbe.dataToBeInvalid := dealloc_entry || |
| (is_invalid(cache_entry) && (alloc_entry == false)); |
| tbe.doCacheFill := alloc_entry || is_valid(cache_entry); |
| |
| // model the initial tag array read |
| tbe.actions.pushNB(Event:TagArrayRead); |
| |
| incomingTransactionStart(address, curTransitionEvent(), initial, was_retried); |
| } |
| |
| action(Initiate_Request_Stale, desc="") { |
| State initial := getState(tbe, cache_entry, address); |
| bool was_retried := false; |
| peek(reqRdyPort, CHIRequestMsg) { |
| set_tbe(allocateRequestTBE(address, in_msg)); |
| was_retried := in_msg.allowRetry == false; |
| } |
| copyCacheAndDir(cache_entry, getDirEntry(address), tbe, initial); |
| incomingTransactionStart(address, curTransitionEvent(), initial, was_retried); |
| } |
| |
| action(Initiate_Snoop, desc="") { |
| State initial := getState(tbe, cache_entry, address); |
| peek(snpRdyPort, CHIRequestMsg) { |
| set_tbe(allocateSnoopTBE(address, in_msg)); |
| } |
| copyCacheAndDir(cache_entry, getDirEntry(address), tbe, initial); |
| |
  // if we end up with valid data, drop it if no cache entry is allocated
| tbe.dataToBeInvalid := is_invalid(cache_entry); |
| |
| // model the initial tag array read |
| tbe.actions.pushNB(Event:TagArrayRead); |
| |
| incomingTransactionStart(address, curTransitionEvent(), initial, false); |
| } |
| |
| action(Initiate_Snoop_Hazard, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.is_req_tbe || tbe.is_repl_tbe); |
| |
| // Switch to the new snoop TBE |
| TBE prev_tbe := tbe; |
| peek(snpRdyPort, CHIRequestMsg) { |
| set_tbe(allocateSnoopTBE(address, in_msg)); |
| } |
| assert(tbe.is_snp_tbe); |
| if (prev_tbe.is_req_tbe) { |
| assert(prev_tbe.is_repl_tbe == false); |
| tbe.is_req_hazard := true; |
| } else { |
| assert(prev_tbe.is_repl_tbe); |
| tbe.is_repl_hazard := true; |
| } |
| |
| // Use state from prev TBE |
| tbe.pendReqType := prev_tbe.pendReqType; |
| copyCacheAndDirTBEs(prev_tbe, tbe); |
| tbe.wakeup_pending_req := prev_tbe.wakeup_pending_req; |
| tbe.wakeup_pending_snp := prev_tbe.wakeup_pending_snp; |
| tbe.wakeup_pending_tgr := prev_tbe.wakeup_pending_tgr; |
| } |
| |
| action(RestoreFromHazard, desc="") { |
| TBE hazard_tbe := getHazardTBE(tbe); |
| |
| // update |
| setDataToBeStates(tbe); |
| |
| copyCacheAndDirTBEs(tbe, hazard_tbe); |
| hazard_tbe.wakeup_pending_req := tbe.wakeup_pending_req; |
| hazard_tbe.wakeup_pending_snp := tbe.wakeup_pending_snp; |
| hazard_tbe.wakeup_pending_tgr := tbe.wakeup_pending_tgr; |
| |
| deallocateSnpTBE(tbe); |
| set_tbe(hazard_tbe); |
| |
| // if the pending request is a WB or Evict then it becomes a stale request |
| // if data is no longer in the expected state |
  if ((tbe.pendReqType == CHIRequestType:WriteBackFull) ||
      (tbe.pendReqType == CHIRequestType:WriteCleanFull)) {
    tbe.is_stale := (tbe.dataValid && tbe.dataDirty) == false;
  } else if (tbe.pendReqType == CHIRequestType:WriteEvictFull) {
    tbe.is_stale := (tbe.dataValid && tbe.dataUnique) == false;
  } else if (tbe.pendReqType == CHIRequestType:Evict) {
    tbe.is_stale := tbe.dataValid == false;
  }
| |
  // a pending action from the original request may have been stalled during
  // the hazard and needs to wake up now
| wakeupPendingTgrs(tbe); |
| } |
| |
| action(Initiate_Replacement, desc="") { |
| assert(is_invalid(tbe)); |
| State initial := getState(tbe, cache_entry, address); |
| if (unify_repl_TBEs) { |
| peek(replTriggerInPort, ReplacementMsg) { |
| set_tbe(allocateReplacementTBEOnSlot(address, in_msg.slot)); |
| DPRINTF(RubySlicc, "Allocated replacement TBE on slot %d\n", tbe.storSlot); |
| } |
| } else { |
| set_tbe(allocateReplacementTBE(address)); |
| DPRINTF(RubySlicc, "Allocated replacement TBE on new slot %d\n", tbe.storSlot); |
| } |
| copyCacheAndDir(cache_entry, getDirEntry(address), tbe, initial); |
| |
| // model the initial tag array read |
| tbe.actions.pushNB(Event:TagArrayRead); |
| |
| incomingTransactionStart(address, curTransitionEvent(), initial, false); |
| } |
| |
| |
| |
| action(StallRequest, desc="") { |
| // was stalled because of an existing request |
| assert(is_valid(tbe)); |
| assert(tbe.addr == address); |
| // tracks pending |
| tbe.wakeup_pending_req := true; |
| stall_and_wait(reqRdyPort, address); |
| } |
| |
| action(StallSnoop, desc="") { |
| // was stalled because of an existing request |
| assert(is_valid(tbe)); |
| assert(tbe.addr == address); |
| // tracks pending |
| tbe.wakeup_pending_snp := true; |
| stall_and_wait(snpRdyPort, address); |
| } |
| |
| action(StallLocalEviction, desc="") { |
| // was stalled because of an existing request |
| assert(is_valid(tbe)); |
| assert(tbe.addr == address); |
| |
  // Just pop the queue; when this transaction finishes, wake up the original
  // msgs that caused this eviction
| tbe.wakeup_pending_tgr := true; |
| replTriggerInPort.dequeue(clockEdge()); |
| } |
| |
| action(StallSnoop_NoTBE, desc="") { |
| stall_and_wait(snpRdyPort, address); |
| } |
| |
| action(StallActionOnHazard, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.is_req_hazard || tbe.is_repl_hazard); |
| tbe.wakeup_pending_tgr := true; |
| stall_and_wait(triggerInPort, address); |
| } |
| |
| action(Initiate_ReadShared_Miss, desc="") { |
| tbe.actions.push(Event:ReadMissPipe); |
| if (is_HN && tbe.use_DMT) { |
| tbe.requestorToBeExclusiveOwner := true; |
| tbe.dataMaybeDirtyUpstream := true; // SNF always replies with CompData_UC |
| if (enable_DMT_early_dealloc) { |
| tbe.actions.push(Event:SendRespSepData); |
| } |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendReadNoSnpDMT); |
| } else if (is_HN) { |
| tbe.actions.push(Event:SendReadNoSnp); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } else { |
| tbe.actions.push(Event:SendReadShared); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_ReadShared_Hit, desc="") { |
| tbe.actions.push(Event:ReadHitPipe); |
| tbe.actions.push(Event:DataArrayRead); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_ReadShared_HitUpstream, desc="") { |
| tbe.actions.push(Event:ReadMissPipe); |
| if (tbe.use_DCT) { |
| tbe.actions.push(Event:SendSnpSharedFwdToOwner); |
| tbe.actions.pushNB(Event:WaitCompAck); |
| tbe.updateDirOnCompAck := false; |
| } else { |
| tbe.actions.push(Event:SendSnpShared); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } |
| tbe.actions.push(Event:MaintainCoherence); |
| } |
| |
| action(Initiate_ReadShared_HitUpstream_NoOwner, desc="") { |
| tbe.actions.push(Event:ReadMissPipe); |
| if (tbe.use_DCT) { |
| tbe.actions.push(Event:SendSnpSharedFwdToSharer); |
| tbe.actions.pushNB(Event:WaitCompAck); |
| tbe.updateDirOnCompAck := false; |
| } else { |
| tbe.actions.push(Event:SendSnpOnce); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } |
| tbe.actions.push(Event:MaintainCoherence); |
| } |
| |
| |
| action(Initiate_ReadOnce_Miss, desc="") { |
| // drop at the end if not doing a fill |
| tbe.dataToBeInvalid := tbe.doCacheFill == false; |
| |
| tbe.actions.push(Event:ReadMissPipe); |
| if (is_HN && tbe.use_DMT) { |
| assert(is_invalid(cache_entry)); |
| tbe.requestorToBeExclusiveOwner := true; |
| tbe.dataMaybeDirtyUpstream := true; // SNF always replies with CompData_UC |
| if (enable_DMT_early_dealloc) { |
| tbe.actions.push(Event:SendRespSepData); |
| } |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendReadNoSnpDMT); |
| } else if (is_HN) { |
| tbe.actions.push(Event:SendReadNoSnp); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } else { |
| // if not allocating an entry send a ReadOnce |
| if (tbe.dataToBeInvalid) { |
| tbe.actions.push(Event:SendReadOnce); |
| } else { |
| tbe.actions.push(Event:SendReadShared); |
| } |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } |
| |
| tbe.updateDirOnCompAck := false; |
| |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_ReadOnce_Hit, desc="") { |
| tbe.actions.push(Event:ReadHitPipe); |
| tbe.actions.push(Event:DataArrayRead); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| tbe.updateDirOnCompAck := false; |
| } |
| |
| action(Initiate_ReadOnce_HitUpstream, desc="") { |
| tbe.actions.push(Event:ReadMissPipe); |
| if (tbe.use_DCT) { |
| tbe.actions.push(Event:SendSnpOnceFwd); |
| tbe.actions.pushNB(Event:WaitCompAck); |
| } else { |
| tbe.actions.push(Event:SendSnpOnce); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } |
| tbe.updateDirOnCompAck := false; |
| // no need to update or access tags/data on ReadOnce served from upstream |
| } |
| |
| |
| |
| action(Initiate_ReadUnique_Miss, desc="") { |
| tbe.actions.push(Event:ReadMissPipe); |
| if (is_HN && tbe.use_DMT) { |
| tbe.requestorToBeExclusiveOwner := true; |
| tbe.dataMaybeDirtyUpstream := true; // SNF always replies with CompData_UC |
| if (enable_DMT_early_dealloc) { |
| tbe.actions.push(Event:SendRespSepData); |
| } |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendReadNoSnpDMT); |
| } else if (is_HN) { |
| tbe.actions.push(Event:SendReadNoSnp); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } else { |
| tbe.actions.push(Event:SendReadUnique); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_ReadUnique_AutoUpgrade, desc="") { |
| assert(is_HN); |
| tbe.dataUnique := true; |
| } |
| |
| action(Initiate_ReadUnique_Upgrade, desc="") { |
  // otherwise must use the transitions with auto upgrade
| assert(is_HN == false); |
| assert(tbe.use_DCT == false); |
| assert((tbe.dataValid && tbe.dataUnique) == false); |
| assert((tbe.dir_ownerExists && tbe.dir_ownerIsExcl) == false); |
| |
| tbe.actions.push(Event:ReadMissPipe); |
| if (tbe.dataMaybeDirtyUpstream) { |
| tbe.actions.push(Event:SendSnpUnique); |
| } else if (tbe.dir_sharers.count() > 0) { |
| // no one will send us data unless we explicitly ask |
| tbe.actions.push(Event:SendSnpUniqueRetToSrc); |
| } else { |
| assert(tbe.dataValid); |
| } |
| // then attempt to upgrade our data |
| tbe.actions.push(Event:SendCleanUnique); |
| tbe.actions.push(Event:CheckUpgrade_FromRU); |
| |
| // send up the upgraded data or fresh data if we failed, see CheckUpgrade_FromRU |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_ReadUnique_Hit, desc="") { |
| tbe.actions.push(Event:ReadHitPipe); |
| tbe.actions.push(Event:DataArrayRead); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_ReadUnique_HitUpstream, desc="") { |
| tbe.actions.push(Event:ReadMissPipe); |
  // SnpUniqueFwd can be used only if the line is cached at a single
  // requester other than the one requesting; force DCT off otherwise
| tbe.use_DCT := tbe.use_DCT && (tbe.dir_sharers.count() == 1) && |
| (tbe.dir_sharers.isElement(tbe.requestor) == false); |
| if (tbe.use_DCT) { |
| tbe.actions.push(Event:SendSnpUniqueFwd); |
| tbe.actions.pushNB(Event:WaitCompAck); |
| tbe.updateDirOnCompAck := false; |
| } else if (tbe.dataMaybeDirtyUpstream) { |
| tbe.actions.push(Event:SendSnpUnique); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } else { |
| // no one will send us data unless we explicitly ask |
| tbe.actions.push(Event:SendSnpUniqueRetToSrc); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| } |
  // just update the tag since any data would become stale
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_ReadUnique_Hit_InvUpstream, desc="") { |
| tbe.actions.push(Event:ReadHitPipe); |
| tbe.actions.push(Event:SendSnpCleanInvalid); |
| tbe.actions.pushNB(Event:DataArrayRead); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.actions.pushNB(Event:SendCompData); |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_CleanUnique, desc="") { |
| tbe.actions.push(Event:ReadMissPipe); // TODO need another latency pipe ?? |
| |
  // the requestor doesn't have the line anymore; send a response but don't
  // update the directory on CompAck. The requestor knows we are not tracking
  // it and will send a ReadUnique later
| if (tbe.dir_sharers.isElement(tbe.requestor) == false) { |
| tbe.actions.push(Event:SendCompUCResp); |
| tbe.actions.push(Event:WaitCompAck); |
| tbe.updateDirOnCompAck := false; |
| } else { |
    // invalidate everyone except the requestor
| if (tbe.dir_sharers.count() > 1) { |
| tbe.actions.push(Event:SendSnpCleanInvalidNoReq); |
| } |
| // auto upgrade if HN |
| tbe.dataUnique := tbe.dataUnique || is_HN; |
| // get unique permission |
| if (tbe.dataUnique == false) { |
| tbe.actions.push(Event:SendCleanUnique); |
| tbe.actions.push(Event:CheckUpgrade_FromCU); |
| } |
| // next actions will depend on the data state after snoops+CleanUnique |
| tbe.actions.push(Event:FinishCleanUnique); |
| } |
| } |
| |
| action(Finish_CleanUnique, desc="") { |
  // This should be executed at the end of a transaction
| assert(tbe.actions.empty()); |
| tbe.actions.push(Event:SendCompUCResp); |
| tbe.actions.push(Event:WaitCompAck); |
| |
| // everyone may have been hit by an invalidation so check again |
| if (tbe.dir_sharers.isElement(tbe.requestor) == false) { |
| tbe.updateDirOnCompAck := false; |
| assert(tbe.dataValid == false); |
| } else { |
| // must be the only one in sharers map |
| assert(tbe.dir_sharers.count() == 1); |
| assert(tbe.dataUnique); |
| |
    // similar to Initiate_MaitainCoherence; write back if we hold dirty data
    // that we cannot keep while the requestor has only a clean copy
| bool fill_pipeline := tbe.dataValid && tbe.dataDirty; |
| bool req_has_dirty := tbe.dir_ownerExists && (tbe.dir_owner == tbe.requestor); |
| if (tbe.dataValid && tbe.dataDirty && tbe.dataToBeInvalid && |
| (req_has_dirty == false)) { |
| fill_pipeline := false; |
| if (is_HN) { |
| tbe.actions.push(Event:SendWriteNoSnp); |
| } else { |
| tbe.actions.push(Event:SendWriteClean); |
| } |
| tbe.actions.push(Event:WriteBEPipe); |
| tbe.actions.push(Event:SendWBData); |
| } |
| |
| // needed by UpdateDirState_FromReqResp triggered by the expected CompAck |
| tbe.dataMaybeDirtyUpstream := true; |
| tbe.requestorToBeExclusiveOwner := true; |
| tbe.dir_ownerExists := false; |
| |
| if (fill_pipeline) { |
| tbe.actions.push(Event:CheckCacheFill); |
| } |
| } |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| |
| action(Initiate_LoadHit, desc="") { |
| // Local prefetch requests do not read data array |
| if (tbe.is_local_pf == false) { |
| tbe.actions.push(Event:DataArrayRead); |
| } |
| tbe.actions.push(Event:LoadHit); |
| } |
| |
| action(Initiate_LoadMiss, desc="") { |
| if (tbe.doCacheFill) { |
| tbe.actions.push(Event:SendReadShared); |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } else { |
| tbe.actions.push(Event:SendReadOnce); |
| tbe.dataToBeInvalid := true; |
| } |
| } |
| |
| |
| |
| action(Initiate_StoreHit, desc="") { |
| tbe.actions.push(Event:DataArrayRead); |
| tbe.actions.push(Event:StoreHit); |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_StoreMiss, desc="") { |
| if (tbe.doCacheFill) { |
| tbe.actions.push(Event:SendReadUnique); |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } else { |
| tbe.actions.push(Event:SendWriteUnique); |
| tbe.actions.push(Event:SendWUDataCB); |
| tbe.dataToBeInvalid := true; |
| } |
| } |
| |
| action(Initiate_StoreUpgrade, desc="") { |
| assert(tbe.dataValid); |
| assert(is_valid(cache_entry)); |
| tbe.actions.push(Event:SendCleanUnique); |
| tbe.actions.push(Event:CheckUpgrade_FromStore); |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_WriteUnique_LocalWrite, desc="") { |
  // auto-upgrade if HN but the state was not unique
| assert(is_HN || tbe.dataUnique); |
| tbe.dataUnique := true; |
| if (tbe.dir_sharers.count() > 0) { |
| tbe.actions.push(Event:SendSnpCleanInvalid); |
| } |
| if (comp_wu) { |
| tbe.actions.push(Event:SendDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| tbe.actions.pushNB(Event:SendComp_WU); |
| } else { |
| tbe.actions.push(Event:SendCompDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| } |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_WriteUnique_LocalWrite_AfterUpgrade, desc="") { |
| assert(is_HN == false); |
| assert((tbe.dataValid && tbe.dataUnique) == false); |
| tbe.actions.push(Event:SendReadUnique); |
| if (comp_wu) { |
| tbe.actions.push(Event:SendDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| tbe.actions.pushNB(Event:SendComp_WU); |
| } else { |
| tbe.actions.push(Event:SendCompDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| } |
| tbe.actions.push(Event:CheckCacheFill); |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_WriteUnique_Writeback, desc="") { |
| assert(is_HN); |
| assert(tbe.dir_sharers.count() > 0); |
| tbe.actions.push(Event:SendSnpUnique); |
| if (comp_wu) { |
| tbe.actions.push(Event:SendDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| tbe.actions.pushNB(Event:SendWriteNoSnp); |
| tbe.actions.pushNB(Event:SendComp_WU); |
| } else { |
| tbe.actions.push(Event:SendCompDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| tbe.actions.pushNB(Event:SendWriteNoSnp); |
| } |
| tbe.actions.push(Event:WriteBEPipe); |
| tbe.actions.push(Event:SendWBData); |
| tbe.dataToBeInvalid := true; |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_WriteUnique_PartialWrite, desc="") { |
| assert(is_HN); |
| if (tbe.dir_sharers.count() > 0) { |
| tbe.actions.push(Event:SendSnpCleanInvalid); |
| } |
| if (comp_wu) { |
| tbe.actions.push(Event:SendDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| tbe.actions.pushNB(Event:SendWriteNoSnpPartial); |
| tbe.actions.pushNB(Event:SendComp_WU); |
| } else { |
| tbe.actions.push(Event:SendCompDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| tbe.actions.pushNB(Event:SendWriteNoSnpPartial); |
| } |
| tbe.actions.push(Event:WriteBEPipe); |
| tbe.actions.push(Event:SendWUData); |
| tbe.dataToBeInvalid := true; |
| |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_WriteUnique_Forward, desc="") { |
| if (comp_wu) { |
| tbe.actions.push(Event:SendDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| tbe.actions.pushNB(Event:SendWriteUnique); |
| tbe.actions.pushNB(Event:SendComp_WU); |
| } else { |
| tbe.actions.push(Event:SendCompDBIDResp_WU); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| tbe.actions.pushNB(Event:SendWriteUnique); |
| } |
| tbe.actions.push(Event:WriteBEPipe); |
| tbe.actions.push(Event:SendWUData); |
| tbe.dataToBeInvalid := true; |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| |
| |
| action(Initiate_CopyBack, desc="") { |
| // expect to receive this data after Send_CompDBIDResp |
| if (tbe.reqType == CHIRequestType:WriteBackFull) { |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CBWrData_UD_PD); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CBWrData_SD_PD); |
  } else if (tbe.reqType == CHIRequestType:WriteEvictFull) {
    tbe.expected_req_resp.addExpectedDataType(CHIDataType:CBWrData_UC);
    tbe.expected_req_resp.addExpectedDataType(CHIDataType:CBWrData_SC);
| } else { |
| assert(tbe.reqType == CHIRequestType:WriteCleanFull); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CBWrData_UD_PD); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CBWrData_SD_PD); |
| } |
| tbe.expected_req_resp.setExpectedCount(1); |
| |
| tbe.actions.pushNB(Event:SendCompDBIDResp); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| tbe.actions.push(Event:MaintainCoherence); |
| // MaintainCoherence queues the Tag/Data updates |
| } |
| |
| action(Initiate_CopyBack_Stale, desc="") { |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CBWrData_SC); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CBWrData_I); |
| tbe.expected_req_resp.setExpectedCount(1); |
| |
| tbe.actions.pushNB(Event:SendCompDBIDRespStale); |
| tbe.actions.pushNB(Event:WriteFEPipe); |
| |
  // if it was the last known sharer and we don't have the data, do the same
  // as Initiate_Evict
| if ((is_HN == false) && (tbe.dir_sharers.count() == 1) && |
| tbe.dir_sharers.isElement(tbe.requestor) && (tbe.dataValid == false)) { |
| tbe.actions.push(Event:SendEvict); |
| } |
| |
| tbe.dir_sharers.remove(tbe.requestor); |
| assert((tbe.dir_ownerExists == false) || (tbe.dir_owner != tbe.requestor)); |
| |
  // usually we consider data locally invalid on RU states even if we
  // have a copy; consider it valid for this transition only so we can
  // come back to UD_RU/UC_RU
| if (is_valid(cache_entry) && (tbe.dataValid == false) && |
| tbe.dir_ownerExists && tbe.dir_ownerIsExcl) { |
| tbe.dataValid := true; |
| } |
| } |
| |
| action(Initiate_Evict, desc="") { |
| tbe.actions.push(Event:SendCompIResp); |
| |
| assert(tbe.dir_sharers.isElement(tbe.requestor)); |
| assert((tbe.dir_ownerExists == false) || (tbe.dir_owner != tbe.requestor)); |
| tbe.dir_sharers.remove(tbe.requestor); |
| |
| if ((is_HN == false) && (tbe.dir_sharers.count() == 0) && |
| (tbe.dataValid == false)) { |
| tbe.actions.push(Event:SendEvict); |
| } |
| |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_MaitainCoherence, desc="") { |
  // issue a copy back if necessary to maintain coherence for data we are
  // dropping. This should be executed at the end of a transaction
| assert(tbe.actions.empty()); |
| // go through either the fill or the writeback pipeline |
| if (tbe.dataValid && tbe.dataToBeInvalid) { |
| if (is_HN) { |
| if (tbe.dataDirty && (tbe.dataMaybeDirtyUpstream == false)) { |
| tbe.actions.push(Event:SendWriteNoSnp); |
| tbe.actions.push(Event:WriteBEPipe); |
| tbe.actions.push(Event:SendWBData); |
| } |
| } else { |
| if (tbe.dir_sharers.isEmpty() && (tbe.dataDirty || tbe.dataUnique)) { |
| tbe.actions.push(Event:SendWriteBackOrWriteEvict); |
| tbe.actions.push(Event:WriteBEPipe); |
| tbe.actions.push(Event:SendWBData); |
| } else if ((tbe.dir_sharers.isEmpty() == false) && tbe.dataDirty && |
| (tbe.dataMaybeDirtyUpstream == false)) { |
| tbe.actions.push(Event:SendWriteClean); |
| tbe.actions.push(Event:WriteBEPipe); |
| tbe.actions.push(Event:SendWBData); |
| } |
| } |
| } |
| else if (tbe.dataValid) { |
| tbe.actions.push(Event:CheckCacheFill); |
| } |
| tbe.actions.push(Event:TagArrayWrite); |
| } |
| |
| |
| |
// There is a lot of common logic shared by SnpUnique/SnpUniqueFwd/SnpCleanInvalid,
// so handle all of them in a single action here
| action(Initiate_InvalidationSnoop, desc="") { |
| tbe.actions.push(Event:SnpInvPipe); |
| // Propagate a snoop upwards depending on the type |
| if (tbe.dir_sharers.count() > 0) { |
| if ((tbe.reqType == CHIRequestType:SnpUniqueFwd) || |
| (tbe.reqType == CHIRequestType:SnpUnique)) { |
| if ((tbe.snpNeedsData && (tbe.dataMaybeDirtyUpstream == false)) || |
| (tbe.dataValid == false)) { |
| tbe.actions.push(Event:SendSnpUniqueRetToSrc); |
| } else { |
| tbe.actions.push(Event:SendSnpUnique); |
| } |
| } else { |
| assert(tbe.reqType == CHIRequestType:SnpCleanInvalid); |
| tbe.actions.push(Event:SendSnpCleanInvalid); |
| } |
| } |
| |
| if (tbe.reqType == CHIRequestType:SnpUniqueFwd) { |
| tbe.actions.push(Event:SendSnpUniqueFwdCompData); |
| } else { |
| tbe.actions.push(Event:SendInvSnpResp); |
| } |
| |
| if(tbe.is_req_hazard || tbe.is_repl_hazard) { |
| tbe.actions.push(Event:RestoreFromHazard); |
| } else { |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| tbe.dataToBeInvalid := true; |
| } |
| |
| action(Initiate_SnpShared, desc="") { |
  // Handles SnpShared, SnpSharedFwd, and SnpNotSharedDirtyFwd
| tbe.actions.push(Event:SnpSharedPipe); |
| if (tbe.dir_ownerExists) { |
| assert(tbe.dataMaybeDirtyUpstream); |
| tbe.actions.push(Event:SendSnpShared); |
| } else if (tbe.dataValid == false) { |
| // must get a copy of shared data upstream |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| assert(tbe.dir_sharers.count() > 0); |
| tbe.actions.push(Event:SendSnpOnce); |
| } else { |
| tbe.actions.push(Event:DataArrayRead); |
| } |
| |
| if (tbe.reqType == CHIRequestType:SnpSharedFwd) { |
| tbe.actions.push(Event:SendSnpSharedFwdCompData); |
| } else if (tbe.reqType == CHIRequestType:SnpNotSharedDirtyFwd) { |
| tbe.actions.push(Event:SendSnpNotSharedDirtyFwdCompData); |
| } else { |
| assert(tbe.reqType == CHIRequestType:SnpShared); |
| tbe.actions.push(Event:SendSnpData); |
| } |
| if (tbe.is_req_hazard || tbe.is_repl_hazard) { |
| tbe.actions.push(Event:RestoreFromHazard); |
| } else { |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| tbe.dataToBeSharedClean := true; |
| } |
| |
| action(Initiate_SnpOnce, desc="") { |
| tbe.actions.push(Event:SnpOncePipe); |
| if (tbe.dataValid == false) { |
| assert(tbe.dir_sharers.count() > 0); |
| tbe.actions.push(Event:SendSnpOnce); |
| } else { |
| tbe.actions.push(Event:DataArrayRead); |
| } |
| |
| if (tbe.reqType == CHIRequestType:SnpOnceFwd) { |
| tbe.actions.push(Event:SendSnpOnceFwdCompData); |
| } else { |
| assert(tbe.reqType == CHIRequestType:SnpOnce); |
| assert(tbe.snpNeedsData); |
| tbe.actions.push(Event:SendSnpData); |
| } |
| |
| if (tbe.is_req_hazard || tbe.is_repl_hazard) { |
| tbe.actions.push(Event:RestoreFromHazard); |
| } else { |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| } |
| |
| |
| |
| action(Initiate_Replacement_Evict_BackInvalidte, desc="") { |
| assert(is_HN == false); |
| tbe.actions.push(Event:SendSnpCleanInvalid); |
| tbe.actions.push(Event:SendEvict); |
| tbe.dataToBeInvalid := true; |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_Replacement_Evict, desc="") { |
| assert(is_HN == false); |
| assert(tbe.dir_sharers.isEmpty()); |
| tbe.actions.push(Event:SendEvict); |
| tbe.dataToBeInvalid := true; |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_Replacement_JustDrop, desc="") { |
| tbe.dataToBeInvalid := true; |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_Replacement_WB_BackInvalidate, desc="") { |
| assert(tbe.dataDirty || tbe.dataUnique || tbe.dataMaybeDirtyUpstream); |
| tbe.actions.push(Event:SendSnpCleanInvalid); |
| tbe.actions.push(Event:WriteFEPipe); |
| if (is_HN) { |
| if (tbe.dataDirty || tbe.dataMaybeDirtyUpstream) { |
| tbe.actions.push(Event:SendWriteNoSnp); |
| } |
| } else { |
| tbe.actions.push(Event:SendWriteBackOrWriteEvict); |
| } |
| tbe.actions.pushNB(Event:DataArrayRead); |
| tbe.actions.push(Event:WriteBEPipe); |
| tbe.actions.push(Event:SendWBData); |
| tbe.dataToBeInvalid := true; |
| |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| action(Initiate_Replacement_WB, desc="") { |
| tbe.actions.push(Event:WriteFEPipe); |
| if (is_HN) { |
| assert(tbe.dataDirty); |
| tbe.actions.push(Event:SendWriteNoSnp); |
| } else if (tbe.dir_sharers.isEmpty()) { |
| assert(tbe.dataDirty || tbe.dataUnique); |
| tbe.actions.push(Event:SendWriteBackOrWriteEvict); |
| } else { |
| assert(tbe.dataDirty); |
| tbe.actions.push(Event:SendWriteClean); |
| } |
| tbe.actions.pushNB(Event:DataArrayRead); |
| tbe.actions.push(Event:WriteBEPipe); |
| tbe.actions.push(Event:SendWBData); |
| tbe.dataToBeInvalid := true; |
| tbe.actions.pushNB(Event:TagArrayWrite); |
| } |
| |
| |
| |
| action(Send_ReadShared, desc="") { |
| assert(is_HN == false); |
| assert(tbe.dataValid == false); |
| |
| clearExpectedReqResp(tbe); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:DataSepResp_UC); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CompData_UC); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CompData_UD_PD); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CompData_SC); |
| if (allow_SD) { |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CompData_SD_PD); |
| } |
| // NOTE: the first CompData received counts as RespSepData |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:RespSepData); |
| tbe.expected_req_resp.setExpectedCount(2); |
| tbe.dataBlkValid.clear(); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| if (allow_SD) { |
| prepareRequest(tbe, CHIRequestType:ReadShared, out_msg); |
| } else { |
| prepareRequest(tbe, CHIRequestType:ReadNotSharedDirty, out_msg); |
| } |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| out_msg.dataToFwdRequestor := false; |
| allowRequestRetry(tbe, out_msg); |
| } |
| } |
| |
| action(Send_ReadNoSnp, desc="") { |
| assert(is_HN); |
| assert(tbe.use_DMT == false); |
| |
| clearExpectedReqResp(tbe); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CompData_UC); |
| // NOTE: the first CompData received counts as RespSepData |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:RespSepData); |
| tbe.expected_req_resp.setExpectedCount(2); |
| tbe.dataBlkValid.clear(); |
| outgoingTransactionStart(address, curTransitionEvent()); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequest(tbe, CHIRequestType:ReadNoSnp, out_msg); |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| out_msg.dataToFwdRequestor := false; |
| allowRequestRetry(tbe, out_msg); |
| } |
| } |
| |
| action(Send_ReadNoSnpDMT, desc="") { |
| assert(is_HN); |
| assert(tbe.use_DMT); |
| |
| CHIRequestType req := CHIRequestType:ReadNoSnp; |
| if (enable_DMT_early_dealloc) { |
| req := CHIRequestType:ReadNoSnpSep; |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:ReadReceipt); |
| tbe.expected_req_resp.addExpectedCount(1); |
| } |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequest(tbe, req, out_msg); |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| out_msg.dataToFwdRequestor := true; |
| allowRequestRetry(tbe, out_msg); |
| } |
| } |
| |
| action(Send_ReadOnce, desc="") { |
| assert(is_HN == false); |
| assert(tbe.dataValid == false); |
| |
| clearExpectedReqResp(tbe); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:DataSepResp_UC); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CompData_UC); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CompData_I); |
| // NOTE: the first CompData received counts as RespSepData |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:RespSepData); |
| tbe.expected_req_resp.setExpectedCount(2); |
| tbe.dataBlkValid.clear(); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequest(tbe, CHIRequestType:ReadOnce, out_msg); |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| out_msg.dataToFwdRequestor := false; |
| allowRequestRetry(tbe, out_msg); |
| } |
| } |
| |
| action(Send_ReadUnique, desc="") { |
| assert((tbe.dataValid && tbe.dataUnique) == false); |
| |
| assert(tbe.expected_req_resp.hasExpected() == false); |
| clearExpectedReqResp(tbe); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:DataSepResp_UC); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CompData_UC); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:CompData_UD_PD); |
| // NOTE: the first CompData received counts as RespSepData |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:RespSepData); |
| tbe.expected_req_resp.setExpectedCount(2); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequest(tbe, CHIRequestType:ReadUnique, out_msg); |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| out_msg.dataToFwdRequestor := false; |
| allowRequestRetry(tbe, out_msg); |
| } |
| } |
| |
| action(Send_CleanUnique, desc="") { |
| assert(tbe.dataValid || (tbe.dir_sharers.count() > 0)); |
| assert(tbe.dataUnique == false); |
| |
| assert(tbe.expected_req_resp.hasExpected() == false); |
| clearExpectedReqResp(tbe); |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:Comp_UC); |
| tbe.expected_req_resp.setExpectedCount(1); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequest(tbe, CHIRequestType:CleanUnique, out_msg); |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| allowRequestRetry(tbe, out_msg); |
| } |
| } |
| |
| action(Send_Evict, desc="") { |
| assert(is_valid(tbe)); |
| assert(is_HN == false); |
| assert(tbe.expected_req_resp.hasExpected() == false); |
| clearExpectedReqResp(tbe); |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequest(tbe, CHIRequestType:Evict, out_msg); |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| allowRequestRetry(tbe, out_msg); |
| } |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:Comp_I); |
| tbe.expected_req_resp.setExpectedCount(1); |
| } |
| |
| action(Send_InvSnpResp, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| if (tbe.dataDirty || tbe.snpNeedsData || |
| (tbe.dataUnique && (tbe.reqType == CHIRequestType:SnpUnique))) { |
| tbe.actions.pushFront(Event:SendSnpData); |
| } else { |
| tbe.actions.pushFront(Event:SendSnpIResp); |
| } |
| } |
| |
| action(Send_WriteBackOrWriteEvict, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.dataBlkValid.isFull()); |
| assert(tbe.dataValid); |
| assert(is_HN == false); |
| |
| assert(tbe.dataUnique || tbe.dataDirty); |
| assert(tbe.dir_sharers.isEmpty()); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| if (tbe.dataDirty) { |
| prepareRequest(tbe, CHIRequestType:WriteBackFull, out_msg); |
| } else { |
| prepareRequest(tbe, CHIRequestType:WriteEvictFull, out_msg); |
| } |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| allowRequestRetry(tbe, out_msg); |
| } |
| clearExpectedReqResp(tbe); |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:CompDBIDResp); |
| tbe.expected_req_resp.setExpectedCount(1); |
| } |
| |
| action(Send_WriteCleanFull, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.dataBlkValid.isFull()); |
| assert(tbe.dataValid); |
| assert(is_HN == false); |
| assert(tbe.dataDirty); |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequest(tbe, CHIRequestType:WriteCleanFull, out_msg); |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| allowRequestRetry(tbe, out_msg); |
| } |
| clearExpectedReqResp(tbe); |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:CompDBIDResp); |
| tbe.expected_req_resp.setExpectedCount(1); |
| } |
| |
| action(Send_WriteNoSnp, desc="") { |
| assert(is_valid(tbe)); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequest(tbe, CHIRequestType:WriteNoSnp, out_msg); |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| allowRequestRetry(tbe, out_msg); |
| } |
  // allow expecting this on top of data coming from upstream,
  // hence addExpectedCount rather than setExpectedCount
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:CompDBIDResp); |
| tbe.expected_req_resp.addExpectedCount(1); |
| } |
| |
| action(Send_WriteNoSnp_Partial, desc="") { |
| assert(is_valid(tbe)); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequest(tbe, CHIRequestType:WriteNoSnpPtl, out_msg); |
| out_msg.accAddr := tbe.accAddr; |
| out_msg.accSize := tbe.accSize; |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| allowRequestRetry(tbe, out_msg); |
| } |
  // allow expecting this on top of data coming from upstream,
  // hence addExpectedCount rather than setExpectedCount
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:CompDBIDResp); |
| tbe.expected_req_resp.addExpectedCount(1); |
| } |
| |
| action(Send_WriteUnique, desc="") { |
| assert(is_valid(tbe)); |
| |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| if (tbe.accSize == blockSize) { |
| prepareRequest(tbe, CHIRequestType:WriteUniqueFull, out_msg); |
| } else { |
| prepareRequest(tbe, CHIRequestType:WriteUniquePtl, out_msg); |
| out_msg.accAddr := tbe.accAddr; |
| out_msg.accSize := tbe.accSize; |
| } |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| allowRequestRetry(tbe, out_msg); |
| } |
  // allow expecting this on top of data coming from upstream,
  // hence addExpectedCount rather than setExpectedCount
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:CompDBIDResp); |
  // if we receive only DBIDResp, a separate Comp is expected later
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:DBIDResp); |
| tbe.expected_req_resp.addExpectedCount(1); |
| } |
| |
| action(Send_SnpCleanInvalid, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.expected_snp_resp.hasExpected() == false); |
  // at least one sharer or owner, otherwise we should not execute this
| assert(tbe.dir_sharers.count() > 0); |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpCleanInvalid, out_msg); |
| out_msg.Destination.addNetDest(tbe.dir_sharers); |
| out_msg.retToSrc := false; |
| } |
| setExpectedForInvSnoop(tbe, false); |
| } |
| |
| action(Send_SnpCleanInvalid_NoReq, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.expected_snp_resp.hasExpected() == false); |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpCleanInvalid, out_msg); |
| out_msg.Destination.addNetDest(tbe.dir_sharers); |
| out_msg.Destination.remove(tbe.requestor); |
| // at least one sharer other than requestor |
| assert(out_msg.Destination.count() > 0); |
| out_msg.retToSrc := false; |
| setExpectedForInvSnoop(tbe, false); |
| tbe.expected_snp_resp.setExpectedCount(out_msg.Destination.count()); |
| } |
| } |
| |
| action(Send_SnpUnique, desc="") { |
| assert(is_valid(tbe)); |
  // at least one sharer or owner, otherwise we should not execute this
| assert(tbe.dir_sharers.count() > 0); |
| |
| setExpectedForInvSnoop(tbe, true); |
| |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpUnique, out_msg); |
| out_msg.Destination.addNetDest(tbe.dir_sharers); |
| out_msg.retToSrc := false; |
| } |
| } |
| |
| action(Send_SnpUnique_RetToSrc, desc="") { |
| assert(is_valid(tbe)); |
  // at least one sharer or owner, otherwise we should not execute this
| assert(tbe.dir_sharers.count() > 0); |
| |
| setExpectedForInvSnoop(tbe, true); |
| |
| MachineID dest; |
| if (tbe.dir_ownerExists) { |
| dest := tbe.dir_owner; |
| } else { |
| // TODO should be random or the closest one |
| dest := tbe.dir_sharers.smallestElement(); |
| } |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpUnique, out_msg); |
| out_msg.Destination.add(dest); |
| out_msg.retToSrc := true; |
| } |
  // if there are other sharers, send to them with retToSrc=false
| if (tbe.dir_sharers.count() > 1) { |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpUnique, out_msg); |
| out_msg.Destination.addNetDest(tbe.dir_sharers); |
| out_msg.Destination.remove(dest); |
| out_msg.retToSrc := false; |
| } |
| } |
| } |
| |
| action(Send_SnpUniqueFwd, desc="") { |
| assert(is_valid(tbe)); |
  // exactly one sharer or owner, otherwise we should not execute this
| assert(tbe.dir_sharers.count() == 1); |
| |
| assert(tbe.expected_snp_resp.expected() == 0); |
| clearExpectedSnpResp(tbe); |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_I_Fwded_UC); |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_I_Fwded_UD_PD); |
| tbe.expected_snp_resp.addExpectedCount(1); |
| |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpUniqueFwd, out_msg); |
| out_msg.Destination.addNetDest(tbe.dir_sharers); |
| out_msg.retToSrc := false; |
| } |
| } |
| |
| action(Send_SnpShared, desc="") { |
| assert(is_valid(tbe)); |
| |
| // only sent to a dirty or exclusive snoopee |
| assert(tbe.dataMaybeDirtyUpstream); |
| assert(tbe.dir_ownerExists); |
| assert(tbe.dir_sharers.count() > 0); |
| |
| assert(tbe.expected_snp_resp.expected() == 0); |
| clearExpectedSnpResp(tbe); |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_SC); |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_SC_PD); |
| tbe.expected_snp_resp.setExpectedCount(1); |
| |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpShared, out_msg); |
| out_msg.Destination.add(tbe.dir_owner); |
| out_msg.retToSrc := false; |
| } |
| } |
| |
| action(Send_SnpSharedFwd_ToOwner, desc="") { |
| assert(is_valid(tbe)); |
| |
| // the dirty snoopee must go to SC and send data |
| assert(tbe.dataMaybeDirtyUpstream); |
| assert(tbe.dir_ownerExists); |
| assert(tbe.dir_sharers.count() > 0); |
| |
| assert(tbe.expected_snp_resp.expected() == 0); |
| clearExpectedSnpResp(tbe); |
| |
| bool allowFwdSD := tbe.reqType != CHIRequestType:ReadNotSharedDirty; |
| |
| // get us a copy if we have allocated a cache entry for this block |
| bool retToSrc := tbe.doCacheFill && (tbe.dataToBeInvalid == false); |
| |
| if (allowFwdSD) { |
| if (retToSrc) { |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_SC_Fwded_SC); |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_SC_Fwded_SD_PD); |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_I_Fwded_SC); |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_I_Fwded_SD_PD); |
| } else { |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_SC_Fwded_SC); |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_SC_Fwded_SD_PD); |
| } |
| } else { |
| if (retToSrc) { |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_SC_Fwded_SC); |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_I_Fwded_SC); |
| } else { |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_SC_Fwded_SC); |
| } |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_SC_PD_Fwded_SC); |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_I_PD_Fwded_SC); |
| } |
| tbe.expected_snp_resp.addExpectedCount(1); |
| |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| if (allowFwdSD) { |
| prepareRequest(tbe, CHIRequestType:SnpSharedFwd, out_msg); |
| } else { |
| prepareRequest(tbe, CHIRequestType:SnpNotSharedDirtyFwd, out_msg); |
| } |
| out_msg.Destination.add(tbe.dir_owner); |
| out_msg.retToSrc := retToSrc; |
| } |
| } |
| |
| action(Send_SnpSharedFwd_ToSharer, desc="") { |
| assert(is_valid(tbe)); |
  // send to one of the sharers holding shared clean data
| assert(tbe.dataMaybeDirtyUpstream == false); |
| assert(tbe.dir_ownerExists == false); |
| assert(tbe.dir_sharers.count() > 0); |
| |
| assert(tbe.expected_snp_resp.expected() == 0); |
| clearExpectedSnpResp(tbe); |
  // if we have a block allocated for this line, ask the snoopee to forward
  // data to us as well
| bool retToSrc := tbe.doCacheFill; |
| if (retToSrc) { |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_SC_Fwded_SC); |
| } else { |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_SC_Fwded_SC); |
| } |
| tbe.expected_snp_resp.addExpectedCount(1); |
| |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpSharedFwd, out_msg); |
| // TODO should be random or the closest one to the fwd dest |
| out_msg.Destination.add(tbe.dir_sharers.smallestElement()); |
| out_msg.retToSrc := retToSrc; |
| } |
| } |
| |
| action(Send_SnpOnce, desc="") { |
| assert(is_valid(tbe)); |
| |
| // send to one of the sharers or owner to get a copy of the line |
| assert(tbe.dir_sharers.count() > 0); |
| |
| assert(tbe.expected_snp_resp.expected() == 0); |
| clearExpectedSnpResp(tbe); |
| |
| if (tbe.dir_ownerExists) { |
| if (tbe.dir_ownerIsExcl) { |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_UC); |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_UD); |
| } else { |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_SD); |
| } |
| } else { |
| tbe.expected_snp_resp.addExpectedDataType(CHIDataType:SnpRespData_SC); |
| } |
| tbe.expected_snp_resp.addExpectedCount(1); |
| |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpOnce, out_msg); |
| if (tbe.dir_ownerExists) { |
| out_msg.Destination.add(tbe.dir_owner); |
| } else { |
| // TODO should be random or the closest one |
| out_msg.Destination.add(tbe.dir_sharers.smallestElement()); |
| } |
| out_msg.retToSrc := true; |
| } |
| } |
| |
| action(Send_SnpOnceFwd, desc="") { |
| assert(is_valid(tbe)); |
| |
| // send to one of the sharers or owner to get a copy of the line |
| assert(tbe.dir_sharers.count() > 0); |
| |
| assert(tbe.expected_snp_resp.expected() == 0); |
| clearExpectedSnpResp(tbe); |
| |
| if (tbe.dir_ownerExists) { |
| if (tbe.dir_ownerIsExcl) { |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_UC_Fwded_I); |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_UD_Fwded_I); |
| } else { |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_SD_Fwded_I); |
| } |
| } else { |
| tbe.expected_snp_resp.addExpectedRespType(CHIResponseType:SnpResp_SC_Fwded_I); |
| } |
| tbe.expected_snp_resp.addExpectedCount(1); |
| |
| enqueue(snpOutPort, CHIRequestMsg, snoop_latency) { |
| prepareRequest(tbe, CHIRequestType:SnpOnceFwd, out_msg); |
| if (tbe.dir_ownerExists) { |
| out_msg.Destination.add(tbe.dir_owner); |
| } else { |
| // TODO should be random or the closest one |
| out_msg.Destination.add(tbe.dir_sharers.smallestElement()); |
| } |
| out_msg.retToSrc := false; |
| } |
| } |
| |
| |
| action(ExpectNCBWrData, desc="") { |
| // Expected data |
| int num_msgs := tbe.accSize / data_channel_size; |
| if ((tbe.accSize % data_channel_size) != 0) { |
| num_msgs := num_msgs + 1; |
| } |
| tbe.expected_req_resp.clear(num_msgs); |
| tbe.expected_req_resp.addExpectedDataType(CHIDataType:NCBWrData); |
| tbe.expected_req_resp.setExpectedCount(1); |
| |
| // Clear the mask bits we expect to receive |
| tbe.dataBlkValid.setMask(addressOffset(tbe.accAddr, tbe.addr), tbe.accSize, false); |
| } |
| |
| action(ExpectCompAck, desc="") { |
| assert(is_valid(tbe)); |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:CompAck); |
| tbe.expected_req_resp.addExpectedCount(1); |
| } |
| |
| action(Receive_ReqDataResp, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.expected_req_resp.hasExpected()); |
| peek(datInPort, CHIDataMsg) { |
| // Decrement pending |
| if (tbe.expected_req_resp.receiveData(in_msg.type) == false) { |
| error("Received unexpected message"); |
| } |
| // Copy data to tbe only if we didn't have valid data or the received |
| // data is dirty |
| if ((tbe.dataBlkValid.isFull() == false) || |
| (in_msg.type == CHIDataType:CompData_UD_PD) || |
| (in_msg.type == CHIDataType:CompData_SD_PD) || |
| (in_msg.type == CHIDataType:CBWrData_UD_PD) || |
| (in_msg.type == CHIDataType:CBWrData_SD_PD) || |
| (in_msg.type == CHIDataType:NCBWrData)) { |
      // clear the mask if we started to receive new data
      if (tbe.dataBlkValid.isFull()) {
| tbe.dataBlkValid.clear(); |
| } |
| tbe.dataBlk.copyPartial(in_msg.dataBlk, in_msg.bitMask); |
| assert(tbe.dataBlkValid.isOverlap(in_msg.bitMask) == false); |
| tbe.dataBlkValid.orMask(in_msg.bitMask); |
| } |
| } |
| } |
| |
| action(Receive_RespSepDataFromCompData, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.expected_req_resp.hasExpected()); |
| // check if a previous CompData msg already counted as a RespSepData |
| if (tbe.expected_req_resp.receivedRespType(CHIResponseType:RespSepData) == false) { |
| if (tbe.expected_req_resp.receiveResp(CHIResponseType:RespSepData) == false) { |
| error("Received unexpected message"); |
| } |
| if (is_HN == false) { |
| // must now ack the responder |
| tbe.actions.pushFrontNB(Event:SendCompAck); |
| } |
| } |
| } |
| |
| action(Receive_RespSepData, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.expected_req_resp.hasExpected()); |
| if (tbe.expected_req_resp.receiveResp(CHIResponseType:RespSepData) == false) { |
| error("Received unexpected message"); |
| } |
| if (is_HN == false) { |
| // must now ack the responder |
| tbe.actions.pushFrontNB(Event:SendCompAck); |
| } |
| } |
| |
| action(Receive_ReadReceipt, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.expected_req_resp.hasExpected()); |
| if (tbe.expected_req_resp.receiveResp(CHIResponseType:ReadReceipt) == false) { |
| error("Received unexpected message"); |
| } |
| } |
| |
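// Receive one chunk of snoop response data, mirroring Receive_ReqDataResp:
// dirty (_PD) data always overwrites the local copy, clean data only when
// the block is not already fully valid.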
| action(Receive_SnpDataResp, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.expected_snp_resp.hasExpected()); |
| peek(datInPort, CHIDataMsg) { |
| // Decrement pending |
| if (tbe.expected_snp_resp.receiveData(in_msg.type) == false) { |
| error("Received unexpected message"); |
| } |
| // Copy data to tbe only if we didn't have valid data or the received |
| // data is dirty |
| if ((tbe.dataBlkValid.isFull() == false) || |
| (in_msg.type == CHIDataType:SnpRespData_I_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_Fwded_SD_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_PD_Fwded_SC) || |
| (in_msg.type == CHIDataType:SnpRespData_I_Fwded_SD_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_I_PD_Fwded_SC)) { |
      // reset the valid mask when new data starts to replace a full block
      if (tbe.dataBlkValid.isFull()) {
        tbe.dataBlkValid.clear();
      }
| tbe.dataBlk.copyPartial(in_msg.dataBlk, in_msg.bitMask); |
| assert(tbe.dataBlkValid.isOverlap(in_msg.bitMask) == false); |
| tbe.dataBlkValid.orMask(in_msg.bitMask); |
| } |
| } |
| } |
| |
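// Update the directory when the last chunk of writeback data arrives. The
// CBWrData_* type encodes the responder's previous state and determines
// whether ownership is cleared and whether the responder leaves the
// sharers list (WriteCleanFull lets it keep its copy).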
| action(UpdateDirState_FromReqDataResp, desc="") { |
| assert(is_valid(tbe)); |
  // only perform the update once all data chunks have been received
| if (tbe.expected_req_resp.hasReceivedData()) { |
| assert(tbe.dataBlkValid.isFull()); |
| peek(datInPort, CHIDataMsg) { |
| |
| if (in_msg.type == CHIDataType:CBWrData_UC) { |
| assert(tbe.dir_ownerExists && tbe.dir_ownerIsExcl && (tbe.dir_owner == in_msg.responder)); |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| tbe.dir_ownerExists := false; |
| tbe.dir_ownerIsExcl := false; |
| tbe.dir_sharers.remove(in_msg.responder); |
| |
| } else if (in_msg.type == CHIDataType:CBWrData_UD_PD) { |
| assert(tbe.dir_ownerExists && tbe.dir_ownerIsExcl && (tbe.dir_owner == in_msg.responder)); |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| if (tbe.pendReqType != CHIRequestType:WriteCleanFull) { |
| tbe.dir_ownerExists := false; |
| tbe.dir_ownerIsExcl := false; |
| tbe.dir_sharers.remove(in_msg.responder); |
| } |
| |
| } else if (in_msg.type == CHIDataType:CBWrData_SC) { |
| assert((tbe.dir_ownerExists == false) || (tbe.dir_owner != in_msg.responder)); |
| tbe.dir_sharers.remove(in_msg.responder); |
| |
| } else if (in_msg.type == CHIDataType:CBWrData_SD_PD) { |
| assert(tbe.dir_ownerExists && (tbe.dir_ownerIsExcl == false) && (tbe.dir_owner == in_msg.responder)); |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| tbe.dir_ownerExists := false; |
| tbe.dir_ownerIsExcl := false; |
| if (tbe.pendReqType != CHIRequestType:WriteCleanFull) { |
| tbe.dir_sharers.remove(in_msg.responder); |
| } |
| |
| } else if (in_msg.type == CHIDataType:CBWrData_I) { |
| // nothing to do here; just check |
| assert((tbe.dir_ownerExists == false) || (tbe.dir_owner != in_msg.responder)); |
| assert(tbe.dir_sharers.isElement(in_msg.responder) == false); |
| |
| } else { |
| error("Unsuported data type"); |
| } |
| } |
| } |
| printTBEState(tbe); |
| } |
| |
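// Update the directory when the last chunk of snoop response data arrives:
// _I types drop the responder from the sharers list, _SC types keep it,
// and _Fwded_ types also register the original requestor, as owner when
// the forwarded state was SD.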
| action(UpdateDirState_FromSnpDataResp, desc="") { |
| assert(is_valid(tbe)); |
  // only perform the update once all data chunks have been received
| if (tbe.expected_snp_resp.hasReceivedData()) { |
| assert(tbe.dataBlkValid.isFull()); |
| peek(datInPort, CHIDataMsg) { |
| |
| if (in_msg.type == CHIDataType:SnpRespData_I) { |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| tbe.dir_ownerExists := false; |
| tbe.dir_ownerIsExcl := false; |
| tbe.dir_sharers.remove(in_msg.responder); |
| |
| } else if (in_msg.type == CHIDataType:SnpRespData_I_PD) { |
| assert(tbe.dir_ownerExists && (tbe.dir_owner == in_msg.responder)); |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| tbe.dir_ownerExists := false; |
| tbe.dir_ownerIsExcl := false; |
| tbe.dir_sharers.remove(in_msg.responder); |
| |
| } else if ((in_msg.type == CHIDataType:SnpRespData_SC_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_SC) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_Fwded_SC) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_Fwded_SD_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_PD_Fwded_SC)) { |
| // the owner must have been the responder, if there was one |
| assert((tbe.dir_ownerExists == false) || |
| (tbe.dir_ownerExists && (tbe.dir_owner == in_msg.responder))); |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| tbe.dir_ownerExists := false; |
| tbe.dir_ownerIsExcl := false; |
| if ((in_msg.type == CHIDataType:SnpRespData_SC_Fwded_SC) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_PD_Fwded_SC) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_Fwded_SD_PD)) { |
| tbe.dir_sharers.add(tbe.requestor); |
| } |
| if (in_msg.type == CHIDataType:SnpRespData_SC_Fwded_SD_PD) { |
| tbe.dir_ownerExists := true; |
| tbe.dir_owner := tbe.requestor; |
| } |
| |
| } else if ((in_msg.type == CHIDataType:SnpRespData_I_Fwded_SD_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_I_PD_Fwded_SC) || |
| (in_msg.type == CHIDataType:SnpRespData_I_Fwded_SC)) { |
| // the owner must have been the responder, if there was one |
| assert((tbe.dir_ownerExists == false) || |
| (tbe.dir_ownerExists && (tbe.dir_owner == in_msg.responder))); |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| tbe.dir_ownerExists := false; |
| tbe.dir_ownerIsExcl := false; |
| tbe.dir_sharers.remove(in_msg.responder); |
| tbe.dir_sharers.add(tbe.requestor); |
| if (in_msg.type == CHIDataType:SnpRespData_I_Fwded_SD_PD) { |
| tbe.dir_ownerExists := true; |
| tbe.dir_owner := tbe.requestor; |
| } |
| |
| } else if ((in_msg.type == CHIDataType:SnpRespData_SD) || |
| (in_msg.type == CHIDataType:SnpRespData_UC) || |
| (in_msg.type == CHIDataType:SnpRespData_UD)) { |
        // expected only in response to a SnpOnce; just perform some checks.
        // A SnpRespData_SC may also arrive, but it is handled above.
| assert(tbe.dir_ownerExists && (tbe.dir_owner == in_msg.responder)); |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| |
| } else { |
| error("Unsuported data type"); |
| } |
| } |
| } |
| printTBEState(tbe); |
| } |
| |
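// Update the local data state once all request data has been received.
// CompData_* types install the state granted by the responder, while
// CBWrData_* types fold an upstream writeback into the local block.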
| action(UpdateDataState_FromReqDataResp, desc="") { |
| assert(is_valid(tbe)); |
  // only perform the update once all data chunks have been received
| if (tbe.expected_req_resp.hasReceivedData()) { |
| assert(tbe.dataBlkValid.isFull()); |
| peek(datInPort, CHIDataMsg) { |
| |
| if ((in_msg.type == CHIDataType:CompData_UC) || |
| (in_msg.type == CHIDataType:DataSepResp_UC)) { |
| assert(tbe.dataUnique == false); |
| assert((tbe.dataValid && tbe.dataDirty) == false); |
| tbe.dataDirty := false; |
| tbe.dataUnique := true; |
| tbe.dataValid := true; |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
| } else if (in_msg.type == CHIDataType:CompData_UD_PD) { |
| assert(tbe.dataUnique == false); |
| assert((tbe.dataValid && tbe.dataDirty) == false); |
| tbe.dataDirty := true; |
| tbe.dataUnique := true; |
| tbe.dataValid := true; |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
| } else if (in_msg.type == CHIDataType:CompData_SC) { |
| assert(tbe.dataUnique == false); |
| assert((tbe.dataValid && tbe.dataDirty) == false); |
| tbe.dataDirty := false; |
| tbe.dataUnique := false; |
| tbe.dataValid := true; |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
| } else if (in_msg.type == CHIDataType:CompData_SD_PD) { |
| assert(tbe.dataUnique == false); |
| assert((tbe.dataValid && tbe.dataDirty) == false); |
| tbe.dataDirty := true; |
| tbe.dataUnique := false; |
| tbe.dataValid := true; |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
| } else if (in_msg.type == CHIDataType:CompData_I) { |
| tbe.dataValid := true; |
| tbe.dataToBeInvalid := true; |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
| } else if (in_msg.type == CHIDataType:CBWrData_UC) { |
| assert(tbe.dataUnique); |
| tbe.dataMaybeDirtyUpstream := false; |
| tbe.dataValid := true; |
| |
| } else if (in_msg.type == CHIDataType:CBWrData_SC) { |
        // stale writeback; nothing to update
| |
| } else if (in_msg.type == CHIDataType:CBWrData_UD_PD) { |
| assert(tbe.dataUnique); |
| tbe.dataDirty := true; |
| tbe.dataValid := true; |
| tbe.dataMaybeDirtyUpstream := false; |
| |
| } else if (in_msg.type == CHIDataType:CBWrData_SD_PD) { |
| tbe.dataDirty := true; |
| tbe.dataValid := true; |
| tbe.dataMaybeDirtyUpstream := false; |
| |
| } else if (in_msg.type == CHIDataType:CBWrData_I) { |
        // stale writeback; nothing to update
| |
| } else { |
| error("Unsuported data type"); |
| } |
| } |
| } |
| printTBEState(tbe); |
| } |
| |
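// WriteUnique data (NCBWrData) makes the block dirty; it becomes fully
// valid only if the access covers the whole block.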
| action(UpdateDataState_FromWUDataResp, desc="") { |
| assert(is_valid(tbe)); |
| int offset := addressOffset(tbe.accAddr, tbe.addr); |
| if (tbe.expected_req_resp.hasReceivedData()) { |
| assert(tbe.dataBlkValid.test(offset)); |
| assert(tbe.dataBlkValid.test(offset + tbe.accSize - 1)); |
| peek(datInPort, CHIDataMsg) { |
| assert(in_msg.type == CHIDataType:NCBWrData); |
| tbe.dataDirty := true; |
| tbe.dataValid := tbe.accSize == blockSize; |
| } |
| } |
| printTBEState(tbe); |
| } |
| |
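// Comp_UC for a CleanUnique grants uniqueness only if our copy or an
// upstream copy survived while the request was pending.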
| action(UpdateDataState_FromCUResp, desc="") { |
| assert(is_valid(tbe)); |
| peek(rspInPort, CHIResponseMsg) { |
| assert(in_msg.type == CHIResponseType:Comp_UC); |
| assert(tbe.dataUnique == false); |
| tbe.dataUnique := tbe.dataValid || (tbe.dir_sharers.count() > 0); |
    // our copy and any upstream copies may have been invalidated while
    // this request was pending; if so, we expect to follow up with a ReadUnique
| } |
| printTBEState(tbe); |
| } |
| |
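// Update the local data state from snoop response data: _PD types pass
// dirty ownership down to us, SD and Fwded_SD types leave a dirty copy
// upstream, and UC/UD indicate the responder held the line unique.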
| action(UpdateDataState_FromSnpDataResp, desc="") { |
| assert(is_valid(tbe)); |
  // only perform the update once all data chunks have been received
| if (tbe.expected_snp_resp.hasReceivedData()) { |
| assert(tbe.dataBlkValid.isFull()); |
| peek(datInPort, CHIDataMsg) { |
| |
| if ((in_msg.type == CHIDataType:SnpRespData_I_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_PD_Fwded_SC) || |
| (in_msg.type == CHIDataType:SnpRespData_I_PD_Fwded_SC)) { |
| tbe.dataDirty := true; |
| tbe.dataValid := true; |
| tbe.dataMaybeDirtyUpstream := false; |
| |
| } else if ((in_msg.type == CHIDataType:SnpRespData_SD) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_Fwded_SD_PD) || |
| (in_msg.type == CHIDataType:SnpRespData_I_Fwded_SD_PD)) { |
| tbe.dataDirty := true; |
| tbe.dataValid := true; |
| tbe.dataMaybeDirtyUpstream := true; |
| |
| } else if ((in_msg.type == CHIDataType:SnpRespData_I) || |
| (in_msg.type == CHIDataType:SnpRespData_SC) || |
| (in_msg.type == CHIDataType:SnpRespData_SC_Fwded_SC) || |
| (in_msg.type == CHIDataType:SnpRespData_I_Fwded_SC)) { |
| tbe.dataValid := true; |
| tbe.dataMaybeDirtyUpstream := false; |
| |
| } else if ((in_msg.type == CHIDataType:SnpRespData_UC) || |
| (in_msg.type == CHIDataType:SnpRespData_UD)) { |
| tbe.dataValid := true; |
| tbe.dataUnique := true; |
| tbe.dataMaybeDirtyUpstream := true; |
        if (in_msg.type == CHIDataType:SnpRespData_UD) {
| tbe.dataDirty := true; |
| } |
| |
| } else { |
| error("Unsuported data type"); |
| } |
| } |
| } |
| printTBEState(tbe); |
| } |
| |
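// Register the requestor in the directory when its CompAck arrives: always
// as a sharer, and also as owner or exclusive owner when the data sent to
// it may be or become dirty upstream.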
| action(UpdateDirState_FromReqResp, desc="") { |
| peek(rspInPort, CHIResponseMsg) { |
| if ((in_msg.type == CHIResponseType:CompAck) && tbe.updateDirOnCompAck) { |
| assert(tbe.requestor == in_msg.responder); |
| |
| tbe.dir_sharers.add(in_msg.responder); |
| |
| if (tbe.requestorToBeOwner) { |
| assert(tbe.dataMaybeDirtyUpstream); |
| assert(tbe.dir_ownerExists == false); |
| assert(tbe.requestorToBeExclusiveOwner == false); |
| tbe.dir_owner := in_msg.responder; |
| tbe.dir_ownerExists := true; |
| tbe.dir_ownerIsExcl := false; |
| |
| } else if (tbe.requestorToBeExclusiveOwner) { |
| assert(tbe.dataMaybeDirtyUpstream); |
| assert(tbe.dir_ownerExists == false); |
| assert(tbe.dir_sharers.count() == 1); |
| tbe.dir_owner := in_msg.responder; |
| tbe.dir_ownerExists := true; |
| tbe.dir_ownerIsExcl := true; |
| } |
| } |
| } |
| printTBEState(tbe); |
| } |
| |
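// Update the directory from a dataless snoop response. The response type
// encodes the responder's new state and, for _Fwded_ types, the state
// forwarded to the requestor. Afterwards, upstream data can be dirty only
// if an owner remains.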
| action(UpdateDirState_FromSnpResp, desc="") { |
| peek(rspInPort, CHIResponseMsg) { |
| |
| if (in_msg.type == CHIResponseType:SnpResp_I) { |
      // must have been a known sharer, otherwise we would have received data
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| tbe.dir_sharers.remove(in_msg.responder); |
      if (tbe.dir_ownerExists && (tbe.dir_owner == in_msg.responder)) {
| tbe.dir_ownerExists := false; |
| } |
| |
| } else if (in_msg.type == CHIResponseType:SnpResp_SC) { |
| // expected from a sharer that already has it in shared state |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| assert((tbe.dir_ownerExists == false) || (tbe.dir_owner != in_msg.responder)); |
| |
| } else if ((in_msg.type == CHIResponseType:SnpResp_SC_Fwded_SC) || |
| (in_msg.type == CHIResponseType:SnpResp_SC_Fwded_SD_PD)) { |
| // the SnpSharedFwd must have been sent to the owner if there was one |
| assert((tbe.dir_ownerExists == false) || |
| (tbe.dir_ownerExists && (tbe.dir_owner == in_msg.responder))); |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| tbe.dir_ownerExists := false; |
| tbe.dir_ownerIsExcl := false; |
| tbe.dir_sharers.add(tbe.requestor); |
| if (in_msg.type == CHIResponseType:SnpResp_SC_Fwded_SD_PD) { |
| // Requestor is new owner |
| tbe.dir_ownerExists := true; |
| tbe.dir_owner := tbe.requestor; |
| } |
| |
| } else if ((in_msg.type == CHIResponseType:SnpResp_I_Fwded_UC) || |
| (in_msg.type == CHIResponseType:SnpResp_I_Fwded_UD_PD)) { |
| // must have been a single sharer that received SnpUniqueFwd |
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| assert(tbe.dir_sharers.count() == 1); |
| tbe.dir_sharers.remove(in_msg.responder); |
| // requestor is the new owner |
| tbe.dir_sharers.add(tbe.requestor); |
| tbe.dir_ownerExists := true; |
| tbe.dir_ownerIsExcl := true; |
| tbe.dir_owner := tbe.requestor; |
| |
| } else if ((in_msg.type == CHIResponseType:SnpResp_UC_Fwded_I) || |
| (in_msg.type == CHIResponseType:SnpResp_UD_Fwded_I) || |
| (in_msg.type == CHIResponseType:SnpResp_SD_Fwded_I)) { |
      // response to SnpOnceFwd; just confirm
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| assert(tbe.dir_ownerExists && (tbe.dir_owner == in_msg.responder)); |
| |
| } else if (in_msg.type == CHIResponseType:SnpResp_SC_Fwded_I) { |
      // response to SnpOnceFwd sent to a non-owner sharer; just confirm
| assert(tbe.dir_sharers.isElement(in_msg.responder)); |
| assert((tbe.dir_ownerExists == false) || (tbe.dir_owner != in_msg.responder)); |
| } |
| |
| tbe.dataMaybeDirtyUpstream := tbe.dir_ownerExists; |
| |
| } |
| printTBEState(tbe); |
| } |
| |
| action(Receive_ReqResp, desc="") { |
| assert(tbe.expected_req_resp.hasExpected()); |
| peek(rspInPort, CHIResponseMsg) { |
| // Decrement pending |
| if (tbe.expected_req_resp.receiveResp(in_msg.type) == false) { |
| error("Received unexpected message"); |
| } |
| assert(in_msg.stale == tbe.is_stale); |
| } |
| } |
| |
| action(Receive_ReqResp_WUNeedComp, desc="") { |
| tbe.defer_expected_comp := true; |
| } |
| |
| action(Receive_ReqResp_WUComp, desc="") { |
| if (tbe.defer_expected_comp) { |
| tbe.defer_expected_comp := false; |
| } else if (tbe.expected_req_resp.receiveResp(CHIResponseType:Comp) == false) { |
| error("Received unexpected message"); |
| } |
| } |
| |
| action(Receive_SnpResp, desc="") { |
| assert(tbe.expected_snp_resp.hasExpected()); |
| peek(rspInPort, CHIResponseMsg) { |
| // Decrement pending |
| if (tbe.expected_snp_resp.receiveResp(in_msg.type) == false) { |
| error("Received unexpected message"); |
| } |
| assert(in_msg.stale == tbe.is_stale); |
| } |
| } |
| |
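// Retry handling: a request is resent only after both the RetryAck and the
// PCrdGrant credit arrive, in either order. Each receipt calls
// enqueueDoRetry, and Send_Retry fires once both flags are set; the
// _Hazard variants apply the same steps to the TBE stalled by a hazard.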
| action(Receive_RetryAck, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.pendReqAllowRetry); |
| assert(tbe.rcvdRetryAck == false); |
| tbe.rcvdRetryAck := true; |
| destsWaitingRetry.addNetDest(tbe.pendReqDest); |
| enqueueDoRetry(tbe); |
| } |
| |
| action(Receive_PCrdGrant, desc="") { |
| assert(tbe.pendReqAllowRetry); |
| assert(tbe.rcvdRetryCredit == false); |
| tbe.rcvdRetryCredit := true; |
| enqueueDoRetry(tbe); |
| } |
| |
| action(Send_Retry, desc="") { |
| assert(tbe.pendReqAllowRetry); |
| assert(tbe.rcvdRetryCredit); |
| assert(tbe.rcvdRetryAck); |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequestRetry(tbe, out_msg); |
| } |
| } |
| |
| action(Receive_RetryAck_Hazard, desc="") { |
| TBE hazard_tbe := getHazardTBE(tbe); |
| assert(hazard_tbe.pendReqAllowRetry); |
| assert(hazard_tbe.rcvdRetryAck == false); |
| hazard_tbe.rcvdRetryAck := true; |
| destsWaitingRetry.addNetDest(hazard_tbe.pendReqDest); |
| enqueueDoRetry(hazard_tbe); |
| } |
| |
| action(Receive_PCrdGrant_Hazard, desc="") { |
| TBE hazard_tbe := getHazardTBE(tbe); |
| assert(hazard_tbe.pendReqAllowRetry); |
| assert(hazard_tbe.rcvdRetryCredit == false); |
| hazard_tbe.rcvdRetryCredit := true; |
| enqueueDoRetry(hazard_tbe); |
| } |
| |
| action(Send_Retry_Hazard, desc="") { |
| TBE hazard_tbe := getHazardTBE(tbe); |
| assert(hazard_tbe.pendReqAllowRetry); |
| assert(hazard_tbe.rcvdRetryCredit); |
| assert(hazard_tbe.rcvdRetryAck); |
| enqueue(reqOutPort, CHIRequestMsg, request_latency) { |
| prepareRequestRetry(hazard_tbe, out_msg); |
| } |
| } |
| |
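// Select the CompData type for a read response. ReadOnce gets CompData_I;
// if we are dropping our copy we pass on its exact state; ReadUnique (or
// ReadShared when fwd_unique_on_readshared applies) passes unique
// ownership; otherwise the requestor gets a shared clean copy. The chosen
// type also determines whether the requestor becomes (exclusive) owner and
// whether dirty data may now exist upstream.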
| action(Send_CompData, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.dataValid); |
| |
| bool is_rd_once := tbe.reqType == CHIRequestType:ReadOnce; |
| bool is_rd_shared := (tbe.reqType == CHIRequestType:ReadShared) || |
| (tbe.reqType == CHIRequestType:ReadNotSharedDirty); |
| bool is_rd_nsd := tbe.reqType == CHIRequestType:ReadNotSharedDirty; |
| bool is_rd_unique := tbe.reqType == CHIRequestType:ReadUnique; |
| |
| if (is_rd_once) { |
| tbe.snd_msgType := CHIDataType:CompData_I; |
| } else if (tbe.dataToBeInvalid) { |
    // We will drop the data, so propagate its coherence state upstream
| if (tbe.dataUnique && tbe.dataDirty) { |
| tbe.snd_msgType := CHIDataType:CompData_UD_PD; |
| } else if (tbe.dataUnique) { |
| tbe.snd_msgType := CHIDataType:CompData_UC; |
| } else if (tbe.dataDirty) { |
| if (is_rd_nsd) { |
| tbe.snd_msgType := CHIDataType:CompData_SC; |
| } else { |
| tbe.snd_msgType := CHIDataType:CompData_SD_PD; |
| } |
| } else { |
| tbe.snd_msgType := CHIDataType:CompData_SC; |
| } |
| } else if (is_rd_unique || |
| (is_rd_shared && tbe.dataUnique && |
| fwd_unique_on_readshared && (tbe.dir_ownerExists == false))) { |
    // propagate dirtiness to the requestor
| assert(tbe.dataUnique); |
| if (tbe.dataDirty) { |
| tbe.snd_msgType := CHIDataType:CompData_UD_PD; |
| } else { |
| tbe.snd_msgType := CHIDataType:CompData_UC; |
| } |
| } else if (is_rd_shared) { |
    // we still keep a copy, so we can send the data as SC
| tbe.snd_msgType := CHIDataType:CompData_SC; |
| } else { |
| error("Invalid request type"); |
| } |
| |
| tbe.dataMaybeDirtyUpstream := tbe.dataMaybeDirtyUpstream || |
| (tbe.snd_msgType == CHIDataType:CompData_UD_PD) || |
| (tbe.snd_msgType == CHIDataType:CompData_SD_PD) || |
| (tbe.snd_msgType == CHIDataType:CompData_UC); |
| tbe.requestorToBeExclusiveOwner := tbe.requestorToBeExclusiveOwner || |
| (tbe.snd_msgType == CHIDataType:CompData_UD_PD) || |
| (tbe.snd_msgType == CHIDataType:CompData_UC); |
| tbe.requestorToBeOwner := tbe.requestorToBeOwner || |
| (tbe.snd_msgType == CHIDataType:CompData_SD_PD); |
| |
| tbe.snd_destination := tbe.requestor; |
| setupPendingSend(tbe); |
| printTBEState(tbe); |
| } |
| |
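// Send writeback data downstream. At an HN the data goes to memory as
// NCBWrData; otherwise the CBWrData_* type encodes the state we held, with
// CBWrData_I used when a snoop made the writeback stale.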
| action(Send_WBData, desc="") { |
| assert(is_valid(tbe)); |
| if (is_HN) { |
| assert(tbe.dataBlkValid.isFull()); |
| assert(tbe.dataDirty); |
| assert(tbe.dataValid); |
| tbe.snd_msgType := CHIDataType:NCBWrData; |
| } else { |
| if (tbe.dataValid == false) { |
| // only possible when the WB was made stale by a snoop |
| assert(tbe.is_stale); |
| tbe.dataBlkValid.fillMask(); |
| tbe.snd_msgType := CHIDataType:CBWrData_I; |
| } else if (tbe.dataUnique) { |
| assert(tbe.dataBlkValid.isFull()); |
| if (tbe.dataDirty) { |
| tbe.snd_msgType := CHIDataType:CBWrData_UD_PD; |
| } else { |
| tbe.snd_msgType := CHIDataType:CBWrData_UC; |
| } |
| } else { |
| assert(tbe.dataBlkValid.isFull()); |
| if (tbe.dataDirty) { |
| tbe.snd_msgType := CHIDataType:CBWrData_SD_PD; |
| } else { |
| tbe.snd_msgType := CHIDataType:CBWrData_SC; |
| } |
| } |
| } |
| tbe.snd_destination := mapAddressToDownstreamMachine(tbe.addr); |
| setupPendingSend(tbe); |
| } |
| |
| action(Send_WUData, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.dataBlkValid.count() > 0); |
| tbe.snd_msgType := CHIDataType:NCBWrData; |
| tbe.snd_destination := mapAddressToDownstreamMachine(tbe.addr); |
| setupPendingPartialSend(tbe); |
| } |
| |
| action(CheckWUComp, desc="") { |
| assert(is_valid(tbe)); |
| if (tbe.defer_expected_comp) { |
| tbe.defer_expected_comp := false; |
| tbe.expected_req_resp.addExpectedCount(1); |
| tbe.expected_req_resp.addExpectedRespType(CHIResponseType:Comp); |
| } |
| } |
| |
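// Send a data response to a snoop (non-HN only). The type encodes our next
// state: I or SC when the snoop downgrades us (with _PD when we pass dirty
// data), or our current state for a SnpOnce, which leaves it unchanged.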
| action(Send_SnpRespData, desc="") { |
| assert(is_HN == false); |
| assert(is_valid(tbe)); |
| assert(tbe.dataBlkValid.isFull()); |
| assert(tbe.dataValid); |
| |
| assert(tbe.snpNeedsData || |
| (tbe.dataDirty && (tbe.reqType == CHIRequestType:SnpCleanInvalid)) || |
| ((tbe.dataDirty || tbe.dataUnique) && (tbe.reqType == CHIRequestType:SnpShared)) || |
| ((tbe.dataDirty || tbe.dataUnique) && (tbe.reqType == CHIRequestType:SnpUnique))); |
| |
| if (tbe.dataToBeInvalid) { |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| if (tbe.dataDirty) { |
| tbe.snd_msgType := CHIDataType:SnpRespData_I_PD; |
| } else { |
| tbe.snd_msgType := CHIDataType:SnpRespData_I; |
| } |
| } else if (tbe.dataToBeSharedClean) { |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| if (tbe.dataDirty) { |
| tbe.snd_msgType := CHIDataType:SnpRespData_SC_PD; |
| } else { |
| tbe.snd_msgType := CHIDataType:SnpRespData_SC; |
| } |
| } else { |
| assert(tbe.reqType == CHIRequestType:SnpOnce); |
| if (tbe.dataDirty && tbe.dataUnique) { |
| tbe.snd_msgType := CHIDataType:SnpRespData_UD; |
| } else if (tbe.dataDirty) { |
| tbe.snd_msgType := CHIDataType:SnpRespData_SD; |
| } else if (tbe.dataUnique) { |
| tbe.snd_msgType := CHIDataType:SnpRespData_UC; |
| } else { |
| tbe.snd_msgType := CHIDataType:SnpRespData_SC; |
| } |
| } |
| |
| tbe.snd_destination := tbe.requestor; |
| setupPendingSend(tbe); |
| } |
| |
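// The Send_CompData_Snp*Fwd actions below send CompData directly to the
// requestor of a forwarding snoop, record the forwarded state in
// tbe.fwdedState, and queue a SendSnpFwdedResp or SendSnpFwdedData event
// to notify the snoop originator afterwards.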
| action(Send_CompData_SnpUniqueFwd, desc="") { |
| assert(tbe.dataValid); |
| assert(tbe.dataToBeInvalid); |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
| if (tbe.dataDirty) { |
| tbe.fwdedState := State:UD; |
| tbe.snd_msgType := CHIDataType:CompData_UD_PD; |
| } else { |
| tbe.fwdedState := State:UC; |
| tbe.snd_msgType := CHIDataType:CompData_UC; |
| } |
| tbe.actions.pushFront(Event:SendSnpFwdedResp); |
| |
| tbe.snd_destination := tbe.fwdRequestor; |
| setupPendingSend(tbe); |
| } |
| |
| action(Send_CompData_SnpSharedFwd, desc="") { |
| assert(tbe.dataValid); |
| assert(tbe.dataToBeSharedClean); |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
| if (tbe.dataDirty) { |
| tbe.fwdedState := State:SD; |
| tbe.snd_msgType := CHIDataType:CompData_SD_PD; |
| } else { |
| tbe.fwdedState := State:SC; |
| tbe.snd_msgType := CHIDataType:CompData_SC; |
| } |
| if (tbe.snpNeedsData) { |
| tbe.actions.pushFront(Event:SendSnpFwdedData); |
| } else { |
| tbe.actions.pushFront(Event:SendSnpFwdedResp); |
| } |
| |
| tbe.snd_destination := tbe.fwdRequestor; |
| setupPendingSend(tbe); |
| } |
| |
| action(Send_CompData_SnpNSDFwd, desc="") { |
| assert(tbe.dataValid); |
| assert(tbe.dataToBeSharedClean); |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
| tbe.snd_msgType := CHIDataType:CompData_SC; |
| tbe.fwdedState := State:SC; |
| if (tbe.dataDirty || tbe.snpNeedsData) { |
| tbe.actions.pushFront(Event:SendSnpFwdedData); |
| } else { |
| tbe.actions.pushFront(Event:SendSnpFwdedResp); |
| } |
| |
| tbe.snd_destination := tbe.fwdRequestor; |
| setupPendingSend(tbe); |
| } |
| |
| action(Send_CompData_SnpOnceFwd, desc="") { |
| assert(tbe.dataValid); |
| |
| tbe.fwdedState := State:I; |
| tbe.snd_msgType := CHIDataType:CompData_I; |
| tbe.actions.pushFront(Event:SendSnpFwdedResp); |
| |
| tbe.snd_destination := tbe.fwdRequestor; |
| setupPendingSend(tbe); |
| } |
| |
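// Data response to the originator of a forwarding snoop, sent after the
// CompData went to the requestor. The type encodes our final state (SC or
// I), whether dirty data is passed (_PD), and the state we forwarded.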
| action(Send_SnpRespDataFwded, desc="") { |
| assert(tbe.dataValid); |
| |
  // currently used only for SnpSharedFwd/SnpNotSharedDirtyFwd, so check
| assert(tbe.dataToBeSharedClean); |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| |
  // keep the line in this cluster if upstream sharers remain or we
  // retain a valid local copy
| bool keepData := (tbe.dir_sharers.count() > 0) || |
| (tbe.dataToBeInvalid == false); |
| |
| if (keepData) { |
| if (tbe.fwdedState == State:SD) { |
| tbe.snd_msgType := CHIDataType:SnpRespData_SC_Fwded_SD_PD; |
| } else if (tbe.dataDirty && (tbe.fwdedState == State:SC)) { |
| tbe.snd_msgType := CHIDataType:SnpRespData_SC_PD_Fwded_SC; |
| } else { |
| tbe.snd_msgType := CHIDataType:SnpRespData_SC_Fwded_SC; |
| } |
| } else { |
| if (tbe.fwdedState == State:SD) { |
| tbe.snd_msgType := CHIDataType:SnpRespData_I_Fwded_SD_PD; |
| } else if (tbe.dataDirty && (tbe.fwdedState == State:SC)) { |
| tbe.snd_msgType := CHIDataType:SnpRespData_I_PD_Fwded_SC; |
| } else { |
| tbe.snd_msgType := CHIDataType:SnpRespData_I_Fwded_SC; |
| } |
| } |
| |
| tbe.snd_destination := tbe.requestor; |
| setupPendingSend(tbe); |
| } |
| |
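// Dataless response to the originator of a forwarding snoop, confirming
// the state we forwarded and reporting our final state: SC for
// SnpSharedFwd/SnpNotSharedDirtyFwd, unchanged for SnpOnceFwd, and I for
// SnpUniqueFwd.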
| action(Send_FwdSnpResp, desc="") { |
| assert(is_valid(tbe)); |
| assert(tbe.dataValid); |
| |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
| |
    // keep the line in this cluster if upstream sharers remain or we
    // retain a valid local copy
| bool keepData := (tbe.dir_sharers.count() > 0) || |
| (tbe.dataToBeInvalid == false); |
| |
| if (keepData && tbe.dataToBeSharedClean) { |
| assert((tbe.reqType == CHIRequestType:SnpSharedFwd) || |
| (tbe.reqType == CHIRequestType:SnpNotSharedDirtyFwd)); |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| if (tbe.fwdedState == State:SD) { |
| out_msg.type := CHIResponseType:SnpResp_SC_Fwded_SD_PD; |
| } else { |
| assert(tbe.fwdedState == State:SC); |
| out_msg.type := CHIResponseType:SnpResp_SC_Fwded_SC; |
| } |
| |
| } else if (keepData) { |
| assert(tbe.reqType == CHIRequestType:SnpOnceFwd); |
| assert(tbe.fwdedState == State:I); |
| if (tbe.dataUnique && (tbe.dataDirty || tbe.dataMaybeDirtyUpstream)) { |
| out_msg.type := CHIResponseType:SnpResp_UD_Fwded_I; |
| } else if (tbe.dataUnique) { |
| out_msg.type := CHIResponseType:SnpResp_UC_Fwded_I; |
| } else if (tbe.dataDirty || tbe.dataMaybeDirtyUpstream) { |
| out_msg.type := CHIResponseType:SnpResp_SD_Fwded_I; |
| } else { |
| out_msg.type := CHIResponseType:SnpResp_SC_Fwded_I; |
| } |
| |
| } else { |
| assert(tbe.reqType == CHIRequestType:SnpUniqueFwd); |
| assert(tbe.dataMaybeDirtyUpstream == false); |
| if (tbe.fwdedState == State:UD) { |
| out_msg.type := CHIResponseType:SnpResp_I_Fwded_UD_PD; |
| } else { |
| assert(tbe.fwdedState == State:UC); |
| out_msg.type := CHIResponseType:SnpResp_I_Fwded_UC; |
| } |
| } |
| } |
| } |
| |
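// Send one pending chunk of data. Each message carries a contiguous range
// of the pending byte mask, capped at data_channel_size bytes; if bytes
// remain pending, the next chunk is scheduled for the following cycle.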
| action(Send_Data, desc="") { |
| assert(tbe.snd_pendEv); |
| assert(tbe.snd_pendBytes.count() > 0); |
| tbe.snd_pendEv := false; |
| enqueue(datOutPort, CHIDataMsg, data_latency) { |
| out_msg.addr := tbe.addr; |
| out_msg.type := tbe.snd_msgType; |
| |
| int offset := tbe.snd_pendBytes.firstBitSet(true); |
| assert(offset < blockSize); |
| int range := tbe.snd_pendBytes.firstBitSet(false, offset) - offset; |
| assert((range > 0) && (range <= blockSize)); |
| if (range > data_channel_size) { |
| range := data_channel_size; |
| } |
| tbe.snd_pendBytes.setMask(offset, range, false); |
| |
| out_msg.dataBlk := tbe.dataBlk; |
| out_msg.bitMask.setMask(offset, range); |
| |
| out_msg.responder := machineID; |
| |
| out_msg.Destination.add(tbe.snd_destination); |
| } |
| |
| // send next chunk (if any) next cycle |
| scheduleSendData(tbe, 1); |
| } |
| |
| action(Send_RespSepData, desc="") { |
| assert(is_valid(tbe)); |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:RespSepData; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
| } |
| } |
| |
| action(Send_CompI, desc="") { |
| assert(is_valid(tbe)); |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:Comp_I; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
| } |
| } |
| |
| action(Send_CompUC, desc="") { |
| assert(is_valid(tbe)); |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:Comp_UC; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
| } |
| } |
| |
| action(Send_CompAck, desc="") { |
| assert(is_valid(tbe)); |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:CompAck; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(mapAddressToDownstreamMachine(tbe.addr)); |
| } |
| } |
| |
| action(Send_CompI_Stale, desc="") { |
| assert(is_valid(tbe)); |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:Comp_I; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
    // We don't know if this is a stale writeback or a bug, so flag the
    // response so the requestor can make further checks
| out_msg.stale := true; |
| } |
| } |
| |
| action(Send_CompDBIDResp, desc="") { |
| assert(is_valid(tbe)); |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:CompDBIDResp; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
| } |
| } |
| |
| action(Send_CompDBIDResp_Stale, desc="") { |
| assert(is_valid(tbe)); |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:CompDBIDResp; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
    // We don't know if this is a stale writeback or a bug, so flag the
    // response so the requestor can make further checks
| out_msg.stale := true; |
| } |
| } |
| |
| action(Send_DBIDResp, desc="") { |
| assert(is_valid(tbe)); |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:DBIDResp; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
| } |
| } |
| |
| action(Send_Comp_WU, desc="") { |
| assert(is_valid(tbe)); |
| enqueue(rspOutPort, CHIResponseMsg, comp_wu_latency + response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:Comp; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
| } |
| } |
| |
| action(Send_SnpRespI, desc="") { |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := address; |
| out_msg.type := CHIResponseType:SnpResp_I; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(tbe.requestor); |
| } |
| } |
| |
| action(Send_RetryAck, desc="") { |
| peek(retryTriggerInPort, RetryTriggerMsg) { |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := in_msg.addr; |
| out_msg.type := CHIResponseType:RetryAck; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(in_msg.retryDest); |
| } |
| } |
| } |
| |
| action(Send_PCrdGrant, desc="") { |
| peek(retryTriggerInPort, RetryTriggerMsg) { |
| enqueue(rspOutPort, CHIResponseMsg, response_latency) { |
| out_msg.addr := in_msg.addr; |
| out_msg.type := CHIResponseType:PCrdGrant; |
| out_msg.responder := machineID; |
| out_msg.Destination.add(in_msg.retryDest); |
| } |
| } |
| } |
| |
// Note on CheckUpgrade_FromStore/CheckUpgrade_FromCU/CheckUpgrade_FromRU:
// we will always get Comp_UC, but if our data is invalidated before the
// Comp_UC arrives we would need to go to UCE. Since we don't use the UCE
// state, we remain in the transient state and follow up with a ReadUnique.
// Note this assumes the responder knows we have invalid data when it sends
// us Comp_UC and does not register us as owner.
| |
| action(CheckUpgrade_FromStore, desc="") { |
| assert(is_HN == false); |
| if (tbe.dataUnique) { |
| // success, just send CompAck next |
| assert(tbe.dataValid); |
| } else { |
| tbe.actions.pushFront(Event:SendReadUnique); |
| } |
| tbe.actions.pushFront(Event:SendCompAck); |
| } |
| |
| action(CheckUpgrade_FromCU, desc="") { |
| assert(is_HN == false); |
| if (tbe.dataUnique == false) { |
| // actually failed, so just cancel the directory update |
| assert(tbe.dir_sharers.isElement(tbe.requestor) == false); |
| tbe.requestorToBeExclusiveOwner := false; |
| tbe.updateDirOnCompAck := false; |
| } |
  // otherwise there is nothing to do here besides acking the CleanUnique
| tbe.actions.pushFront(Event:SendCompAck); |
| } |
| |
| action(CheckUpgrade_FromRU, desc="") { |
| assert(is_HN == false); |
| if (tbe.dataUnique) { |
| // success, just send CompAck next |
| assert(tbe.dataValid); |
| } else { |
| // will need to get data instead |
| tbe.actions.pushFront(Event:SendReadUnique); |
| } |
| tbe.actions.pushFront(Event:SendCompAck); |
| } |
| |
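// When finalizing a transaction, copy the TBE data back into the cache
// entry if the final state holds valid data; otherwise deallocate the
// cache entry (if any).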
| action(Finalize_UpdateCacheFromTBE, desc="") { |
| assert(is_valid(tbe)); |
| State final := tbe.finalState; |
| if ((final == State:UD_RSC) || (final == State:SD_RSC) || (final == State:UC_RSC) || |
| (final == State:SC_RSC) || (final == State:UD) || (final == State:UD_T) || |
| (final == State:SD) || (final == State:UC) || (final == State:SC) || |
| (final == State:UC_RU) || (final == State:UD_RU) || (final == State:UD_RSD) || |
| (final == State:SD_RSD)) { |
| assert(tbe.dataBlkValid.isFull()); |
| assert(tbe.dataValid); |
| assert(is_valid(cache_entry)); |
| cache_entry.DataBlk := tbe.dataBlk; |
| DPRINTF(RubySlicc, "Cached data %s pfb %s\n", tbe.dataBlk, cache_entry.HWPrefetched); |
| } else { |
    // make sure we only deallocate the cache entry if the data is invalid
| assert(tbe.dataValid == false); |
| if (is_valid(cache_entry)) { |
| cache.deallocate(address); |
| unset_cache_entry(); |
| } |
| } |
| } |
| |
| action(Finalize_UpdateDirectoryFromTBE, desc="") { |
| assert(is_valid(tbe)); |
| State final := tbe.finalState; |
| if ((final == State:UD_RSC) || (final == State:SD_RSC) || (final == State:UC_RSC) || |
| (final == State:SC_RSC) || (final == State:UC_RU) || (final == State:UD_RU) || |
| (final == State:UD_RSD) || (final == State:SD_RSD) || (final == State:RU) || |
| (final == State:RSC) || (final == State:RSD) || (final == State:RUSD) || |
| (final == State:RUSC)) { |
| DirEntry dir_entry := getDirEntry(address); |
| assert(is_valid(dir_entry)); |
| assert(tbe.dir_sharers.count() > 0); |
| dir_entry.ownerExists := tbe.dir_ownerExists; |
| dir_entry.ownerIsExcl := tbe.dir_ownerIsExcl; |
| dir_entry.owner := tbe.dir_owner; |
| dir_entry.sharers := tbe.dir_sharers; |
| } else |