<!DOCTYPE html>
<html>
<head>
<!-- Global site tag (gtag.js) - Google Analytics -->
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-133422980-2"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-133422980-2');
</script>
<meta charset="utf-8">
<meta http-equiv="x-ua-compatible" content="ie=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>gem5</title>
<!-- SITE FAVICON -->
<link rel="shortcut icon" type="image/gif" href="/assets/img/gem5ColorVert.gif"/>
<link rel="canonical" href="http://localhost:4000/cache-actions/">
<link href='https://fonts.googleapis.com/css?family=Open+Sans:400,300,700,800,600' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Muli:400,300' rel='stylesheet' type='text/css'>
<!-- FONT AWESOME -->
<link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css">
<!-- BOOTSTRAP -->
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<!-- CUSTOM CSS -->
<link rel="stylesheet" href="/css/main.css">
</head>
<body>
<nav class="navbar navbar-expand-md navbar-light bg-light">
<a class="navbar-brand" href="/">
<img src="/assets/img/gem5ColorLong.gif" alt="gem5" height="45">
</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarNavDropdown" aria-controls="navbarNavDropdown" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarNavDropdown">
<ul class="navbar-nav ml-auto">
<li class="nav-item ">
<a class="nav-link" href="/">Home</a>
</li>
<li class="nav-item dropdown ">
<a class="nav-link dropdown-toggle" href="/about" id="navbarDropdownMenuLink" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
About
</a>
<div class="dropdown-menu" aria-labelledby="navbarDropdownMenuLink">
<a class="dropdown-item" href="/about">About</a>
<a class="dropdown-item" href="/publications">Publications</a>
<a class="dropdown-item" href="/governance">Governance</a>
</div>
</li>
<li class="nav-item dropdown active">
<a class="nav-link dropdown-toggle" href="#" id="navbarDropdownMenuLink" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
Documentation
</a>
<div class="dropdown-menu" aria-labelledby="navbarDropdownMenuLink">
<!-- Pull navigation from _data/documentation.yml -->
<a class="dropdown-item" href="/introduction">Introduction</a>
<a class="dropdown-item" href="/building">Getting Started</a>
<a class="dropdown-item" href="/environment">Modifying/Extending</a>
<a class="dropdown-item" href="/MSIintro">Modeling Cache Coherence with Ruby</a>
</div>
</li>
<li class="nav-item ">
<a class="nav-link" href="/contributing">Contributing</a>
</li>
<li class="nav-item ">
<a class="nav-link" href="/blog">Blog</a>
</li>
<li class="nav-item ">
<a class="nav-link" href="/search">Search</a>
</li>
</ul>
</div>
</nav>
<main>
<div class="sidenav-top">
<a href="/"><img src="/assets/img/gem5ColorLong.gif" height="80"></a>
<div class="search">
<form action="/search" method="get">
<!-- <label for="search-box"><i class="fa fa-search"></i></label> -->
<input type="text" name="query">
<button type="submit" name="submit"><i class="fa fa-search"></i></button>
</form>
</div>
</div>
<div class="sidenav">
<!-- Pull navigation from _data/documentation.yml -->
<a class="item" href="/introduction" role="button" aria-expanded="false" aria-controls="collapseExample">
Introduction
</a>
<div class="collapse " id="introduction">
</div>
<a class="item" data-toggle="collapse" href="#pt1" role="button" aria-expanded="false" aria-controls="collapseExample">
Getting Started
</a>
<div class="collapse " id="pt1">
<a class="subitem " href="/building">Building gem5</a>
<a class="subitem " href="/simple_config">Creating a simple configuration script</a>
<a class="subitem " href="/cache_config">Adding cache to configuration script</a>
<a class="subitem " href="/gem5_stats">Understanding gem5 statistics and output</a>
<a class="subitem " href="/example_configs">Using the default configuration scripts</a>
</div>
<a class="item" data-toggle="collapse" href="#pt2" role="button" aria-expanded="false" aria-controls="collapseExample">
Modifying/Extending
</a>
<div class="collapse " id="pt2">
<a class="subitem " href="/environment">Setting up your development environment</a>
<a class="subitem " href="/helloobject">Creating a very simple SimObject</a>
<a class="subitem " href="/debugging">Debugging gem5</a>
<a class="subitem " href="/events">Event-driven programming</a>
<a class="subitem " href="/parameters">Adding parameters to SimObjects and more events</a>
<a class="subitem " href="/memoryobject">Creating SimObjects in the memory system</a>
<a class="subitem " href="/simplecache">Creating a simple cache object</a>
</div>
<a class="item" data-toggle="collapse" href="#pt3" role="button" aria-expanded="false" aria-controls="collapseExample">
Modeling Cache Coherence with Ruby
</a>
<div class="collapse show" id="pt3">
<a class="subitem " href="/MSIintro">Introduction to Ruby</a>
<a class="subitem " href="/cache-intro">MSI example cache protocol</a>
<a class="subitem " href="/cache-declarations">Declaring a state machine</a>
<a class="subitem " href="/cache-in-ports">In port code blocks</a>
<a class="subitem active" href="/cache-actions">Action code blocks</a>
<a class="subitem " href="/cache-transitions">Transition code blocks</a>
<a class="subitem " href="/directory">MSI Directory implementation</a>
<a class="subitem " href="/MSIbuilding">Compiling a SLICC protocol</a>
<a class="subitem " href="/configuration">Configuring a simple Ruby system</a>
<a class="subitem " href="/running">Running the simple Ruby system</a>
<a class="subitem " href="/MSIdebugging">Debugging SLICC Protocols</a>
<a class="subitem " href="/simple-MI_example">Configuring for a standard protocol</a>
</div>
</div>
<div class="container" id="doc-container">
<div class="edit"><a href="https://github.com/gem5/new-website/tree/master/_pages/documentation/part3/cache-actions.md">Edit this page</a></div>
<dl>
<dt>authors</dt>
<dd>Jason Lowe-Power</dd>
</dl>
<h1 id="action-code-blocks">Action code blocks</h1>
<p>The next section of the state machine file contains the action blocks. Action
blocks are executed during a transition from one state to another and are
invoked by the transition code blocks (which we will discuss in the next
section). Actions are <em>single action</em> blocks: each should be small and
perform exactly one task, such as “send a message to the directory” or “pop the
head of the buffer”.</p>
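<p>As a rough sketch of the general shape (the <code class="highlighter-rouge">doSomething</code> name and its shorthand are hypothetical; the real actions for this protocol follow below), an action block looks like:</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(doSomething, "ds", desc="A short description for the HTML table") {
    // The code here runs whenever a transition that lists this action
    // is executed. The implicit address, cache_entry, and tbe variables
    // are available inside the block.
}
</code></pre></div></div>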
<p>The first action we will implement is an action to send a GetS request
to the directory. We need to send a GetS request to the directory
whenever we want to read some data that is not in the Modified or Shared
states in our cache. As previously mentioned, there are three variables
that are automatically populated inside the action block (like the
<code class="highlighter-rouge">in_msg</code> in <code class="highlighter-rouge">peek</code> blocks). <code class="highlighter-rouge">address</code> is the address that was passed
into the <code class="highlighter-rouge">trigger</code> function, <code class="highlighter-rouge">cache_entry</code> is the cache entry passed
into the <code class="highlighter-rouge">trigger</code> function, and <code class="highlighter-rouge">tbe</code> is the TBE passed into the
<code class="highlighter-rouge">trigger</code> function.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(sendGetS, "gS", desc="Send GetS to the directory") {
    enqueue(request_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GetS;
        out_msg.Destination.add(mapAddressToMachine(address,
                                MachineType:Directory));
        // See mem/protocol/RubySlicc_Exports.sm for possible sizes.
        out_msg.MessageSize := MessageSizeType:Control;
        // Set that the requestor is this machine so we get the response.
        out_msg.Requestor := machineID;
    }
}
</code></pre></div></div>
<p>When specifying the action block, there are two parameters: a description
and a “shorthand”. These two parameters are used in the HTML table generation.
The shorthand shows up in the transition cell, so it should be as short as
possible. SLICC provides a special syntax to allow for bold, superscript ('^'),
and spaces ('_') in the shorthand to help keep them short. The description also
shows up in the HTML table when you click on a particular action; it can be
longer and helps explain what the action does.</p>
<p>Next, in this action we are going to send a message to the directory on the
<code class="highlighter-rouge">request_out</code> port as declared above the <code class="highlighter-rouge">in_port</code> blocks. The
<code class="highlighter-rouge">enqueue</code> function is similar to the <code class="highlighter-rouge">peek</code> function since it requires a
code block. <code class="highlighter-rouge">enqueue</code>, however, has the special variable <code class="highlighter-rouge">out_msg</code>. In
the <code class="highlighter-rouge">enqueue</code> block, you can modify the <code class="highlighter-rouge">out_msg</code> with the current data.
The <code class="highlighter-rouge">enqueue</code> block takes three parameters: the message buffer to send
the message on, the type of the message, and a latency. This latency (1
cycle in the example above and throughout this cache controller) is the
<em>cache latency</em>. This is where you specify the latency of accessing the
cache, in this case for a miss. Below we will see that specifying the
latency for a hit is similar.</p>
<p>Inside the <code class="highlighter-rouge">enqueue</code> block is where the message data is populated. For
the address of the request, we can use the automatically populated
<code class="highlighter-rouge">address</code> variable. We are sending a GetS message, so we use that
message type. Next, we need to specify the destination of the message.
For this, we use the <code class="highlighter-rouge">mapAddressToMachine</code> function, which takes the
address and the machine type we are sending to and looks up the correct
<code class="highlighter-rouge">MachineID</code> based on the address. We call <code class="highlighter-rouge">Destination.add</code>
because <code class="highlighter-rouge">Destination</code> is a <code class="highlighter-rouge">NetDest</code> object, i.e., a bitmap of all
possible <code class="highlighter-rouge">MachineID</code>s.</p>
<p>Finally, we need to specify the message size (from
<code class="highlighter-rouge">mem/protocol/RubySlicc_Exports.sm</code>) and set ourselves as the requestor.
Setting this cache's <code class="highlighter-rouge">machineID</code> as the requestor allows the
directory either to respond to this cache directly or to forward the request to
another cache that will respond.</p>
<p>Similarly, we can create actions for sending the other get and put requests.
Note that get requests ask for data, while put requests are sent when we are
downgrading or evicting our copy of the data.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(sendGetM, "gM", desc="Send GetM to the directory") {
    enqueue(request_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:GetM;
        out_msg.Destination.add(mapAddressToMachine(address,
                                MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Requestor := machineID;
    }
}

action(sendPutS, "pS", desc="Send PutS to the directory") {
    enqueue(request_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:PutS;
        out_msg.Destination.add(mapAddressToMachine(address,
                                MachineType:Directory));
        out_msg.MessageSize := MessageSizeType:Control;
        out_msg.Requestor := machineID;
    }
}

action(sendPutM, "pM", desc="Send putM+data to the directory") {
    enqueue(request_out, RequestMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceRequestType:PutM;
        out_msg.Destination.add(mapAddressToMachine(address,
                                MachineType:Directory));
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Data;
        out_msg.Requestor := machineID;
    }
}
</code></pre></div></div>
<p>Next, we need to specify an action to send data to another cache for the
case where we get a request forwarded from the directory on behalf of another
cache. In this case, we have to peek into the forwarded request to find out
which cache is asking for the data. This peek code block is exactly the
same as the ones in the <code class="highlighter-rouge">in_port</code>. When you nest an <code class="highlighter-rouge">enqueue</code> block in a
<code class="highlighter-rouge">peek</code> block, both the <code class="highlighter-rouge">in_msg</code> and <code class="highlighter-rouge">out_msg</code> variables are available;
this is how we know which other cache to send the data to.
Additionally, in this action we use the <code class="highlighter-rouge">cache_entry</code> variable to get
the data to send to the other cache.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(sendCacheDataToReq, "cdR", desc="Send cache data to requestor") {
    assert(is_valid(cache_entry));
    peek(forward_in, RequestMsg) {
        enqueue(response_out, ResponseMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceResponseType:Data;
            out_msg.Destination.add(in_msg.Requestor);
            out_msg.DataBlk := cache_entry.DataBlk;
            out_msg.MessageSize := MessageSizeType:Data;
            out_msg.Sender := machineID;
        }
    }
}
</code></pre></div></div>
<p>Next, we specify actions for sending data to the directory and for sending
an invalidation ack to the original requestor on a forwarded request when
this cache does not have the data.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(sendCacheDataToDir, "cdD", desc="Send the cache data to the dir") {
    enqueue(response_out, ResponseMsg, 1) {
        out_msg.addr := address;
        out_msg.Type := CoherenceResponseType:Data;
        out_msg.Destination.add(mapAddressToMachine(address,
                                MachineType:Directory));
        out_msg.DataBlk := cache_entry.DataBlk;
        out_msg.MessageSize := MessageSizeType:Data;
        out_msg.Sender := machineID;
    }
}

action(sendInvAcktoReq, "iaR", desc="Send inv-ack to requestor") {
    peek(forward_in, RequestMsg) {
        enqueue(response_out, ResponseMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceResponseType:InvAck;
            out_msg.Destination.add(in_msg.Requestor);
            out_msg.DataBlk := cache_entry.DataBlk;
            out_msg.MessageSize := MessageSizeType:Control;
            out_msg.Sender := machineID;
        }
    }
}
</code></pre></div></div>
<p>Another required action is to decrement the number of acks we are
waiting for. This is used when we get an invalidation ack from another
cache so that we can track the total number of outstanding acks. For this
action, we assume that there is a valid TBE and we modify the implicit
<code class="highlighter-rouge">tbe</code> variable in the action block.</p>
<p>Additionally, we have another example of making debugging easier in
protocols: <code class="highlighter-rouge">APPEND_TRANSITION_COMMENT</code>. This function takes as a parameter a
string, or something that can easily be converted to a string (e.g., an
<code class="highlighter-rouge">int</code>). It modifies the <em>protocol trace</em> output, which we will
discuss in the debugging section. Each protocol trace line that executes this
action will also print the total number of acks this cache is still waiting on.
This is useful since the number of remaining acks is part of the cache block
state.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(decrAcks, "da", desc="Decrement the number of acks") {
    assert(is_valid(tbe));
    tbe.AcksOutstanding := tbe.AcksOutstanding - 1;
    APPEND_TRANSITION_COMMENT("Acks: ");
    APPEND_TRANSITION_COMMENT(tbe.AcksOutstanding);
}
</code></pre></div></div>
<p>We also need an action to store the ack count when we receive a message from
the directory that includes one. For this action, we peek into the directory's
response message to get the number of acks and store it in the (required to be
valid) TBE.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(storeAcks, "sa", desc="Store the needed acks to the TBE") {
    assert(is_valid(tbe));
    peek(response_in, ResponseMsg) {
        tbe.AcksOutstanding := in_msg.Acks + tbe.AcksOutstanding;
    }
    assert(tbe.AcksOutstanding &gt; 0);
}
</code></pre></div></div>
<p>The next set of actions are to respond to CPU requests on hits and
misses. For these actions, we need to notify the sequencer (the
interface between Ruby and the rest of gem5) of the new data. In the
case of a store, we give the sequencer a pointer to the data block and
the sequencer updates the data in-place.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(loadHit, "Lh", desc="Load hit") {
    assert(is_valid(cache_entry));
    cacheMemory.setMRU(cache_entry);
    sequencer.readCallback(address, cache_entry.DataBlk, false);
}

action(externalLoadHit, "xLh", desc="External load hit (was a miss)") {
    assert(is_valid(cache_entry));
    peek(response_in, ResponseMsg) {
        cacheMemory.setMRU(cache_entry);
        // Forward the type of machine that responded to this request
        // E.g., another cache or the directory. This is used for tracking
        // statistics.
        sequencer.readCallback(address, cache_entry.DataBlk, true,
                               machineIDToMachineType(in_msg.Sender));
    }
}

action(storeHit, "Sh", desc="Store hit") {
    assert(is_valid(cache_entry));
    cacheMemory.setMRU(cache_entry);
    // The same as the read callback above.
    sequencer.writeCallback(address, cache_entry.DataBlk, false);
}

action(externalStoreHit, "xSh", desc="External store hit (was a miss)") {
    assert(is_valid(cache_entry));
    peek(response_in, ResponseMsg) {
        cacheMemory.setMRU(cache_entry);
        sequencer.writeCallback(address, cache_entry.DataBlk, true,
                                // Note: this could be the last ack.
                                machineIDToMachineType(in_msg.Sender));
    }
}

action(forwardEviction, "e", desc="sends eviction notification to CPU") {
    if (send_evictions) {
        sequencer.evictionCallback(address);
    }
}
</code></pre></div></div>
<p>In each of these actions, it is vital that we call <code class="highlighter-rouge">setMRU</code> on the cache
entry. The <code class="highlighter-rouge">setMRU</code> function is what allows the replacement policy to
know which blocks were most recently accessed. If you leave out the
<code class="highlighter-rouge">setMRU</code> call, the replacement policy will not operate correctly!</p>
<p>On loads and stores, we call the <code class="highlighter-rouge">readCallback</code> or <code class="highlighter-rouge">writeCallback</code> function on the
<code class="highlighter-rouge">sequencer</code>. This notifies the sequencer of the new data or allows it to
write the data into the data block. These functions take four parameters
(the last is optional): the address, the data block, a boolean indicating whether
the original request was a miss, and an optional <code class="highlighter-rouge">MachineType</code>.
The final parameter is used for tracking statistics on where
the data for the request was found; it lets you track whether the
data came from a cache-to-cache transfer or from memory.</p>
<p>Finally, we also have an action to forward evictions to the CPU. This is
required for gem5's out-of-order models to squash speculative loads if
the cache block is evicted before the load is committed. We use the
<code class="highlighter-rouge">send_evictions</code> parameter specified at the top of the state machine file to check
whether this is needed.</p>
<p>Next, we have a set of cache management actions that allocate and free
cache entries and TBEs. To create a new cache entry, we must have space
in the <code class="highlighter-rouge">CacheMemory</code> object; then we can call its <code class="highlighter-rouge">allocate</code> function.
This <code class="highlighter-rouge">allocate</code> function does not actually allocate the host memory for the
cache entry, because this controller has specialized the <code class="highlighter-rouge">Entry</code> type; that is
why we need to pass a <code class="highlighter-rouge">new Entry</code> to the <code class="highlighter-rouge">allocate</code> function.
Additionally, in these actions we call <code class="highlighter-rouge">set_cache_entry</code>,
<code class="highlighter-rouge">unset_cache_entry</code>, and the corresponding functions for the TBE. These set and
unset the implicit variables that were passed in via the <code class="highlighter-rouge">trigger</code>
function. For instance, when allocating a new cache block we call
<code class="highlighter-rouge">set_cache_entry</code>, and in all actions following <code class="highlighter-rouge">allocateCacheBlock</code> the
<code class="highlighter-rouge">cache_entry</code> variable will be valid.</p>
<p>There is also an action that copies the data from the cache data block
to the TBE. This allows us to keep the data around even after removing
the cache block, until we are sure that this cache is no longer
responsible for the data.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(allocateCacheBlock, "a", desc="Allocate a cache block") {
    assert(is_invalid(cache_entry));
    assert(cacheMemory.cacheAvail(address));
    set_cache_entry(cacheMemory.allocate(address, new Entry));
}

action(deallocateCacheBlock, "d", desc="Deallocate a cache block") {
    assert(is_valid(cache_entry));
    cacheMemory.deallocate(address);
    // clear the cache_entry variable (now it's invalid)
    unset_cache_entry();
}

action(writeDataToCache, "wd", desc="Write data to the cache") {
    peek(response_in, ResponseMsg) {
        assert(is_valid(cache_entry));
        cache_entry.DataBlk := in_msg.DataBlk;
    }
}

action(allocateTBE, "aT", desc="Allocate TBE") {
    assert(is_invalid(tbe));
    TBEs.allocate(address);
    // this updates the tbe variable for other actions
    set_tbe(TBEs[address]);
}

action(deallocateTBE, "dT", desc="Deallocate TBE") {
    assert(is_valid(tbe));
    TBEs.deallocate(address);
    // this makes the tbe variable invalid
    unset_tbe();
}

action(copyDataFromCacheToTBE, "Dct", desc="Copy data from cache to TBE") {
    assert(is_valid(cache_entry));
    assert(is_valid(tbe));
    tbe.DataBlk := cache_entry.DataBlk;
}
</code></pre></div></div>
<p>The next set of actions are for managing the message buffers. We need to
add actions to pop the head message off of the buffers after the message
has been satisfied. The <code class="highlighter-rouge">dequeue</code> function takes a single parameter, a
time for the dequeue to take place. Delaying the dequeue for a cycle
prevents the <code class="highlighter-rouge">in_port</code> logic from consuming another message from the
same message buffer in a single cycle.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(popMandatoryQueue, "pQ", desc="Pop the mandatory queue") {
    mandatory_in.dequeue(clockEdge());
}

action(popResponseQueue, "pR", desc="Pop the response queue") {
    response_in.dequeue(clockEdge());
}

action(popForwardQueue, "pF", desc="Pop the forward queue") {
    forward_in.dequeue(clockEdge());
}
</code></pre></div></div>
<p>Finally, the last action is a stall. Below, we use a <code class="highlighter-rouge">z_stall</code>,
which is the simplest kind of stall in SLICC. Leaving the action
blank generates a “protocol stall” in the <code class="highlighter-rouge">in_port</code> logic, which
prevents any messages from being processed in the current message buffer
and in all lower-priority message buffers. Protocols using <code class="highlighter-rouge">z_stall</code> are
usually simpler, but have lower performance, since a stall on a high-priority
buffer can stall many requests that do not need to be stalled.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>action(stall, "z", desc="Stall the incoming request") {
    // z_stall
}
</code></pre></div></div>
<p>There are two other ways to deal with messages that cannot currently be
processed that can improve the performance of protocols. (Note: We will
not be using these more complicated techniques in this simple example
protocol.) The first is <code class="highlighter-rouge">recycle</code>. The message buffers have a <code class="highlighter-rouge">recycle</code>
function that moves the request on the head of the queue to the tail.
This allows other requests in the buffer or requests in other buffers to
be processed immediately. <code class="highlighter-rouge">recycle</code> actions often improve the
performance of protocols significantly.</p>
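<p>For illustration only (this tutorial's protocol does not use <code class="highlighter-rouge">recycle</code>), a recycle-based stall in other gem5 protocols looks roughly like the sketch below. The action name is hypothetical, and the <code class="highlighter-rouge">recycle</code> call assumes a <code class="highlighter-rouge">recycle_latency</code> controller parameter that this tutorial's state machine does not declare.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Hypothetical sketch: move the head of the forward queue to its tail
// so that other messages can be processed first.
action(recycleForwardQueue, "zf", desc="Recycle the head of the forward queue") {
    forward_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
</code></pre></div></div>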
<p>However, <code class="highlighter-rouge">recycle</code> is not very realistic when compared to real
implementations of cache coherence. For a more realistic
high-performance solution to stalling messages, Ruby provides the
<code class="highlighter-rouge">stall_and_wait</code> function on message buffers. This function takes the
head request and moves it into a separate structure tagged by an
address. The address is user-specified, but is usually the request’s
address. Later, when the blocked request can be handled,
<code class="highlighter-rouge">wakeUpBuffers(address)</code> wakes up all requests stalled on
<code class="highlighter-rouge">address</code>, and <code class="highlighter-rouge">wakeUpAllBuffers()</code> wakes up every stalled
request. When a request is “woken up”, it is placed back into the message
buffer and subsequently processed.</p>
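<p>Again for illustration only (this tutorial's protocol sticks with the simple <code class="highlighter-rouge">z_stall</code> action above), a <code class="highlighter-rouge">stall_and_wait</code> approach in other gem5 protocols looks roughly like the sketch below; the action names here are hypothetical.</p>
<div class="highlighter-rouge"><div class="highlight"><pre class="highlight"><code>// Hypothetical sketch: park the head of the forward queue, tagged by its
// address, instead of blocking the whole buffer.
action(stallAndWaitForward, "zw", desc="Stall and wait on the forward queue") {
    stall_and_wait(forward_in, address);
}

// Later, once the block can make progress again, wake up everything that
// was stalled on this address.
action(wakeUpDependents, "wu", desc="Wake up requests stalled on this address") {
    wakeUpBuffers(address);
}
</code></pre></div></div>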
<br>
<!-- RETRIEVE PREVIOUS PAGE LINK -->
<!-- RETRIEVE NEXT PAGE LINK -->
<div class="navbuttons">
<a href="/cache-in-ports"><button type="button" class="btn btn-outline-primary">PREVIOUS</button></a>
<a href="/cache-transitions"><button type="button" class="btn btn-outline-primary">NEXT</button></a>
</div>
</div>
</main>
<script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.14.3/umd/popper.min.js" integrity="sha384-ZMP7rVo3mIykV+2+9J3UJ46jBk0WLaUAdn689aCwoqbBJiSnjAK/l8WvCWPIPm49" crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js" integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy" crossorigin="anonymous"></script>
<script src="https://unpkg.com/commentbox.io/dist/commentBox.min.js"></script>
<script>
// When the user scrolls down 20px from the top of the document, show the button
window.onscroll = function() {scrollFunction()};
function scrollFunction() {
  // Guard against pages (like this one) that do not include the #myBtn element.
  var btn = document.getElementById("myBtn");
  if (!btn) { return; }
  if (document.body.scrollTop > 20 || document.documentElement.scrollTop > 20) {
    btn.style.display = "block";
  } else {
    btn.style.display = "none";
  }
}
// When the user clicks on the button, scroll to the top of the document
function topFunction() {
document.body.scrollTop = 0;
document.documentElement.scrollTop = 0;
}
// commentBox is available as a global variable via the CDN script included above.
commentBox('my-project-id');
</script>
</body>
</html>