/*
 * Copyright (c) 2017 Jason Lowe-Power
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * This file contains a simple example MSI protocol.
 *
 * The protocol in this file is based off of the MSI protocol found in
 * A Primer on Memory Consistency and Cache Coherence
 *      Daniel J. Sorin, Mark D. Hill, and David A. Wood
 *      Synthesis Lectures on Computer Architecture 2011 6:3, 141-149
 *
 * Table 8.1 contains the transitions and actions found in this file and
 * section 8.2.4 explains the protocol in detail.
 *
 * See Learning gem5 Part 3: Ruby for more details.
 *
 * Authors: Jason Lowe-Power
 */

/// Declare a machine with type L1Cache.
machine(MachineType:L1Cache, "MSI cache")
    : Sequencer *sequencer; // Incoming requests from the CPU arrive via this
      CacheMemory *cacheMemory; // This stores the data and cache states
      bool send_evictions; // Needed to support O3 CPU and mwait

      // Other declarations
      // Message buffers are required to send and receive data from the Ruby
      // network. The from/to and request/response can be confusing!
      // Virtual networks are needed to prevent deadlock (e.g., it is bad if a
      // response gets stuck behind a stalled request). In this protocol, we are
      // using three virtual networks. Responses have the highest priority,
      // followed by forwarded requests; requests have the lowest priority.

      // Requests *to* the directory
      MessageBuffer * requestToDir, network="To", virtual_network="0",
            vnet_type="request";
      // Responses *to* the directory or other caches
      MessageBuffer * responseToDirOrSibling, network="To", virtual_network="2",
            vnet_type="response";

      // Requests *from* the directory for fwds, invs, and put acks.
      MessageBuffer * forwardFromDir, network="From", virtual_network="1",
            vnet_type="forward";
      // Responses *from* directory and other caches for this cache's reqs.
      MessageBuffer * responseFromDirOrSibling, network="From",
            virtual_network="2", vnet_type="response";

      // This is all of the incoming requests from the core via the sequencer
      MessageBuffer * mandatoryQueue;
{
    // Declare the states that this cache will use. These are both stable
    // states (no underscore) and transient states (with underscore). Letters
    // after the underscores are superscript in Sorin et al.
    // Underscores and "desc" are used when generating HTML tables.
    // Access permissions are used for functional accesses. For reads, the
    // functional access reads *all* of the blocks with a matching address that
    // have read-only or read-write permission. For functional writes, all
    // blocks are updated with new data if they have busy, read-only, or
    // read-write permission.
    state_declaration(State, desc="Cache states") {
        I,      AccessPermission:Invalid,
                    desc="Not present/Invalid";

        // States moving out of I
        IS_D,   AccessPermission:Invalid,
                    desc="Invalid, moving to S, waiting for data";
        IM_AD,  AccessPermission:Invalid,
                    desc="Invalid, moving to M, waiting for acks and data";
        IM_A,   AccessPermission:Busy,
                    desc="Invalid, moving to M, waiting for acks";

        S,      AccessPermission:Read_Only,
                    desc="Shared. Read-only, other caches may have the block";

        // States moving out of S
        SM_AD,  AccessPermission:Read_Only,
                    desc="Shared, moving to M, waiting for acks and 'data'";
        SM_A,   AccessPermission:Read_Only,
                    desc="Shared, moving to M, waiting for acks";

        M,      AccessPermission:Read_Write,
                    desc="Modified. Read & write permissions. Owner of block";

        // States moving to Invalid
        MI_A,   AccessPermission:Busy,
                    desc="Was modified, moving to I, waiting for put ack";
        SI_A,   AccessPermission:Busy,
                    desc="Was shared, moving to I, waiting for put ack";
        II_A,   AccessPermission:Invalid,
                    desc="Sent valid data before receiving put ack. ";
                         //"Waiting for put ack.";
    }

    // Events that can be triggered on incoming messages. These are the events
    // that will trigger transitions
    enumeration(Event, desc="Cache events") {
        // From the processor/sequencer/mandatory queue
        Load,           desc="Load from processor";
        Store,          desc="Store from processor";

        // Internal event (only triggered from processor requests)
        Replacement,    desc="Triggered when block is chosen as victim";

        // Forwarded request from another cache via the dir on the forward network
        FwdGetS,        desc="Directory sent us a request to satisfy GetS. ";
                             //"We must have the block in M to respond to this.";
        FwdGetM,        desc="Directory sent us a request to satisfy GetM. ";
                             //"We must have the block in M to respond to this.";
        Inv,            desc="Invalidate from the directory.";
        PutAck,         desc="Response from directory after we issue a put. ";
                             //"This must be on the fwd network to avoid";
                             //"deadlock.";

        // Responses from directory
        DataDirNoAcks,  desc="Data from directory (acks = 0)";
        DataDirAcks,    desc="Data from directory (acks > 0)";

        // Responses from other caches
        DataOwner,      desc="Data from owner";
        InvAck,         desc="Invalidation ack from other cache after Inv";

        // Special internally triggered event to simplify implementation
        LastInvAck,     desc="Triggered after the last ack is received";
    }

    // A structure for the cache entry. This stores the cache data and state
    // as defined above. You can put any other information here you like.
    // The AbstractCacheEntry is defined in
    // src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
    // If you want to use any of the functions in the abstract entry declare
    // them here.
    structure(Entry, desc="Cache entry", interface="AbstractCacheEntry") {
        State CacheState,        desc="cache state";
        DataBlock DataBlk,       desc="Data in the block";
    }

    // TBE is the "transaction buffer entry". This stores information needed
    // during transient states. This is *like* an MSHR. It functions as an MSHR
    // in this protocol, but the entry is also allocated for other uses.
    structure(TBE, desc="Entry for transient requests") {
        State TBEState,         desc="State of block";
        DataBlock DataBlk,      desc="Data for the block. Needed for MI_A";
        int AcksOutstanding, default=0, desc="Number of acks left to receive.";
    }

    // Table of TBE entries. This is defined externally in
    // src/mem/ruby/structures/TBETable.hh. It is templatized on the TBE
    // structure defined above.
    structure(TBETable, external="yes") {
      TBE lookup(Addr);
      void allocate(Addr);
      void deallocate(Addr);
      bool isPresent(Addr);
    }

    /*************************************************************************/
    // Some declarations of member functions and member variables.

    // The TBE table for this machine. It is templatized under the covers.
    // NOTE: SLICC mangles names with the machine type. Thus, the TBE declared
    //       above will be L1Cache_TBE in C++.
    // We also have to pass a machine parameter through to the TBETable.
    TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";

    // Declare all of the functions of the AbstractController that we may use
    // in this file.
    // Functions from clocked object
    Tick clockEdge();

    // Functions we must use to set things up for the transitions to execute
    // correctly.
    // These next set/unset functions are used to populate the implicit
    // variables used in actions. This is required when a transition has
    // multiple actions.
    void set_cache_entry(AbstractCacheEntry a);
    void unset_cache_entry();
    void set_tbe(TBE b);
    void unset_tbe();

    // Given an address and machine type this queries the network to check
    // where it should be sent. In a real implementation, this might be fixed
    // at design time, but this function gives us flexibility at runtime.
    // For example, if you have multiple memory channels, this function will
    // tell you which addresses to send to which memory controller.
    MachineID mapAddressToMachine(Addr addr, MachineType mtype);

    // Convenience function to look up the cache entry.
    // Needs a pointer so it will be a reference and can be updated in actions
    Entry getCacheEntry(Addr address), return_by_pointer="yes" {
        return static_cast(Entry, "pointer", cacheMemory.lookup(address));
    }

    /*************************************************************************/
    // Functions that we need to define/override to use our specific structures
    // in this implementation.

    // Required function for getting the current state of the block.
    // This is called from the transition to know which transition to execute
    State getState(TBE tbe, Entry cache_entry, Addr addr) {
        // The TBE state will override the state in cache memory, if valid
        if (is_valid(tbe)) { return tbe.TBEState; }
        // Next, if the cache entry is valid, it holds the state
        else if (is_valid(cache_entry)) { return cache_entry.CacheState; }
        // If the block isn't present, then its state must be I.
        else { return State:I; }
    }


    // Required function for setting the current state of the block.
    // This is called from the transition to set the ending state.
    // Needs to set both the TBE and the cache entry state.
    // This is also called when transitioning to I, so it's possible that the
    // TBE and/or the cache_entry is invalid.
    void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
      if (is_valid(tbe)) { tbe.TBEState := state; }
      if (is_valid(cache_entry)) { cache_entry.CacheState := state; }
    }

    // Required function to override. Used for functional access to know where
    // the valid data is. NOTE: L1Cache_State_to_permission is automatically
    // created based on the access permissions in the state_declaration.
    // This is mangled by both the MachineType and the name of the state
    // declaration ("State" in this case)
    AccessPermission getAccessPermission(Addr addr) {
        TBE tbe := TBEs[addr];
        if(is_valid(tbe)) {
            return L1Cache_State_to_permission(tbe.TBEState);
        }

        Entry cache_entry := getCacheEntry(addr);
        if(is_valid(cache_entry)) {
            return L1Cache_State_to_permission(cache_entry.CacheState);
        }

        return AccessPermission:NotPresent;
    }

    // Required function to override. Like the function above, but this sets
    // the permission based on the given state.
    void setAccessPermission(Entry cache_entry, Addr addr, State state) {
        if (is_valid(cache_entry)) {
            cache_entry.changePermission(L1Cache_State_to_permission(state));
        }
    }

    // Required function to override for functionally reading/writing data.
    // NOTE: testAndRead/Write defined in src/mem/ruby/slicc_interface/Util.hh
    void functionalRead(Addr addr, Packet *pkt) {
        TBE tbe := TBEs[addr];
        if(is_valid(tbe)) {
            testAndRead(addr, tbe.DataBlk, pkt);
        } else {
            testAndRead(addr, getCacheEntry(addr).DataBlk, pkt);
        }
    }

    int functionalWrite(Addr addr, Packet *pkt) {
        TBE tbe := TBEs[addr];
        if(is_valid(tbe)) {
            if (testAndWrite(addr, tbe.DataBlk, pkt)) {
                return 1;
            } else {
                return 0;
            }
        } else {
            if (testAndWrite(addr, getCacheEntry(addr).DataBlk, pkt)) {
                return 1;
            } else {
                return 0;
            }
        }
    }

    /*************************************************************************/
    // Input/output network definitions

    // Output ports. This defines the message types that will flow across the
    // output buffers as defined above. These must be "to" networks.
    // "request_out" is the name we'll use later to send requests.
    // "RequestMsg" is the message type we will send (see MSI-msg.sm)
    // "requestToDir" is the name of the MessageBuffer declared above that
    //      we are sending these requests out of.
    out_port(request_out, RequestMsg, requestToDir);
    out_port(response_out, ResponseMsg, responseToDirOrSibling);
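
    // For quick reference, the RequestMsg type used on request_out is declared
    // in MSI-msg.sm. A rough sketch of that declaration is shown below (field
    // names match how out_msg is populated later in this file; see MSI-msg.sm
    // for the authoritative definition):
    //
    //   structure(RequestMsg, desc="Protocol request message",
    //             interface="Message") {
    //       Addr addr,                   desc="Physical address of the block";
    //       CoherenceRequestType Type,   desc="GetS, GetM, PutS, PutM, ...";
    //       MachineID Requestor,         desc="Cache that issued the request";
    //       NetDest Destination,         desc="Destination machine(s)";
    //       DataBlock DataBlk,           desc="Data (only used for PutM)";
    //       MessageSizeType MessageSize, desc="Control or Data";
    //   }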

    // Input ports. The order here can be important. The code in each
    // in_port is executed in the order specified in this file (or by the rank
    // parameter). Thus, we must sort these based on the network priority.
    // In this cache, the order is responses from other caches, forwards, then
    // requests from the CPU.

    // Like the out_port above
    // "response_in" is the name we'll use later when we refer to this port
    // "ResponseMsg" is the type of message we expect on this port
    // "responseFromDirOrSibling" is the name of the buffer this in_port is
    // connected to for responses from other caches and the directory.
    in_port(response_in, ResponseMsg, responseFromDirOrSibling) {
        // NOTE: You have to check to make sure the message buffer has a valid
        // message at the head. The code in in_port is executed either way.
        if (response_in.isReady(clockEdge())) {
            // Peek is a special function. Any code inside a peek statement has
            // a special variable declared and populated: in_msg. This contains
            // the message (of type ResponseMsg in this case) at the head.
            // "response_in" is the port we want to peek into
            // "ResponseMsg" is the type of message we expect.
            peek(response_in, ResponseMsg) {
                // Grab the entry and tbe if they exist.
                Entry cache_entry := getCacheEntry(in_msg.addr);
                TBE tbe := TBEs[in_msg.addr];
                // The TBE better exist since this is a response and we need to
                // be able to check the remaining acks.
                assert(is_valid(tbe));

                // If it's from the directory...
                if (machineIDToMachineType(in_msg.Sender) ==
                            MachineType:Directory) {
                    if (in_msg.Type != CoherenceResponseType:Data) {
                        error("Directory should only reply with data");
                    }
                    // Add the acks requested by the directory to the count
                    // already in the TBE. InvAcks that arrive before the
                    // directory's response drive tbe.AcksOutstanding below 0,
                    // so if the sum is 0 all needed acks have already arrived
                    // and we don't need to wait for any more.
                    assert(in_msg.Acks + tbe.AcksOutstanding >= 0);
                    if (in_msg.Acks + tbe.AcksOutstanding == 0) {
                        trigger(Event:DataDirNoAcks, in_msg.addr, cache_entry,
                                tbe);
                    } else {
                        // If it's not 0, then we need to wait for more acks
                        // and we'll trigger LastInvAck later.
                        trigger(Event:DataDirAcks, in_msg.addr, cache_entry,
                                tbe);
                    }
                } else {
                    // This is from another cache.
                    if (in_msg.Type == CoherenceResponseType:Data) {
                        trigger(Event:DataOwner, in_msg.addr, cache_entry,
                                tbe);
                    } else if (in_msg.Type == CoherenceResponseType:InvAck) {
                        DPRINTF(RubySlicc, "Got inv ack. %d left\n",
                                tbe.AcksOutstanding);
                        if (tbe.AcksOutstanding == 1) {
                            // If there is exactly one ack remaining then we
                            // know it is the last ack.
                            trigger(Event:LastInvAck, in_msg.addr, cache_entry,
                                    tbe);
                        } else {
                            trigger(Event:InvAck, in_msg.addr, cache_entry,
                                    tbe);
                        }
                    } else {
                        error("Unexpected response from other cache");
                    }
                }
            }
        }
    }

    // Forward requests for other caches.
    in_port(forward_in, RequestMsg, forwardFromDir) {
        if (forward_in.isReady(clockEdge())) {
            peek(forward_in, RequestMsg) {
                // Grab the entry and tbe if they exist.
                Entry cache_entry := getCacheEntry(in_msg.addr);
                TBE tbe := TBEs[in_msg.addr];

                if (in_msg.Type == CoherenceRequestType:GetS) {
                    // This is a special function that will trigger a
                    // transition (as defined below). It *must* have these
                    // parameters.
                    trigger(Event:FwdGetS, in_msg.addr, cache_entry, tbe);
                } else if (in_msg.Type == CoherenceRequestType:GetM) {
                    trigger(Event:FwdGetM, in_msg.addr, cache_entry, tbe);
                } else if (in_msg.Type == CoherenceRequestType:Inv) {
                    trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
                } else if (in_msg.Type == CoherenceRequestType:PutAck) {
                    trigger(Event:PutAck, in_msg.addr, cache_entry, tbe);
                } else {
                    error("Unexpected forward message!");
                }
            }
        }
    }

    // The "mandatory queue" is the port/queue from the CPU or other processor.
    // This is *always* a RubyRequest
    in_port(mandatory_in, RubyRequest, mandatoryQueue) {
        if (mandatory_in.isReady(clockEdge())) {
            // Block all requests if there is already an outstanding request
            // that has the same line address. This is unblocked when we
            // finally respond to the request.
            peek(mandatory_in, RubyRequest, block_on="LineAddress") {
                // NOTE: Using LineAddress here to promote smaller requests to
                // full cache block requests.
                Entry cache_entry := getCacheEntry(in_msg.LineAddress);
                TBE tbe := TBEs[in_msg.LineAddress];
                // If there isn't a matching entry and no room in the cache,
                // then we need to find a victim.
                if (is_invalid(cache_entry) &&
                        cacheMemory.cacheAvail(in_msg.LineAddress) == false ) {
                    // make room for the block
                    // The "cacheProbe" function looks at the cache set for
                    // the address and queries the replacement protocol for
                    // the address to replace. It returns the address to repl.
                    Addr addr := cacheMemory.cacheProbe(in_msg.LineAddress);
                    Entry victim_entry := getCacheEntry(addr);
                    TBE victim_tbe := TBEs[addr];
                    trigger(Event:Replacement, addr, victim_entry, victim_tbe);
                } else {
                    if (in_msg.Type == RubyRequestType:LD ||
                            in_msg.Type == RubyRequestType:IFETCH) {
                        trigger(Event:Load, in_msg.LineAddress, cache_entry,
                                tbe);
                    } else if (in_msg.Type == RubyRequestType:ST) {
                        trigger(Event:Store, in_msg.LineAddress, cache_entry,
                                tbe);
                    } else {
                        error("Unexpected type from processor");
                    }
                }
            }
        }
    }


    /*************************************************************************/
    // Below are all of the actions that might be taken on a transition.

    // Each action has a name, a shorthand, and a description.
    // The shorthand is used when generating the HTML tables for the protocol.
    // A "\" in the shorthand causes that letter to be bold. Underscores insert
    // a space, and ^ makes the rest of the letters superscript.
    // The description is also shown in the HTML table when clicked

    // The first set of actions are things we will do to interact with the
    // rest of the system. Things like sending requests/responses.

    // Action blocks define a number of implicit variables that are useful.
    // These variables come straight from the trigger() call in the in_port
    // blocks.
    // address: The address passed in the trigger (usually the in_msg.addr,
    //          though it can be different. E.g., on a replacement it is the
    //          victim address).
    // cache_entry: The cache entry passed in the trigger call
    // tbe: The TBE passed in the trigger call
    action(sendGetS, 'gS', desc="Send GetS to the directory") {
        // The syntax for enqueue is a lot like peek. Instead of populating
        // in_msg, enqueue provides an out_msg reference. Whatever you set on
        // out_msg is sent through the out port specified.
        // "request_out" is the port we're sending the message out of.
        // "RequestMsg" is the type of message we're sending.
        // "1" is the latency (in cycles) the port waits before sending.
        enqueue(request_out, RequestMsg, 1) {
            out_msg.addr := address;
            // This type is defined in MSI-msg.sm for this protocol.
            out_msg.Type := CoherenceRequestType:GetS;
            // The destination may change depending on the address striping
            // across different directories, so query the network.
            out_msg.Destination.add(mapAddressToMachine(address,
                                    MachineType:Directory));
            // See mem/protocol/RubySlicc_Exports.sm for possible sizes.
            out_msg.MessageSize := MessageSizeType:Control;
            // Set the requestor to this machine so we get the response.
            out_msg.Requestor := machineID;
        }
    }

    action(sendGetM, "gM", desc="Send GetM to the directory") {
        enqueue(request_out, RequestMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceRequestType:GetM;
            out_msg.Destination.add(mapAddressToMachine(address,
                                    MachineType:Directory));
            out_msg.MessageSize := MessageSizeType:Control;
            out_msg.Requestor := machineID;
        }
    }

    // NOTE: Clean evict. Required to keep the directory state up-to-date
    action(sendPutS, "pS", desc="Send PutS to the directory") {
        enqueue(request_out, RequestMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceRequestType:PutS;
            out_msg.Destination.add(mapAddressToMachine(address,
                                    MachineType:Directory));
            out_msg.MessageSize := MessageSizeType:Control;
            out_msg.Requestor := machineID;
        }
    }

    action(sendPutM, "pM", desc="Send PutM+data to the directory") {
        enqueue(request_out, RequestMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceRequestType:PutM;
            out_msg.Destination.add(mapAddressToMachine(address,
                                    MachineType:Directory));
            out_msg.DataBlk := cache_entry.DataBlk;
            out_msg.MessageSize := MessageSizeType:Data;
            out_msg.Requestor := machineID;
        }
    }

    action(sendCacheDataToReq, "cdR", desc="Send cache data to requestor") {
        // We have to peek into the request to see who to send to.
        // If we are in both the peek and the enqueue block then we have access
        // to both in_msg and out_msg.
        assert(is_valid(cache_entry));
        peek(forward_in, RequestMsg) {
            enqueue(response_out, ResponseMsg, 1) {
                out_msg.addr := address;
                out_msg.Type := CoherenceResponseType:Data;
                out_msg.Destination.add(in_msg.Requestor);
                out_msg.DataBlk := cache_entry.DataBlk;
                out_msg.MessageSize := MessageSizeType:Data;
                out_msg.Sender := machineID;
            }
        }
    }

    action(sendCacheDataToDir, "cdD", desc="Send the cache data to the dir") {
        enqueue(response_out, ResponseMsg, 1) {
            out_msg.addr := address;
            out_msg.Type := CoherenceResponseType:Data;
            out_msg.Destination.add(mapAddressToMachine(address,
                                    MachineType:Directory));
            out_msg.DataBlk := cache_entry.DataBlk;
            out_msg.MessageSize := MessageSizeType:Data;
            out_msg.Sender := machineID;
        }
    }

    action(sendInvAcktoReq, "iaR", desc="Send inv-ack to requestor") {
        peek(forward_in, RequestMsg) {
            enqueue(response_out, ResponseMsg, 1) {
                out_msg.addr := address;
                out_msg.Type := CoherenceResponseType:InvAck;
                out_msg.Destination.add(in_msg.Requestor);
                out_msg.DataBlk := cache_entry.DataBlk;
                out_msg.MessageSize := MessageSizeType:Control;
                out_msg.Sender := machineID;
            }
        }
    }

    action(decrAcks, "da", desc="Decrement the number of acks") {
        assert(is_valid(tbe));
        tbe.AcksOutstanding := tbe.AcksOutstanding - 1;
        // This annotates the protocol trace
        APPEND_TRANSITION_COMMENT("Acks: ");
        APPEND_TRANSITION_COMMENT(tbe.AcksOutstanding);
    }

    action(storeAcks, "sa", desc="Store the needed acks to the TBE") {
        assert(is_valid(tbe));
        peek(response_in, ResponseMsg) {
            tbe.AcksOutstanding := in_msg.Acks + tbe.AcksOutstanding;
        }
        assert(tbe.AcksOutstanding > 0);
    }

    // Responses to CPU requests (e.g., hits and store acks)

    action(loadHit, "Lh", desc="Load hit") {
        assert(is_valid(cache_entry));
        // Set this entry as the most recently used for the replacement policy
        cacheMemory.setMRU(cache_entry);
        // Send the data back to the sequencer/CPU. NOTE: False means it was
        // not an "external hit", but hit in this local cache.
        sequencer.readCallback(address, cache_entry.DataBlk, false);
    }

    action(externalLoadHit, "xLh", desc="External load hit (was a miss)") {
        assert(is_valid(cache_entry));
        peek(response_in, ResponseMsg) {
            cacheMemory.setMRU(cache_entry);
            // Forward the type of machine that responded to this request
            // E.g., another cache or the directory. This is used for tracking
            // statistics.
            sequencer.readCallback(address, cache_entry.DataBlk, true,
                                   machineIDToMachineType(in_msg.Sender));
        }
    }

    action(storeHit, "Sh", desc="Store hit") {
        assert(is_valid(cache_entry));
        cacheMemory.setMRU(cache_entry);
        // The same as the read callback above.
        sequencer.writeCallback(address, cache_entry.DataBlk, false);
    }

    action(externalStoreHit, "xSh", desc="External store hit (was a miss)") {
        assert(is_valid(cache_entry));
        peek(response_in, ResponseMsg) {
            cacheMemory.setMRU(cache_entry);
            sequencer.writeCallback(address, cache_entry.DataBlk, true,
                                   // Note: this could be the last ack.
                                   machineIDToMachineType(in_msg.Sender));
        }
    }

    action(forwardEviction, "e", desc="sends eviction notification to CPU") {
        if (send_evictions) {
            sequencer.evictionCallback(address);
        }
    }

    // Cache management actions

    action(allocateCacheBlock, "a", desc="Allocate a cache block") {
        assert(is_invalid(cache_entry));
        assert(cacheMemory.cacheAvail(address));
        // Create a new entry and update cache_entry to the new entry
        set_cache_entry(cacheMemory.allocate(address, new Entry));
    }

    action(deallocateCacheBlock, "d", desc="Deallocate a cache block") {
        assert(is_valid(cache_entry));
        cacheMemory.deallocate(address);
        // clear the cache_entry variable (now it's invalid)
        unset_cache_entry();
    }

    action(writeDataToCache, "wd", desc="Write data to the cache") {
        peek(response_in, ResponseMsg) {
            assert(is_valid(cache_entry));
            cache_entry.DataBlk := in_msg.DataBlk;
        }
    }

    action(allocateTBE, "aT", desc="Allocate TBE") {
        assert(is_invalid(tbe));
        TBEs.allocate(address);
        // this updates the tbe variable for other actions
        set_tbe(TBEs[address]);
    }

    action(deallocateTBE, "dT", desc="Deallocate TBE") {
        assert(is_valid(tbe));
        TBEs.deallocate(address);
        // this makes the tbe variable invalid
        unset_tbe();
    }

    // Queue management actions

    action(popMandatoryQueue, "pQ", desc="Pop the mandatory queue") {
        mandatory_in.dequeue(clockEdge());
    }

    action(popResponseQueue, "pR", desc="Pop the response queue") {
        response_in.dequeue(clockEdge());
    }

    action(popForwardQueue, "pF", desc="Pop the forward queue") {
        forward_in.dequeue(clockEdge());
    }

    // Stalling actions

    action(stall, "z", desc="Stall the incoming request") {
        // Do nothing. However, the transition must have some action to be
        // valid which is why this is needed.
        // NOTE: There are other more complicated but higher performing stalls
        // in Ruby like recycle() or stall_and_wait.
        // z_stall stalls everything in the queue behind this request.
    }


    /*************************************************************************/
    // These are the transition definitions. Each one corresponds to a cell in
    // the table from Sorin et al. They are listed roughly in upper-left to
    // bottom-right order.

    // Each transition has (up to) three parameters: the current state, the
    // triggering event, and the final state. Thus, the transition below reads
    // "Move from state I on a Load event to state IS_D". Other examples of
    // transition statements appear further down.
    // Within a transition statement is a set of actions to take during the
    // transition. These actions are executed atomically (i.e., all or nothing).
    transition(I, Load, IS_D) {
        // Make sure there is room in the cache to put the block whenever the
        // miss returns. Otherwise we could deadlock.
        allocateCacheBlock;
        // We may need to track acks for this block and only the TBE holds an
        // ack count. Thus, we need to allocate both a TBE and cache block.
        allocateTBE;
        // Actually send the request to the directory
        sendGetS;
        // Since we have handled this request on the mandatory queue, pop it.
        popMandatoryQueue;
    }

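    // A store miss in I: allocate a block and a TBE, send GetM, and wait for
    // data plus any invalidation acks (IM_AD = waiting for Acks and Data).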
    transition(I, Store, IM_AD) {
        allocateCacheBlock;
        allocateTBE;
        sendGetM;
        popMandatoryQueue;
    }

    // You can use {} to specify multiple states or events to which the
    // transition applies. For instance, below: if we are in IS_D, then on any
    // of the following events (Load, Store, Replacement, Inv) we should stall.
    // When there is no third parameter to transition, it means that we want
    // to stay in the initial state.
    transition(IS_D, {Load, Store, Replacement, Inv}) {
        stall;
    }

    // Similarly, on either DataDirNoAcks or DataOwner we should go to S
    transition(IS_D, {DataDirNoAcks, DataOwner}, S) {
        writeDataToCache;
        deallocateTBE;
        externalLoadHit;
        popResponseQueue;
    }

    transition({IM_AD, IM_A}, {Load, Store, Replacement, FwdGetS, FwdGetM}) {
        stall;
    }

    transition({IM_AD, SM_AD}, {DataDirNoAcks, DataOwner}, M) {
        writeDataToCache;
        deallocateTBE;
        externalStoreHit;
        popResponseQueue;
    }

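    // The directory's data arrived but other caches still hold the block:
    // record how many InvAcks we still need and wait for them in IM_A.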
    transition(IM_AD, DataDirAcks, IM_A) {
        writeDataToCache;
        storeAcks;
        popResponseQueue;
    }

    transition({IM_AD, IM_A, SM_AD, SM_A}, InvAck) {
        decrAcks;
        popResponseQueue;
    }

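    // The final InvAck has arrived, so the pending store can now complete.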
    transition({IM_A, SM_A}, LastInvAck, M) {
        deallocateTBE;
        externalStoreHit;
        popResponseQueue;
    }

    transition({S, SM_AD, SM_A, M}, Load) {
        loadHit;
        popMandatoryQueue;
    }

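    // Upgrade miss: the block is already allocated (we are in S), so we only
    // need a TBE to track acks while the GetM is outstanding.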
    transition(S, Store, SM_AD) {
        allocateTBE;
        sendGetM;
        popMandatoryQueue;
    }

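    // Clean replacement: send PutS and wait for the put ack. Note that the
    // mandatory queue is not popped; the stalled CPU request retries once the
    // victim block reaches I.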
    transition(S, Replacement, SI_A) {
        sendPutS;
    }

    transition(S, Inv, I) {
        sendInvAcktoReq;
        forwardEviction;
        deallocateCacheBlock;
        popForwardQueue;
    }

    transition({SM_AD, SM_A}, {Store, Replacement, FwdGetS, FwdGetM}) {
        stall;
    }

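    // We lose our shared copy while upgrading: ack the invalidation and fall
    // back to the IM_AD path, since the data must now come from the directory
    // or the owner.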
    transition(SM_AD, Inv, IM_AD) {
        sendInvAcktoReq;
        popForwardQueue;
    }

    transition(SM_AD, DataDirAcks, SM_A) {
        writeDataToCache;
        storeAcks;
        popResponseQueue;
    }

    transition(M, Store) {
        storeHit;
        forwardEviction;
        popMandatoryQueue;
    }

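    // Dirty replacement: send PutM (with the data) and wait for the put ack.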
    transition(M, Replacement, MI_A) {
        sendPutM;
    }

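    // Another cache wants to read: send our (possibly dirty) data to the
    // requestor and also back to the directory, then downgrade to S.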
    transition(M, FwdGetS, S) {
        sendCacheDataToReq;
        sendCacheDataToDir;
        popForwardQueue;
    }

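    // Another cache wants to write: hand the data to the requestor and
    // invalidate our copy; ownership transfers, so no directory writeback.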
    transition(M, FwdGetM, I) {
        sendCacheDataToReq;
        deallocateCacheBlock;
        popForwardQueue;
    }

    transition({MI_A, SI_A, II_A}, {Load, Store, Replacement}) {
        stall;
    }

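    // Our PutM raced with another cache's GetS at the directory: respond as
    // if we were still in M, then keep waiting for the put ack as a sharer.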
    transition(MI_A, FwdGetS, SI_A) {
        sendCacheDataToReq;
        sendCacheDataToDir;
        popForwardQueue;
    }

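    // Same race, but with a GetM: send the data to the requestor and wait for
    // the put ack with no valid copy left (II_A).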
    transition(MI_A, FwdGetM, II_A) {
        sendCacheDataToReq;
        popForwardQueue;
    }

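    // The put ack finally arrives; the block can now be safely deallocated.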
    transition({MI_A, SI_A, II_A}, PutAck, I) {
        deallocateCacheBlock;
        popForwardQueue;
    }

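    // Our PutS raced with an invalidation: ack the Inv and keep waiting for
    // the put ack, now with no valid copy (II_A).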
    transition(SI_A, Inv, II_A) {
        sendInvAcktoReq;
        popForwardQueue;
    }

}