author     Nathan Binkert <nate@binkert.org>   2010-03-31 16:56:45 -0700
committer  Nathan Binkert <nate@binkert.org>   2010-03-31 16:56:45 -0700
commit     be10204729c107b41d5d7487323c732e9fa09df5 (patch)
tree       5c8f4001c490c4d777e8756e536cd2f2340c9ebb /src/mem/ruby/storebuffer
parent     60ae1d2b10002bb73b420fce91c4b74397c55457 (diff)
download   gem5-be10204729c107b41d5d7487323c732e9fa09df5.tar.xz
style: another ruby style pass
Diffstat (limited to 'src/mem/ruby/storebuffer')
-rw-r--r--  src/mem/ruby/storebuffer/hfa.hh            | 103
-rw-r--r--  src/mem/ruby/storebuffer/hfatypes.hh       |  80
-rw-r--r--  src/mem/ruby/storebuffer/interface.cc      |  67
-rw-r--r--  src/mem/ruby/storebuffer/interface.hh      |  46
-rw-r--r--  src/mem/ruby/storebuffer/stb_interface.cc  |  72
-rw-r--r--  src/mem/ruby/storebuffer/stb_interface.hh  |  20
-rw-r--r--  src/mem/ruby/storebuffer/storebuffer.cc    | 672
-rw-r--r--  src/mem/ruby/storebuffer/storebuffer.hh    | 163
8 files changed, 366 insertions(+), 857 deletions(-)
diff --git a/src/mem/ruby/storebuffer/hfa.hh b/src/mem/ruby/storebuffer/hfa.hh
deleted file mode 100644
index abcd96495..000000000
--- a/src/mem/ruby/storebuffer/hfa.hh
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-// this code was modified to fit into Rochs
-
-#ifndef _HFA_H_
-#define _HFA_H_
-
-using namespace std;
-
-/*
- * Global include file for entire project.
- * Should be included first in all ".cc" project files
- */
-
-/*------------------------------------------------------------------------*/
-/* Includes */
-/*------------------------------------------------------------------------*/
-#include "mem/ruby/common/Global.hh"
-#include <string>
-#include <map>
-#include <set>
-#include <list>
-#include <fstream>
-#include <iostream>
-
-#include <math.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h> // va_start(), va_end()
-#include <strings.h> // declaration of bzero()
-
-#include <sys/time.h> // gettimeofday() includes
-#include <errno.h>
-#include <unistd.h>
-
-/*------------------------------------------------------------------------*/
-/* Type Includes */
-/*------------------------------------------------------------------------*/
-
-#include "mem/ruby/storebuffer/hfatypes.hh"
-
-/*------------------------------------------------------------------------*/
-/* Forward class declaration(s) */
-/*------------------------------------------------------------------------*/
-
-class wait_list_t;
-class waiter_t;
-class free_list_t;
-class pipestate_t;
-class pipepool_t;
-
-
-/** Maximum size of a load or store that may occur to/from the memory system.
- * (in 64-bit quantities). Currently this is set to 8 * 64-bits = 64-bytes.
- */
-const uint32 MEMOP_MAX_SIZE = 8;
-
-/** 64-bit int memory masks */
-#define MEM_BYTE_MASK 0x00000000000000ffULL
-#define MEM_HALF_MASK 0x000000000000ffffULL
-#define MEM_WORD_MASK 0x00000000ffffffffULL
-#define MEM_EXTD_MASK 0xffffffffffffffffULL
-#define MEM_QUAD_MASK 0xffffffffffffffffULL
-
-#define ISEQ_MASK 0x0000ffffffffffffULL
-
-/*------------------------------------------------------------------------*/
-/* Configuration Parameters */
-/*------------------------------------------------------------------------*/
-
-#define SIM_HALT assert(0);
-
-#include <assert.h>
-
-#endif /* _HFA_H_ */
-
-
diff --git a/src/mem/ruby/storebuffer/hfatypes.hh b/src/mem/ruby/storebuffer/hfatypes.hh
deleted file mode 100644
index c4d0de2e6..000000000
--- a/src/mem/ruby/storebuffer/hfatypes.hh
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _HFATYPES_H_
-#define _HFATYPES_H_
-
-/*
- * Global include file for entire project.
- * Should be included first in all ".cc" project files
- */
-
-/*------------------------------------------------------------------------*/
-/* Includes */
-/*------------------------------------------------------------------------*/
-/*------------------------------------------------------------------------*/
-/* SimIcs Includes */
-/*------------------------------------------------------------------------*/
-
-/* import C functions */
-
-
-/*------------------------------------------------------------------------*/
-/* Forward class declaration(s) */
-/*------------------------------------------------------------------------*/
-
-/*------------------------------------------------------------------------*/
-/* Macro declarations */
-/*------------------------------------------------------------------------*/
-
-// definitions of MAX / MIN (if needed)
-#ifndef MAX
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-#endif
-
-#ifndef MIN
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#endif
-
-/* Statistics tracking definition */
-#define STAT_INC(A) (A)++
-
-/*------------------------------------------------------------------------*/
-/* Enumerations */
-/*------------------------------------------------------------------------*/
-
-/*------------------------------------------------------------------------*/
-/* Project Includes */
-/*------------------------------------------------------------------------*/
-
-typedef unsigned char byte_t; /* byte - 8 bits */
-typedef unsigned short half_t; /* half - 16 bits */
-typedef unsigned int word_t; /* word - 32 bits */
-typedef uint64 tick_t; /* time - 64 bit */
-
-#endif /* _HFATYPES_H_ */
diff --git a/src/mem/ruby/storebuffer/interface.cc b/src/mem/ruby/storebuffer/interface.cc
deleted file mode 100644
index 1ee6ee3a0..000000000
--- a/src/mem/ruby/storebuffer/interface.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "mem/ruby/libruby.hh"
-#include "writebuffer.hh"
-#include <iostream>
-
-writebuffer_status_t handleStore (writebuffer_t * writebuffer, const RubyRequest & request) {
- assert(request.type == RubyRequestType_ST);
- if (writebuffer->writeBufferFull()){
- return WB_FULL;
- }
- else if (writebuffer->writeBufferFlushing()) {
- return WB_FLUSHING;
- }
- else {
- writebuffer->addToWriteBuffer(request);
- return WB_OK;
- }
-}
-
-uint64_t handleLoad(writebuffer_t * writebuffer, const RubyRequest & request) {
- assert(request.type == RubyRequestType_LD);
- return writebuffer->handleLoad(request);
-}
-
-uint64_t handleAtomic(writebuffer_t * writebuffer, const RubyRequest & request) {
- // flush the store buffer
- writebuffer->flushWriteBuffer();
- // let writebuffer issue atomic
- //return writebuffer->issueAtomic(request);
-}
-
-void flushSTB(writebuffer_t * writebuffer) {
- // in in-order can't get a request to flushSTB if already flushing
- // on out of order, have to check if already flushing
- writebuffer->flushWriteBuffer();
-}
-
-void registerHitCallback(writebuffer_t * writebuffer, void (*hit_callback)(int64_t access_id)) {
- writebuffer->registerHitCallback(hit_callback);
-}
diff --git a/src/mem/ruby/storebuffer/interface.hh b/src/mem/ruby/storebuffer/interface.hh
deleted file mode 100644
index cbf010275..000000000
--- a/src/mem/ruby/storebuffer/interface.hh
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef STB_H
-#define STB_H
-
-#include "mem/ruby/libruby.hh"
-#include "writebuffer.hh"
-#include <iostream>
-
-writebuffer_status_t handleStore (writebuffer_t * writebuffer, const RubyRequest & request);
-
-uint64_t handleLoad(writebuffer_t * writebuffer, const RubyRequest & request);
-
-uint64_t handleAtomic(writebuffer_t * writebuffer, const RubyRequest & request);
-
-void flushSTB(writebuffer_t * writebuffer);
-
-void registerHitCallback(writebuffer_t * writebuffer, void (*hit_callback)(int64_t access_id));
-
-#endif
diff --git a/src/mem/ruby/storebuffer/stb_interface.cc b/src/mem/ruby/storebuffer/stb_interface.cc
index cc93d3b51..e3d6f29ed 100644
--- a/src/mem/ruby/storebuffer/stb_interface.cc
+++ b/src/mem/ruby/storebuffer/stb_interface.cc
@@ -27,49 +27,59 @@
*/
#include <iostream>
+
#include "mem/ruby/storebuffer/stb_interface.hh"
-StoreBuffer * createNewSTB(uint32 id, uint32 block_bits, int storebuffer_size) {
- StoreBuffer * stb = new StoreBuffer(id, block_bits, storebuffer_size);
- return stb;
+StoreBuffer *
+createNewSTB(uint32 id, uint32 block_bits, int storebuffer_size)
+{
+ StoreBuffer *stb = new StoreBuffer(id, block_bits, storebuffer_size);
+ return stb;
}
-storebuffer_status_t handleStore (StoreBuffer * storebuffer, const RubyRequest & request) {
- assert(request.type == RubyRequestType_ST);
- if (storebuffer->storeBufferFull()){
- return WB_FULL;
- }
- else if (storebuffer->storeBufferFlushing()) {
- return WB_FLUSHING;
- }
- else {
- storebuffer->addToStoreBuffer(request);
- return WB_OK;
- }
+storebuffer_status_t
+handleStore(StoreBuffer *storebuffer, const RubyRequest &request)
+{
+ assert(request.type == RubyRequestType_ST);
+ if (storebuffer->storeBufferFull()){
+ return WB_FULL;
+ } else if (storebuffer->storeBufferFlushing()) {
+ return WB_FLUSHING;
+ } else {
+ storebuffer->addToStoreBuffer(request);
+ return WB_OK;
+ }
}
-uint64_t handleLoad(StoreBuffer * storebuffer, const RubyRequest & request) {
- assert(request.type == RubyRequestType_LD);
- return storebuffer->handleLoad(request);
+uint64_t
+handleLoad(StoreBuffer *storebuffer, const RubyRequest &request)
+{
+ assert(request.type == RubyRequestType_LD);
+ return storebuffer->handleLoad(request);
}
#if 0
-uint64_t handleAtomic(StoreBuffer * storebuffer, const RubyRequest & request) {
- // flush the store buffer
- storebuffer->flushStoreBuffer();
- // let storebuffer issue atomic
- //return storebuffer->issueAtomic(request);
+uint64_t
+handleAtomic(StoreBuffer *storebuffer, const RubyRequest &request)
+{
+ // flush the store buffer
+ storebuffer->flushStoreBuffer();
+ // let storebuffer issue atomic
+ // return storebuffer->issueAtomic(request);
}
#endif
-void flushSTB(StoreBuffer * storebuffer) {
- // in in-order can't get a request to flushSTB if already flushing
- // on out of order, have to check if already flushing
- storebuffer->flushStoreBuffer();
+void
+flushSTB(StoreBuffer *storebuffer)
+{
+ // in in-order can't get a request to flushSTB if already flushing
+ // on out of order, have to check if already flushing
+ storebuffer->flushStoreBuffer();
}
-void registerHitCallback(StoreBuffer * storebuffer, void (*hit_callback)(int64_t access_id)) {
- storebuffer->registerHitCallback(hit_callback);
+void
+registerHitCallback(StoreBuffer *storebuffer,
+ void (*hit_callback)(int64_t access_id))
+{
+ storebuffer->registerHitCallback(hit_callback);
}
-
-
diff --git a/src/mem/ruby/storebuffer/stb_interface.hh b/src/mem/ruby/storebuffer/stb_interface.hh
index e1a026abc..b7f1b152d 100644
--- a/src/mem/ruby/storebuffer/stb_interface.hh
+++ b/src/mem/ruby/storebuffer/stb_interface.hh
@@ -27,16 +27,12 @@
*/
#include "mem/ruby/storebuffer/storebuffer.hh"
-#include <iostream>
-StoreBuffer * createNewSTB (uint32 id, uint32 block_bits, int storebuffer_size);
-
-storebuffer_status_t handleStore (StoreBuffer * storebuffer, const RubyRequest & request);
-
-uint64_t handleLoad(StoreBuffer * storebuffer, const RubyRequest & request);
-
-uint64_t handleAtomic(StoreBuffer * storebuffer, const RubyRequest & request);
-
-void flushSTB(StoreBuffer * storebuffer);
-
-void registerHitCallback(StoreBuffer * storebuffer, void (*hit_callback)(int64_t access_id));
+StoreBuffer *createNewSTB(uint32 id, uint32 block_bits, int storebuffer_size);
+storebuffer_status_t handleStore(StoreBuffer *storebuffer,
+ const RubyRequest &request);
+uint64_t handleLoad(StoreBuffer *storebuffer, const RubyRequest &request);
+uint64_t handleAtomic(StoreBuffer *storebuffer, const RubyRequest &request);
+void flushSTB(StoreBuffer *storebuffer);
+void registerHitCallback(StoreBuffer *storebuffer,
+ void (*hit_callback)(int64_t access_id));
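The restyled stb_interface.hh above is the full C-style entry point into the store buffer. As a rough illustration of how a CPU-side sequencer might drive it, the sketch below uses only the declarations shown in this diff; the function names exampleSetupAndIssue and onRubyHit, the buffer parameters, and the retry policy are illustrative assumptions, not gem5 code.

// Hypothetical caller-side glue for the interface declared above.
// Only createNewSTB, registerHitCallback, handleStore, handleLoad and
// flushSTB come from stb_interface.hh; everything else is assumed.
#include "mem/ruby/storebuffer/stb_interface.hh"

// Callback Ruby invokes when an outstanding access completes.
static void onRubyHit(int64_t access_id)
{
    // A CPU model would wake the instruction waiting on access_id here.
}

void
exampleSetupAndIssue(const RubyRequest &store_req, const RubyRequest &load_req)
{
    // One store buffer per sequencer: id 0, 64-byte lines (block_bits = 6),
    // room for 16 buffered stores.
    StoreBuffer *stb = createNewSTB(0, 6, 16);
    registerHitCallback(stb, onRubyHit);

    // store_req must be a RubyRequestType_ST; WB_FULL or WB_FLUSHING
    // means the caller has to stall and retry on a later cycle.
    storebuffer_status_t status = handleStore(stb, store_req);
    if (status == WB_FULL || status == WB_FLUSHING) {
        // retry later
    }

    // load_req must be a RubyRequestType_LD.  StoreBuffer::handleLoad
    // encodes -2 (full hit, data already written through load_req.data)
    // and -3 (partial overlap, retry); anything else is a request id
    // that onRubyHit() will eventually see.
    int64_t id = static_cast<int64_t>(handleLoad(stb, load_req));
    if (id == -2) {
        // satisfied from the store buffer
    } else if (id == -3) {
        // partial match; retry until the overlapping stores drain
    }

    // A fence or atomic would drain the buffer first.
    flushSTB(stb);
}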
diff --git a/src/mem/ruby/storebuffer/storebuffer.cc b/src/mem/ruby/storebuffer/storebuffer.cc
index ed97b216c..d6ec0959e 100644
--- a/src/mem/ruby/storebuffer/storebuffer.cc
+++ b/src/mem/ruby/storebuffer/storebuffer.cc
@@ -26,240 +26,216 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/*------------------------------------------------------------------------*/
-/* Includes */
-/*------------------------------------------------------------------------*/
-
#include <map>
-#include "mem/ruby/storebuffer/hfa.hh"
-#include "mem/ruby/storebuffer/storebuffer.hh"
#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/storebuffer/storebuffer.hh"
-#if RUBY_TSO_CHECKER
-#include "TsoChecker.hh"
-#endif
+using namespace std;
#define SYSTEM_EXIT ASSERT(0)
-
// global map of request id_s to map them back to storebuffer pointers
-map <uint64_t, StoreBuffer *> request_map;
-
-#if RUBY_TSO_CHECKER
-Tso::TsoChecker * g_tsoChecker;
-#endif
+map<uint64_t, StoreBuffer *> request_map;
-void hit(int64_t id) {
- if (request_map.find(id) == request_map.end()) {
- ERROR_OUT("Request ID not found in the map");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
- ASSERT(0);
- }
- else {
- request_map[id]->complete(id);
- request_map.erase(id);
- }
+void
+hit(int64_t id)
+{
+ if (request_map.find(id) == request_map.end()) {
+ ERROR_OUT("Request ID not found in the map");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ ASSERT(0);
+ } else {
+ request_map[id]->complete(id);
+ request_map.erase(id);
+ }
}
+StoreBuffer::StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size)
+{
+ iseq = 0;
+ tso_iseq = 0;
+ char name [] = "Sequencer_";
+ char port_name [13];
+ sprintf(port_name, "%s%d", name, id);
+ m_port = libruby_get_port(port_name, hit);
+ m_hit_callback = NULL;
+ ASSERT(storebuffer_size >= 0);
+ m_storebuffer_size = storebuffer_size;
+ m_id = id;
+ m_block_size = 1 << block_bits;
+ m_block_mask = ~(m_block_size - 1);
+ m_buffer_size = 0;
+ m_use_storebuffer = false;
+ m_storebuffer_full = false;
+ m_storebuffer_flushing = false;
+ m_stalled_issue = true;
+ if (m_storebuffer_size > 0){
+ m_use_storebuffer = true;
+ }
-//*****************************************************************************************
-StoreBuffer::StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size) {
-#if RUBY_TSO_CHECKER
- if (id == 0) {
- g_tsoChecker = new Tso::TsoChecker();
- g_tsoChecker->init(64);
- }
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("*******storebuffer_t::Using Write Buffer? %d\n",
+ m_use_storebuffer);
#endif
- iseq = 0;
- tso_iseq = 0;
- char name [] = "Sequencer_";
- char port_name [13];
- sprintf(port_name, "%s%d", name, id);
- m_port = libruby_get_port(port_name, hit);
- m_hit_callback = NULL;
- ASSERT(storebuffer_size >= 0);
- m_storebuffer_size = storebuffer_size;
- m_id = id;
- m_block_size = 1 << block_bits;
- m_block_mask = ~(m_block_size - 1);
- m_buffer_size = 0;
- m_use_storebuffer = false;
- m_storebuffer_full = false;
- m_storebuffer_flushing = false;
- m_stalled_issue = true;
- if(m_storebuffer_size > 0){
- m_use_storebuffer = true;
- }
-
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("*******storebuffer_t::Using Write Buffer? %d\n",m_use_storebuffer);
- #endif
}
-//******************************************************************************************
-StoreBuffer::~StoreBuffer(){
-#if RUBY_TSO_CHECKER
- if (m_id == 0) {
- delete g_tsoChecker;
- }
-#endif
+StoreBuffer::~StoreBuffer()
+{
}
-//*****************************************************************************************************
-void StoreBuffer::registerHitCallback(void (*hit_callback)(int64_t request_id)) {
- assert(m_hit_callback == NULL); // can't assign hit_callback twice
- m_hit_callback = hit_callback;
+void
+StoreBuffer::registerHitCallback(void (*hit_callback)(int64_t request_id))
+{
+ assert(m_hit_callback == NULL); // can't assign hit_callback twice
+ m_hit_callback = hit_callback;
}
-//*****************************************************************************************************
-void StoreBuffer::addToStoreBuffer(struct RubyRequest request){
- if(m_use_storebuffer){
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("\n***StoreBuffer: addToStoreBuffer BEGIN, contents:\n");
- DEBUG_OUT("\n");
- #endif
+void
+StoreBuffer::addToStoreBuffer(RubyRequest request)
+{
+ if (!m_use_storebuffer) {
+ // make request to libruby
+ uint64_t id = libruby_issue_request(m_port, request);
+ if (request_map.find(id) != request_map.end()) {
+ ERROR_OUT("Request ID is already in the map");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ ASSERT(0);
+ } else {
+ request_map.insert(make_pair(id, this));
+ outstanding_requests.insert(make_pair(id, request));
+ }
+ return;
+ }
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("\t INSERTING new request\n");
- #endif
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("\n***StoreBuffer: addToStoreBuffer BEGIN, contents:\n");
+ DEBUG_OUT("\n");
+ DEBUG_OUT("\t INSERTING new request\n");
+#endif
buffer.push_front(SBEntry(request, NULL));
m_buffer_size++;
if (m_buffer_size >= m_storebuffer_size) {
- m_storebuffer_full = true;
- }
- else if (m_stalled_issue) {
- m_stalled_issue = false;
- issueNextStore();
+ m_storebuffer_full = true;
+ } else if (m_stalled_issue) {
+ m_stalled_issue = false;
+ issueNextStore();
}
iseq++;
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("***StoreBuffer: addToStoreBuffer END, contents:\n");
- DEBUG_OUT("\n");
- #endif
- } //end if(m_use_storebuffer)
- else {
- // make request to libruby
- uint64_t id = libruby_issue_request(m_port, request);
- if (request_map.find(id) != request_map.end()) {
- ERROR_OUT("Request ID is already in the map");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
- ASSERT(0);
- }
- else {
- request_map.insert(make_pair(id, this));
- outstanding_requests.insert(make_pair(id, request));
- }
- }
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("***StoreBuffer: addToStoreBuffer END, contents:\n");
+ DEBUG_OUT("\n");
+#endif
}
-//*****************************************************************************************************
-// Return value of -2 indicates that the load request was satisfied by the store buffer
-// Return value of -3 indicates a partial match, so the load has to retry until NO_MATCH
-// Alternatively we could satisfy the partial match, but tso gets complicated and more races
-//*****************************************************************************************************
-int64_t StoreBuffer::handleLoad(struct RubyRequest request) {
- if (m_use_storebuffer) {
+// Return value of -2 indicates that the load request was satisfied by
+// the store buffer
+// Return value of -3 indicates a partial match, so the load has to
+// retry until NO_MATCH
+// Alternatively we could satisfy the partial match, but tso gets
+// complicated and more races
+int64_t
+StoreBuffer::handleLoad(RubyRequest request)
+{
+ if (!m_use_storebuffer) {
+ // make a request to ruby
+ return libruby_issue_request(m_port, request);
+ }
+
load_match match = checkForLoadHit(request);
if (match == FULL_MATCH) {
- // fill data
- returnMatchedData(request);
- iseq++;
- return -2;
- }
- else if (match == NO_MATCH) {
- // make request to libruby and return the id
- uint64_t id = libruby_issue_request(m_port, request);
- if (request_map.find(id) != request_map.end()) {
- ERROR_OUT("Request ID is already in the map");
- DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
- ASSERT(0);
- }
- else {
- request_map.insert(make_pair(id, this));
- outstanding_requests.insert(make_pair(id, request));
- }
- iseq++;
- return id;
- }
- else { // partial match
- return -3;
+ // fill data
+ returnMatchedData(request);
+ iseq++;
+ return -2;
+ } else if (match == NO_MATCH) {
+ // make request to libruby and return the id
+ uint64_t id = libruby_issue_request(m_port, request);
+ if (request_map.find(id) != request_map.end()) {
+ ERROR_OUT("Request ID is already in the map");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, id);
+ ASSERT(0);
+ } else {
+ request_map.insert(make_pair(id, this));
+ outstanding_requests.insert(make_pair(id, request));
+ }
+ iseq++;
+ return id;
+ } else { // partial match
+ return -3;
}
- }
- else {
- // make a request to ruby
- return libruby_issue_request(m_port, request);
- }
}
-
-//*****************************************************************************************************
// This function will fill the data array if any match is found
-//*****************************************************************************************************
-load_match StoreBuffer::checkForLoadHit(struct RubyRequest request) {
- if (m_use_storebuffer) {
+load_match
+StoreBuffer::checkForLoadHit(RubyRequest request)
+{
+ if (!m_use_storebuffer) {
+ // this function should never be called if we are not using a
+ // store buffer
+ ERROR_OUT("checkForLoadHit called while write buffer is not in use");
+ ASSERT(0);
+ }
+
physical_address_t physical_address = request.paddr;
int len = request.len;
uint8_t * data = new uint8_t[64];
memset(data, 0, 64);
- for (int i = physical_address%64; i < len; i++) {
- data[i] = 1;
- }
+ for (int i = physical_address % 64; i < len; i++)
+ data[i] = 1;
bool found = false;
physical_address_t lineaddr = physical_address & m_block_mask;
// iterate over the buffer looking for hits
- for (deque<struct SBEntry>::iterator it = buffer.begin(); it != buffer.end(); it++) {
- if ((it->m_request.paddr & m_block_mask) == lineaddr) {
+ deque<SBEntry>::iterator it = buffer.begin();
+ for (; it != buffer.end(); it++) {
+ RubyRequest &req = it->m_request;
+ if ((req.paddr & m_block_mask) != lineaddr)
+ continue;
+
found = true;
- for (int i = it->m_request.paddr%64; i < it->m_request.len; i++) {
- data[i] = 0;
- }
- }
+ for (int i = req.paddr % 64; i < req.len; i++)
+ data[i] = 0;
}
- // if any matching entry is found, determine if all the requested bytes have been matched
+ // if any matching entry is found, determine if all the
+ // requested bytes have been matched
if (found) {
- ASSERT(m_buffer_size > 0);
- int unmatched_bytes = 0;
- for (int i = physical_address%64; i < len; i++) {
- unmatched_bytes = unmatched_bytes + data[i];
- }
- if (unmatched_bytes == 0) {
- delete data;
- return FULL_MATCH;
- }
- else {
+ ASSERT(m_buffer_size > 0);
+ int unmatched_bytes = 0;
+ for (int i = physical_address%64; i < len; i++) {
+ unmatched_bytes = unmatched_bytes + data[i];
+ }
+ if (unmatched_bytes == 0) {
+ delete data;
+ return FULL_MATCH;
+ } else {
+ delete data;
+ return PARTIAL_MATCH;
+ }
+ } else {
delete data;
- return PARTIAL_MATCH;
- }
- }
- else {
- delete data;
- return NO_MATCH;
+ return NO_MATCH;
}
- } // end of if (m_use_storebuffer)
- else {
- // this function should never be called if we are not using a store buffer
- ERROR_OUT("checkForLoadHit called while write buffer is not in use");
- ASSERT(0);
- }
}
-
-//***************************************************************************************************
-void StoreBuffer::returnMatchedData(struct RubyRequest request) {
- if (m_use_storebuffer) {
+void
+StoreBuffer::returnMatchedData(RubyRequest request)
+{
+ if (!m_use_storebuffer) {
+ ERROR_OUT("returnMatchedData called while write buffer is not in use");
+ ASSERT(0);
+ }
uint8_t * data = new uint8_t[64];
memset(data, 0, 64);
@@ -272,114 +248,75 @@ void StoreBuffer::returnMatchedData(struct RubyRequest request) {
ASSERT(checkForLoadHit(request) != NO_MATCH);
physical_address_t lineaddr = physical_address & m_block_mask;
bool found = false;
-#if RUBY_TSO_CHECKER
- Tso::TsoCheckerCmd * cmd;
-#endif
- deque<struct SBEntry>::iterator satisfying_store;
- for (deque<struct SBEntry>::iterator it = buffer.begin(); it != buffer.end(); it++) {
- if ((it->m_request.paddr & m_block_mask) == lineaddr) {
- if (!found) {
- found = true;
-#if RUBY_TSO_CHECKER
- satisfying_store = it;
- cmd = new Tso::TsoCheckerCmd(m_id, // this thread id
- iseq, // instruction sequence
- ITYPE_LOAD, // is a store
- MEM_LOAD_DATA, // commit
- request.paddr, // the address
- NULL, // and data
- request.len, // and len
- DSRC_STB, // shouldn't matter
- libruby_get_time(), // macc: for store macc and time are the same and it
- 0, // gobs
- 0);
-#endif
- }
- uint8_t * dataPtr = it->m_request.data;
- int offset = it->m_request.paddr%64;
- for (int i = offset; i < it->m_request.len; i++) {
- if (!written[i]) { // don't overwrite data with earlier data
- data[i] = dataPtr[i-offset];
- written[i] = 1;
- }
+ deque<SBEntry>::iterator satisfying_store;
+ deque<SBEntry>::iterator it = buffer.begin();
+ for (; it != buffer.end(); it++) {
+ if ((it->m_request.paddr & m_block_mask) == lineaddr) {
+ if (!found) {
+ found = true;
+ }
+ uint8_t * dataPtr = it->m_request.data;
+ int offset = it->m_request.paddr%64;
+ for (int i = offset; i < it->m_request.len; i++) {
+ if (!written[i]) { // don't overwrite data with earlier data
+ data[i] = dataPtr[i-offset];
+ written[i] = 1;
+ }
+ }
}
- }
}
int i = physical_address%64;
for (int j = 0; (i < physical_address%64 + len) && (j < len); i++, j++) {
- if (written[i]) {
- request.data[j] = data[i];
- }
- }
-
-#if RUBY_TSO_CHECKER
- uint64_t tso_data = 0;
- memcpy(&tso_data, request.data, request.len);
- cmd->setData(tso_data);
-
- Tso::TsoCheckerCmd * adjust_cmd = satisfying_store->m_next_ptr;
- if (adjust_cmd == NULL) {
- adjust_cmd = cmd;
- }
- else {
- while (adjust_cmd->getNext() != NULL) {
- adjust_cmd = adjust_cmd->getNext();
- }
- adjust_cmd->setNext(cmd);
+ if (written[i]) {
+ request.data[j] = data[i];
+ }
}
-#endif
delete data;
delete written;
- }
- else {
- ERROR_OUT("returnMatchedData called while write buffer is not in use");
- ASSERT(0);
- }
}
+void
+StoreBuffer::flushStoreBuffer()
+{
+ if (!m_use_storebuffer) {
+ // do nothing
+ return;
+ }
+
+#ifdef DEBUG_WRITE_BUFFER
+ DEBUG_OUT("\n***StoreBuffer: flushStoreBuffer BEGIN, contents:\n");
+ DEBUG_OUT("\n");
+#endif
-//******************************************************************************************
-void StoreBuffer::flushStoreBuffer(){
- if (m_use_storebuffer) {
- #ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("\n***StoreBuffer: flushStoreBuffer BEGIN, contents:\n");
- DEBUG_OUT("\n");
- #endif
-
- if(m_buffer_size > 0) {
- m_storebuffer_flushing = true; // indicate that we are flushing
- }
- else {
- m_storebuffer_flushing = false;
- return;
- }
- }
- else {
- // do nothing
- return;
- }
+ m_storebuffer_flushing = (m_buffer_size > 0);
}
-//****************************************************************************************
-void StoreBuffer::issueNextStore() {
- SBEntry request = buffer.back();
- uint64_t id = libruby_issue_request(m_port, request.m_request);
- if (request_map.find(id) != request_map.end()) {
- assert(0);
- }
- else {
- request_map.insert(make_pair(id, this));
- outstanding_requests.insert(make_pair(id, request.m_request));
- }
+void
+StoreBuffer::issueNextStore()
+{
+ SBEntry request = buffer.back();
+ uint64_t id = libruby_issue_request(m_port, request.m_request);
+ if (request_map.find(id) != request_map.end()) {
+ assert(0);
+ } else {
+ request_map.insert(make_pair(id, this));
+ outstanding_requests.insert(make_pair(id, request.m_request));
+ }
}
-//****************************************************************************************
-void StoreBuffer::complete(uint64_t id) {
- if (m_use_storebuffer) {
+void
+StoreBuffer::complete(uint64_t id)
+{
+ if (!m_use_storebuffer) {
+ m_hit_callback(id);
+ return;
+ }
+
ASSERT(outstanding_requests.find(id) != outstanding_requests.end());
- physical_address_t physical_address = outstanding_requests.find(id)->second.paddr;
+ physical_address_t physical_address =
+ outstanding_requests.find(id)->second.paddr;
RubyRequestType type = outstanding_requests.find(id)->second.type;
#ifdef DEBUG_WRITE_BUFFER
DEBUG_OUT("\n***StoreBuffer: complete BEGIN, contents:\n");
@@ -387,184 +324,59 @@ void StoreBuffer::complete(uint64_t id) {
#endif
if (type == RubyRequestType_ST) {
- physical_address_t lineaddr = physical_address & m_block_mask;
-
- //Note fastpath hits are handled like regular requests - they must remove the WB entry!
- if ( lineaddr != physical_address ) {
- ERROR_OUT("error: StoreBuffer: ruby returns pa 0x%0llx which is not a cache line: 0x%0llx\n", physical_address, lineaddr );
- }
-
- SBEntry from_buffer = buffer.back();
- if (((from_buffer.m_request.paddr & m_block_mask) == lineaddr) && (from_buffer.m_request.type == type)) {
- buffer.pop_back();
- m_buffer_size--;
- ASSERT(m_buffer_size >= 0);
-
-#if RUBY_TSO_CHECKER
- int len = outstanding_requests.find(id)->second.len;
- uint64_t data = 0;
- memcpy(&data, from_buffer.m_request.data, 4);
-
- cerr << m_id << " INSERTING STORE" << endl << flush;
- // add to the tsoChecker
- g_tsoChecker->input(m_id, // this thread id
- (id & ISEQ_MASK), // instruction sequence
- ITYPE_STORE, // is a store
- MEM_STORE_COMMIT, // commit
- physical_address, // the address
- data, // and data
- len, // and len
- DSRC_STB, // shouldn't matter
- libruby_get_time(), // macc
- libruby_get_time(), // gobs
- libruby_get_time()); // time
- tso_iseq++;
-
- // also add the loads that are satisfied by this store
- if (from_buffer.m_next_ptr != NULL) {
- from_buffer.m_next_ptr->setGobs(libruby_get_time());
- g_tsoChecker->input(*(from_buffer.m_next_ptr));
- cerr << m_id << " INSERTING LOAD for STORE: " << from_buffer.m_next_ptr->getIseq() << endl << flush;
- tso_iseq++;
- Tso::TsoCheckerCmd * to_input = from_buffer.m_next_ptr->getNext();
- while (to_input != NULL) {
- if (to_input->getGobs() == 0) {
- to_input->setGobs(libruby_get_time());
- }
- cerr << m_id << " INSERTING LOAD iseq for STORE: " << to_input->getIseq() << endl << flush;
- g_tsoChecker->input(*to_input);
- tso_iseq++;
- to_input = to_input->getNext();
- }
- }
-#endif
- // schedule the next request
- if (m_buffer_size > 0) {
- issueNextStore();
- }
- else if (m_buffer_size == 0) {
- m_storebuffer_flushing = false;
- m_stalled_issue = true;
+ physical_address_t lineaddr = physical_address & m_block_mask;
+
+ // Note fastpath hits are handled like regular requests - they
+ // must remove the WB entry!
+ if (lineaddr != physical_address) {
+ ERROR_OUT("error: StoreBuffer: ruby returns pa 0x%0llx "
+ "which is not a cache line: 0x%0llx\n",
+ physical_address, lineaddr);
}
- m_storebuffer_full = false;
+ SBEntry from_buffer = buffer.back();
+ if ((from_buffer.m_request.paddr & m_block_mask) == lineaddr &&
+ from_buffer.m_request.type == type) {
+ buffer.pop_back();
+ m_buffer_size--;
+ ASSERT(m_buffer_size >= 0);
+
+ // schedule the next request
+ if (m_buffer_size > 0) {
+ issueNextStore();
+ } else if (m_buffer_size == 0) {
+ m_storebuffer_flushing = false;
+ m_stalled_issue = true;
+ }
- }
- else {
- ERROR_OUT("[%d] error: StoreBuffer: at complete, address 0x%0llx not found.\n", m_id, lineaddr);
- ERROR_OUT("StoreBuffer:: complete FAILS\n");
- ASSERT(0);
- }
+ m_storebuffer_full = false;
+ } else {
+ ERROR_OUT("[%d] error: StoreBuffer: at complete, address 0x%0llx "
+ "not found.\n", m_id, lineaddr);
+ ERROR_OUT("StoreBuffer:: complete FAILS\n");
+ ASSERT(0);
+ }
#ifdef DEBUG_WRITE_BUFFER
- DEBUG_OUT("***StoreBuffer: complete END, contents:\n");
- DEBUG_OUT("\n");
-#endif
- } // end if (type == ST)
- else if (type == RubyRequestType_LD) {
-#if RUBY_TSO_CHECKER
- RubyRequest request = outstanding_requests.find(id)->second;
- uint64_t data = 0;
- memcpy(&data, request.data, request.len);
-
- // add to the tsoChecker if in order, otherwise, find a place to put ourselves
- if ((id & ISEQ_MASK) == tso_iseq) {
- tso_iseq++;
- cerr << m_id << " INSERTING LOAD" << endl << flush;
- g_tsoChecker->input(m_id, // this thread id
- (id & ISEQ_MASK), // instruction sequence
- ITYPE_LOAD, // is a store
- MEM_LOAD_DATA, // commit
- request.paddr, // the address
- data, // and data
- request.len, // and len
- DSRC_L2_MEMORY, // shouldn't matter DSRC_L1
- libruby_get_time(), // macc: for store macc and time are the same and it
- libruby_get_time(), // macc
- libruby_get_time()); // time
- }
- else {
- Tso::TsoCheckerCmd * cmd;
- cmd = new Tso::TsoCheckerCmd(m_id, // this thread id
- (id & ISEQ_MASK), // instruction sequence
- ITYPE_LOAD, // is a store
- MEM_LOAD_DATA, // commit
- request.paddr, // the address
- data, // and data
- request.len, // and len
- DSRC_L2_MEMORY, // shouldn't matter DSRC_L1
- libruby_get_time(), // macc: for store macc and time are the same and it
- libruby_get_time(), // macc
- libruby_get_time()); // time
- insertTsoLL(cmd);
- }
+ DEBUG_OUT("***StoreBuffer: complete END, contents:\n");
+ DEBUG_OUT("\n");
#endif
- m_hit_callback(id);
+ } else if (type == RubyRequestType_LD) {
+ m_hit_callback(id);
}
// LD, ST or FETCH hit callback
outstanding_requests.erase(id);
-
- } // end if(m_use_storebuffer)
- else {
- m_hit_callback(id);
- }
}
-#if RUBY_TSO_CHECKER
-void StoreBuffer::insertTsoLL(Tso::TsoCheckerCmd * cmd) {
- uint64_t count = cmd->getIseq();
- Tso::TsoCheckerCmd * current = NULL;
- Tso::TsoCheckerCmd * previous = NULL;
- deque<struct SBEntry>::reverse_iterator iter;
- bool found = false;
- for (iter = buffer.rbegin(); iter != buffer.rend(); ++ iter) {
- if (iter->m_next_ptr != NULL) {
- current = iter->m_next_ptr->getNext(); // initalize both to the beginning of the linked list
- previous = current;
- while (current != NULL) {
- if (current->getIseq() > count) {
- found = true;
- break;
- }
- previous = current;
- current = current->getNext();
- }
- }
- // break out if found a match, iterator should still point to the right SBEntry
- if (found) {
- break;
- }
- }
-
- // will insert at the end if not found
- if (!found) {
- buffer.front().m_next_ptr = cmd;
- }
- else if (current == previous) {
- cerr << "INSERTING " << count << " BEFORE: " << iter->m_next_ptr->getIseq();
- Tso::TsoCheckerCmd * temp = iter->m_next_ptr;
- iter->m_next_ptr = cmd;
- cmd->setNext(temp);
- }
- else {
- cerr << "INSERTING " << count << " BETWEEN: " << previous->getIseq() << " AND " << current->getIseq();
- cmd->setNext(current);
- previous->setNext(cmd);
- }
-}
-#endif
-
-//***************************************************************************************************
-void StoreBuffer::print( void )
+void
+StoreBuffer::print()
{
- DEBUG_OUT("[%d] StoreBuffer: Total entries: %d Outstanding: %d\n", m_id, m_buffer_size);
+ DEBUG_OUT("[%d] StoreBuffer: Total entries: %d Outstanding: %d\n",
+ m_id, m_buffer_size);
- if(m_use_storebuffer){
- }
- else{
- DEBUG_OUT("\t WRITE BUFFER NOT USED\n");
- }
+ if (!m_use_storebuffer)
+ DEBUG_OUT("\t WRITE BUFFER NOT USED\n");
}
diff --git a/src/mem/ruby/storebuffer/storebuffer.hh b/src/mem/ruby/storebuffer/storebuffer.hh
index 67555f48f..6d476706b 100644
--- a/src/mem/ruby/storebuffer/storebuffer.hh
+++ b/src/mem/ruby/storebuffer/storebuffer.hh
@@ -26,23 +26,18 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef _STOREBUFFER_H_
-#define _STOREBUFFER_H_
+#ifndef __MEM_RUBY_STOREBUFFER_STOREBUFFER_HH__
+#define __MEM_RUBY_STOREBUFFER_STOREBUFFER_HH__
-#include <map>
#include <deque>
+#include <map>
-#include "config/ruby_tso_checker.hh"
-#include "mem/ruby/storebuffer/hfa.hh"
+#include "mem/ruby/common/TypeDefines.hh"
#include "mem/ruby/libruby.hh"
-#if RUBY_TSO_CHECKER
-#include "TsoCheckerCmd.hh"
-#endif
-
/**
- * Status for write buffer accesses. The Write buffer can hit in fastpath, be full, or
- * successfully enqueue the store request
+ * Status for write buffer accesses. The Write buffer can hit in
+ * fastpath, be full, or successfully enqueue the store request
*/
enum storebuffer_status_t { WB_FULL, WB_OK, WB_FLUSHING };
@@ -51,114 +46,106 @@ enum storebuffer_status_t { WB_FULL, WB_OK, WB_FLUSHING };
*/
enum load_match { NO_MATCH, PARTIAL_MATCH, FULL_MATCH };
-struct SBEntry {
- struct RubyRequest m_request;
-#if RUBY_TSO_CHECKER
- Tso::TsoCheckerCmd * m_next_ptr;
-#endif
- SBEntry(struct RubyRequest request, void * ptr)
- : m_request(request)
+struct SBEntry
+{
+ RubyRequest m_request;
+
+ SBEntry(RubyRequest request, void * ptr)
+ : m_request(request)
{
-#if RUBY_TSO_CHECKER
- m_next_ptr = (Tso::TsoCheckerCmd*) ptr;
-#endif
}
};
-class StoreBuffer {
- public:
- ///Constructor
- /// Note that the size of the Write Buffer is determined by the WRITE_BUFFER_SIZE config parameter
- StoreBuffer(uint32 id, uint32 block_bits, int storebuffer_size);
-
- /// Register hitcallback back to CPU
- void registerHitCallback(void (*hit_callback)(int64_t request_id));
-
- /// Destructor
- ~StoreBuffer();
+class StoreBuffer
+{
+ public:
+ /// Note that the size of the Write Buffer is determined by the
+ /// WRITE_BUFFER_SIZE config parameter
+ StoreBuffer(uint32_t id, uint32_t block_bits, int storebuffer_size);
- ///Adds a store entry to the write buffer
- void addToStoreBuffer(struct RubyRequest request);
+ ~StoreBuffer();
- ///Flushes the entire write buffer
- void flushStoreBuffer();
+ /// Register hitcallback back to CPU
+ void registerHitCallback(void (*hit_callback)(int64_t request_id));
- ///A pseq object calls this when Ruby completes our request
- void complete(uint64_t);
+ ///Adds a store entry to the write buffer
+ void addToStoreBuffer(RubyRequest request);
- /// Returns ID. If ID == -2, HIT, else it's an ID to wait on
- int64_t handleLoad(struct RubyRequest request);
+ ///Flushes the entire write buffer
+ void flushStoreBuffer();
- /// Used by all load insts to check whether it hits to any entry in the WB. If so, the WB is flushed
- load_match checkForLoadHit(struct RubyRequest request);
+ ///A pseq object calls this when Ruby completes our request
+ void complete(uint64_t);
- /// Used to fill the load in case of FULL_MATCH
- void returnMatchedData(struct RubyRequest request);
+ /// Returns ID. If ID == -2, HIT, else it's an ID to wait on
+ int64_t handleLoad(RubyRequest request);
- /// Issue next store in line
- void issueNextStore();
+ /// Used by all load insts to check whether it hits to any entry
+ /// in the WB. If so, the WB is flushed
+ load_match checkForLoadHit(RubyRequest request);
- /// prints out the contents of the Write Buffer
- void print();
+ /// Used to fill the load in case of FULL_MATCH
+ void returnMatchedData(RubyRequest request);
-#if RUBY_TSO_CHECKER
- /// if load completes before store, insert correctly to be issued to TSOChecker
- void insertTsoLL(Tso::TsoCheckerCmd * cmd);
-#endif
+ /// Issue next store in line
+ void issueNextStore();
- /// Returns flag indicating whether we are using the write buffer
- bool useStoreBuffer() { return m_use_storebuffer; }
+ /// prints out the contents of the Write Buffer
+ void print();
- bool storeBufferFull() { return m_storebuffer_full; }
+ /// Returns flag indicating whether we are using the write buffer
+ bool useStoreBuffer() { return m_use_storebuffer; }
- bool storeBufferFlushing() { return m_storebuffer_flushing; }
+ bool storeBufferFull() { return m_storebuffer_full; }
- private:
- /// id of this write buffer (one per sequencer object)
- uint32 m_id;
+ bool storeBufferFlushing() { return m_storebuffer_flushing; }
- /// number of bytes in cacheline
- uint32 m_block_size;
+ private:
+ /// id of this write buffer (one per sequencer object)
+ uint32_t m_id;
- /// the size of the write buffer
- uint32 m_storebuffer_size;
+ /// number of bytes in cacheline
+ uint32_t m_block_size;
- /// mask to strip off non-cache line bits
- pa_t m_block_mask;
+ /// the size of the write buffer
+ uint32_t m_storebuffer_size;
- /// list of store requests in the write buffer
- deque <struct SBEntry> buffer;
+ /// mask to strip off non-cache line bits
+ pa_t m_block_mask;
- /// the current length of the write buffer
- uint32 m_buffer_size;
+ /// list of store requests in the write buffer
+ std::deque<SBEntry> buffer;
- /// whether we want to simulate the write buffer or not:
- bool m_use_storebuffer;
+ /// the current length of the write buffer
+ uint32_t m_buffer_size;
- /// indicates whether the write buffer is full or not
- bool m_storebuffer_full;
+ /// whether we want to simulate the write buffer or not:
+ bool m_use_storebuffer;
- /// indicates that we are currently flushing the write buffer
- bool m_storebuffer_flushing;
+ /// indicates whether the write buffer is full or not
+ bool m_storebuffer_full;
- /// indicates that automatic issue is stalled and the next store to be added should issue itself
- bool m_stalled_issue;
+ /// indicates that we are currently flushing the write buffer
+ bool m_storebuffer_flushing;
- /// RubyPort to make requests to
- RubyPortHandle m_port;
+ /// indicates that automatic issue is stalled and the next store
+ /// to be added should issue itself
+ bool m_stalled_issue;
- /// HitCallback to CPU
- void (*m_hit_callback)(int64_t);
+ /// RubyPort to make requests to
+ RubyPortHandle m_port;
- /// Map the request id to rubyrequest
- map<uint64_t, struct RubyRequest> outstanding_requests;
+ /// HitCallback to CPU
+ void (*m_hit_callback)(int64_t);
- /// current instruction counter
- uint64_t iseq;
+ /// Map the request id to rubyrequest
+ std::map<uint64_t, RubyRequest> outstanding_requests;
+ /// current instruction counter
+ uint64_t iseq;
- /// input into tso counter
- uint64_t tso_iseq;
+ /// input into tso counter
+ uint64_t tso_iseq;
};
-#endif
+#endif // __MEM_RUBY_STOREBUFFER_STOREBUFFER_HH__
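One structural point in storebuffer.cc worth calling out: completions come back through a single free function, hit(int64_t id), and the global request_map is what routes each id back to the StoreBuffer that issued it. The sketch below restates that routing pattern in isolation; Buffer, owner, recordIssue and onComplete are illustrative names, not gem5 classes.

// Minimal restatement of the id-to-owner routing done by hit() and
// request_map above.
#include <cassert>
#include <cstdint>
#include <map>

struct Buffer {
    void complete(uint64_t id) { /* pop entry, maybe issue next store */ }
};

// One table shared by every buffer: request id -> issuing buffer.
static std::map<uint64_t, Buffer *> owner;

// Recorded at issue time, before the id is handed to the memory system.
void recordIssue(uint64_t id, Buffer *buf)
{
    assert(owner.find(id) == owner.end());    // ids must be unique
    owner[id] = buf;
}

// The single C-style callback registered with each port; it only gets
// the id back, so the table recovers which buffer made the request.
void onComplete(int64_t id)
{
    auto it = owner.find(static_cast<uint64_t>(id));
    assert(it != owner.end());                // unknown id is a bug
    Buffer *buf = it->second;
    owner.erase(it);                          // each id completes once
    buf->complete(static_cast<uint64_t>(id));
}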