author     Lisa Hsu <hsul@eecs.umich.edu>  2003-11-14 10:52:42 -0500
committer  Lisa Hsu <hsul@eecs.umich.edu>  2003-11-14 10:52:42 -0500
commit     b8612cbda3625e27f109469a1f5416237c97fb41 (patch)
tree       e4d195ed22ab29000f1bcada6a560386b6fd1f8f /system/alpha/palcode
parent     488280e48b5f227b2eb35df5d6daba2ae94a1de6 (diff)
download   gem5-b8612cbda3625e27f109469a1f5416237c97fb41.tar.xz
Import changeset
Diffstat (limited to 'system/alpha/palcode')
-rw-r--r--  system/alpha/palcode/Makefile                    157
-rw-r--r--  system/alpha/palcode/cserve.h                    107
-rw-r--r--  system/alpha/palcode/dc21164.h                   961
-rw-r--r--  system/alpha/palcode/dc21164FromGasSources.h     965
-rw-r--r--  system/alpha/palcode/ev5_alpha_defs.h            323
-rw-r--r--  system/alpha/palcode/ev5_defs.h                  575
-rw-r--r--  system/alpha/palcode/ev5_impure.h                392
-rw-r--r--  system/alpha/palcode/ev5_osfalpha_defs.h         160
-rw-r--r--  system/alpha/palcode/ev5_paldef.h                185
-rw-r--r--  system/alpha/palcode/fromHudsonMacros.h          145
-rw-r--r--  system/alpha/palcode/fromHudsonOsf.h             554
-rw-r--r--  system/alpha/palcode/macros.h                    137
-rw-r--r--  system/alpha/palcode/osf.h                       545
-rw-r--r--  system/alpha/palcode/osfpal.nh                     0
-rw-r--r--  system/alpha/palcode/osfpal.s                   5123
-rw-r--r--  system/alpha/palcode/platform.h                  251
-rw-r--r--  system/alpha/palcode/platform_srcmax.s          1825
-rw-r--r--  system/alpha/palcode/platform_tlaser.s          2814
-rw-r--r--  system/alpha/palcode/simos.h                      16
-rw-r--r--  system/alpha/palcode/xxm.sed                       5
20 files changed, 15240 insertions, 0 deletions
diff --git a/system/alpha/palcode/Makefile b/system/alpha/palcode/Makefile
new file mode 100644
index 000000000..7d4f21cd1
--- /dev/null
+++ b/system/alpha/palcode/Makefile
@@ -0,0 +1,157 @@
+#
+# $Id: Makefile,v 1.2 1997/12/16 01:18:38 bugnion Exp $
+#
+# Revision History:
+#
+# $Log: Makefile,v $
+# Revision 1.2 1997/12/16 01:18:38 bugnion
+# Removed bogus TLASER offsets from palcode build. -- roll over
+# simultaneously with the simulation tree
+#
+# Revision 1.1.1.1 1997/10/30 23:27:18 verghese
+# current 10/29/97
+#
+#
+# Set environment variables to point to various things:
+#
+# EB_TOOLBOX - Where your toolset is located
+#
+
+EB_TOOLBOX = /wrl/proj/simos/bin/tools/osf
+CPP = /usr/bin/cpp
+AS = $(EB_TOOLBOX)/gas
+LD = $(EB_TOOLBOX)/gld
+DIS = $(EB_TOOLBOX)/alist
+STRIP = $(EB_TOOLBOX)/astrip
+PVC = $(EB_TOOLBOX)/pvc
+MAKEDEP = $(CPP) -MM
+
+#
+# Define KDEBUG if you want a special unprivileged CALL_PAL
+# breakpoint trap handler for remote kernel debugging.
+#
+# Define CONSOLE_ENTRY to change the sys$enter_console
+# transfer address. Default CONSOLE_ENTRY value is 0x10000.
+#
+# Define DISABLE_CRD to disable CRD. Note that reset sets MCES so that
+# correctable errors are ignored anyway, but this actually stops the
+# interrupt.
+#
+
+DEFINES = -DDISABLE_CRD -DSIMOS -DBUILD_PALCODE
+DEFINES += -I$(SIMTOOLS)/cpus-alpha/simos
+
+CPPFLAGS =
+ASFLAGS = -21164
+#LDFLAGS = -Tstrip 0 -Thdr -N
+#LDFLAGS = -Tstrip 2000 -Thdr -N # removed bugnion
+LDFLAGS = -Tstrip 4000 -Thdr -N
+# Source files:
+#
+# This is the only block in which the list of source files should change.
+#
+# SFILES - assembler source files
+# HFILES - header files
+#
+
+SFILES = osfpal.s platform.s
+
+HFILES = dc21164.h \
+ osf.h \
+ macros.h \
+ ev5_impure.h \
+ cserve.h \
+ platform.h
+
+# Intermediate files:
+#
+# This block should not change.
+#
+
+IFILES = $(SFILES:.s=.i)
+
+# Object files:
+#
+# This block should not change.
+#
+
+OFILES = $(IFILES:.i=.o)
+
+.DEFAULT:
+ co -u $<
+
+.SUFFIXES:
+.SUFFIXES: .s .i .o
+
+.s.i:
+ $(CPP) $(CPPFLAGS) $(DEFINES) $< $*.i
+
+osfpal.nh: osfpal
+ $(STRIP) -a osfpal $@
+ $(DIS) osfpal >osfpal.dis
+
+osfpal: $(OFILES)
+	echo 'OFILES= ' $(OFILES)
+ $(LD) $(LDFLAGS) -o $@ $(OFILES)
+
+osfpal.o: osfpal.i
+ $(AS) $(ASFLAGS) -o $@ osfpal.i
+
+platform.o: platform.i
+ $(AS) $(ASFLAGS) -o $@ platform.i
+
+pvc: osfpal.lis osfpal.nh osfpal.ent osfpal.map
+ (export PVC_PAL PVC_ENTRY PVC_MAP PVC_CPU; \
+ PVC_PAL=osfpal.nh; \
+ PVC_ENTRY=osfpal.ent; \
+ PVC_MAP=osfpal.map; \
+ PVC_CPU=ev5; \
+ $(PVC);)
+
+osfpal.lis: osfpal
+ $(DIS) osfpal > $@
+
+osfpal.map: osfpal
+ $(DIS) -m osfpal > $@
+
+depend:
+ @cat < /dev/null > makedep
+ @(for i in $(SFILES); do echo $$i; \
+ $(MAKEDEP) $(DEFINES) $$i | \
+ awk '{ if ($$1 != prev) {if (rec != "") print rec; \
+ rec = $$0; prev = $$1; } \
+ else { if (length(rec $$2) > 78) { print rec; rec = $$0; } \
+ else rec = rec " " $$2 } } \
+ END { print rec }' | sed 's/\.o/\.i/' \
+ >> makedep; done)
+ @echo '/^# DO NOT DELETE THIS LINE/+1,$$d' > eddep
+ @echo '$$r makedep' >> eddep
+ @echo 'w' >> eddep
+ @cp Makefile Makefile.bak
+ @ed - Makefile < eddep
+ @rm -f eddep makedep
+ @echo '# DEPENDENCIES MUST END AT END OF FILE' >> Makefile
+ @echo '# IF YOU PUT STUFF HERE IT WILL GO AWAY' >> Makefile
+ @echo '# see make depend above' >> Makefile
+
+clean:
+ rm -f core $(OFILES) $(IFILES)
+
+clobber: clean
+ rm -f osfpal.lis osfpal.nh osfpal.map osfpal
+
+rcsinfo:
+ rlog RCS/*
+
+rcsget:
+ co -u $(HFILES) $(SFILES)
+
+# DO NOT DELETE THIS LINE
+osfpal.i: osfpal.s
+platform.i: platform.s ./cserve.h ./platform.h
+# DEPENDENCIES MUST END AT END OF FILE
+# IF YOU PUT STUFF HERE IT WILL GO AWAY
+# see make depend above
+
+
+
diff --git a/system/alpha/palcode/cserve.h b/system/alpha/palcode/cserve.h
new file mode 100644
index 000000000..47a30a0a3
--- /dev/null
+++ b/system/alpha/palcode/cserve.h
@@ -0,0 +1,107 @@
+/*
+ * VID: [T1.2] PT: [Fri Apr 21 16:47:20 1995] SF: [cserve.h]
+ * TI: [/sae_users/cruz/bin/vice -iplatform.s -l// -p# -DEB164 -h -m -aeb164 ]
+ */
+#define __CSERVE_LOADED 1
+/*
+*****************************************************************************
+** *
+** Copyright © 1993, 1994 *
+** by Digital Equipment Corporation, Maynard, Massachusetts. *
+** *
+** All Rights Reserved *
+** *
+** Permission is hereby granted to use, copy, modify and distribute *
+** this software and its documentation, in both source code and *
+** object code form, and without fee, for the purpose of distribution *
+** of this software or modifications of this software within products *
+** incorporating an integrated circuit implementing Digital's AXP *
+** architecture, regardless of the source of such integrated circuit, *
+** provided that the above copyright notice and this permission notice *
+** appear in all copies, and that the name of Digital Equipment *
+** Corporation not be used in advertising or publicity pertaining to *
+** distribution of the document or software without specific, written *
+** prior permission. *
+** *
+** Digital Equipment Corporation disclaims all warranties and/or *
+** guarantees with regard to this software, including all implied *
+** warranties of fitness for a particular purpose and merchantability, *
+** and makes no representations regarding the use of, or the results *
+** of the use of, the software and documentation in terms of correctness, *
+** accuracy, reliability, currentness or otherwise; and you rely on *
+** the software, documentation and results solely at your own risk. *
+** *
+** AXP is a trademark of Digital Equipment Corporation. *
+** *
+*****************************************************************************
+**
+** FACILITY:
+**
+** DECchip 21164 OSF/1 PALcode
+**
+** MODULE:
+**
+** cserve.h
+**
+** MODULE DESCRIPTION:
+**
+** Platform specific cserve definitions.
+**
+** AUTHOR: ES
+**
+** CREATION DATE: 21-JUN-1994
+**
+** $Id: cserve.h,v 1.1.1.1 1997/10/30 23:27:18 verghese Exp $
+**
+** MODIFICATION HISTORY:
+**
+** $Log: cserve.h,v $
+** Revision 1.1.1.1 1997/10/30 23:27:18 verghese
+** current 10/29/97
+**
+** Revision 1.6 1995/04/03 17:29:52 samberg
+** Add rd_bccfg_off
+**
+** Revision 1.5 1995/02/02 19:31:34 samberg
+** Added WR_BCACHE, deleted WR_BCCFG and WR_BCCTL
+**
+** Revision 1.4 1994/12/08 17:13:34 samberg
+** Add CSERVE_K_WR_BCCTL and CSERVE_K_WR_BCCFG
+**
+** Revision 1.3 1994/11/30 15:59:30 samberg
+** Use c-style comments for c compiler use
+**
+** Revision 1.2 1994/11/22 19:02:46 samberg
+** Add constants for ev4 backward compatibility
+**
+** Revision 1.2 1994/11/22 19:02:46 samberg
+** Add constants for ev4 backward compatibility
+**
+** Revision 1.1 1994/07/08 17:01:40 samberg
+** Initial revision
+**
+**
+*/
+
+/*
+** Console Service (cserve) sub-function codes:
+*/
+#define CSERVE_K_LDQP 0x01
+#define CSERVE_K_STQP 0x02
+#define CSERVE_K_JTOPAL 0x09
+#define CSERVE_K_WR_INT 0x0A
+#define CSERVE_K_RD_IMPURE 0x0B
+#define CSERVE_K_PUTC 0x0F
+#define CSERVE_K_WR_ICSR 0x10
+#define CSERVE_K_WR_ICCSR 0x10 /* for ev4 backwards compatibility */
+#define CSERVE_K_RD_ICSR 0x11
+#define CSERVE_K_RD_ICCSR 0x11 /* for ev4 backwards compatibility */
+#define CSERVE_K_RD_BCCTL 0x12
+#define CSERVE_K_RD_BCCFG 0x13
+
+#define CSERVE_K_WR_BCACHE 0x16
+
+#define CSERVE_K_RD_BCCFG_OFF 0x17
+#define CSERVE_K_JTOKERN 0x18
+
+
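As an illustrative aside (not part of this patch), the sub-function codes above are plain integers handed to the cserve CALL_PAL service; a minimal C sketch that maps a code back to a name for tracing, using only the constants defined above (only one symbol is used per aliased value, since WR_ICSR/WR_ICCSR and RD_ICSR/RD_ICCSR share the same codes):

    static inline const char *
    cserve_code_name(int code)
    {
        switch (code) {
        case CSERVE_K_LDQP:         return "LDQP";
        case CSERVE_K_STQP:         return "STQP";
        case CSERVE_K_JTOPAL:       return "JTOPAL";
        case CSERVE_K_WR_INT:       return "WR_INT";
        case CSERVE_K_RD_IMPURE:    return "RD_IMPURE";
        case CSERVE_K_PUTC:         return "PUTC";
        case CSERVE_K_WR_ICSR:      return "WR_ICSR";   /* also WR_ICCSR (ev4 alias) */
        case CSERVE_K_RD_ICSR:      return "RD_ICSR";   /* also RD_ICCSR (ev4 alias) */
        case CSERVE_K_RD_BCCTL:     return "RD_BCCTL";
        case CSERVE_K_RD_BCCFG:     return "RD_BCCFG";
        case CSERVE_K_WR_BCACHE:    return "WR_BCACHE";
        case CSERVE_K_RD_BCCFG_OFF: return "RD_BCCFG_OFF";
        case CSERVE_K_JTOKERN:      return "JTOKERN";
        default:                    return "UNKNOWN";
        }
    }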
diff --git a/system/alpha/palcode/dc21164.h b/system/alpha/palcode/dc21164.h
new file mode 100644
index 000000000..4bdb118c0
--- /dev/null
+++ b/system/alpha/palcode/dc21164.h
@@ -0,0 +1,961 @@
+/*
+ * VID: [T1.2] PT: [Fri Apr 21 16:47:11 1995] SF: [dc21164.h]
+ * TI: [/sae_users/cruz/bin/vice -iplatform.s -l// -p# -DEB164 -h -m -aeb164 ]
+ */
+#define __DC21164_LOADED 1
+/*
+*****************************************************************************
+** *
+** Copyright © 1993, 1994 *
+** by Digital Equipment Corporation, Maynard, Massachusetts. *
+** *
+** All Rights Reserved *
+** *
+** Permission is hereby granted to use, copy, modify and distribute *
+** this software and its documentation, in both source code and *
+** object code form, and without fee, for the purpose of distribution *
+** of this software or modifications of this software within products *
+** incorporating an integrated circuit implementing Digital's AXP *
+** architecture, regardless of the source of such integrated circuit, *
+** provided that the above copyright notice and this permission notice *
+** appear in all copies, and that the name of Digital Equipment *
+** Corporation not be used in advertising or publicity pertaining to *
+** distribution of the document or software without specific, written *
+** prior permission. *
+** *
+** Digital Equipment Corporation disclaims all warranties and/or *
+** guarantees with regard to this software, including all implied *
+** warranties of fitness for a particular purpose and merchantability, *
+** and makes no representations regarding the use of, or the results *
+** of the use of, the software and documentation in terms of correctness, *
+** accuracy, reliability, currentness or otherwise; and you rely on *
+** the software, documentation and results solely at your own risk. *
+** *
+** AXP is a trademark of Digital Equipment Corporation. *
+** *
+*****************************************************************************
+**
+** FACILITY:
+**
+** DECchip 21164 PALcode
+**
+** MODULE:
+**
+** dc21164.h
+**
+** MODULE DESCRIPTION:
+**
+** DECchip 21164 specific definitions
+**
+** AUTHOR: ER
+**
+** CREATION DATE: 24-Nov-1993
+**
+** $Id: dc21164.h,v 1.1.1.1 1997/10/30 23:27:18 verghese Exp $
+**
+** MODIFICATION HISTORY:
+**
+** $Log: dc21164.h,v $
+** Revision 1.1.1.1 1997/10/30 23:27:18 verghese
+** current 10/29/97
+**
+** Revision 1.15 1995/04/21 02:06:30 fdh
+** Replaced C++ style comments with Standard C style comments.
+**
+** Revision 1.14 1995/03/20 14:55:23 samberg
+** Add flushIc to make Roger Cruz's life easier.
+**
+** Revision 1.13 1994/12/14 15:52:48 samberg
+** Add slXmit and slRcv bit definitions
+**
+** Revision 1.12 1994/09/07 15:43:49 samberg
+** Changes for Makefile.vpp, take out OSF definition
+**
+** Revision 1.11 1994/07/26 17:38:35 samberg
+** Changes for SD164.
+**
+** Revision 1.10 1994/07/08 17:02:12 samberg
+** Changes to support platform specific additions
+**
+** Revision 1.8 1994/05/31 15:49:21 ericr
+** Moved ptKdebug from pt10 to pt13; pt10 is used in MCHK flows
+**
+** Revision 1.7 1994/05/26 19:29:51 ericr
+** Added BC_CONFIG definitions
+**
+** Revision 1.6 1994/05/25 14:27:25 ericr
+** Added physical bit to ldq_lp and stq_cp macros
+**
+** Revision 1.5 1994/05/20 18:07:50 ericr
+** Changed line comments to C++ style comment character
+**
+** Revision 1.4 1994/01/17 21:46:54 ericr
+** Added floating point register definitions
+**
+** Revision 1.3 1994/01/03 19:31:49 ericr
+** Added cache parity error status register definitions
+**
+** Revision 1.2 1993/12/22 20:42:35 eric
+** Added ptTrap, ptMisc and flag definitions
+** Added PAL shadow register definitions
+**
+** Revision 1.1 1993/12/16 21:55:05 eric
+** Initial revision
+**
+**
+**--
+*/
+
+
+/*
+**
+** INTERNAL PROCESSOR REGISTER DEFINITIONS
+**
+** The internal processor register definitions below are annotated
+** with one of the following symbols:
+**
+** RW - The register may be read and written
+** RO - The register may only be read
+** WO - The register may only be written
+**
+** For RO and WO registers, all bits and fields within the register are
+** also read-only or write-only. For RW registers, each bit or field
+** within the register is annotated with one of the following:
+**
+** RW - The bit/field may be read and written
+** RO - The bit/field may be read; writes are ignored
+** WO - The bit/field may be written; reads return UNPREDICTABLE
+** WZ - The bit/field may be written; reads return a zero value
+** W0C - The bit/field may be read; write-zero-to-clear
+** W1C - The bit/field may be read; write-one-to-clear
+** WA - The bit/field may be read; write-anything-to-clear
+** RC - The bit/field may be read, causing state to clear;
+** writes are ignored
+**
+*/
+
+
+/*
+**
+** Ibox IPR Definitions:
+**
+*/
+
+#define isr 0x100 /* RO - Interrupt Summary */
+#define itbTag 0x101 /* WO - ITB Tag */
+#define itbPte 0x102 /* RW - ITB Page Table Entry */
+#define itbAsn 0x103 /* RW - ITB Address Space Number */
+#define itbPteTemp 0x104 /* RO - ITB Page Table Entry Temporary */
+#define itbIa 0x105 /* WO - ITB Invalidate All */
+#define itbIap 0x106 /* WO - ITB Invalidate All Process */
+#define itbIs 0x107 /* WO - ITB Invalidate Single */
+#define sirr 0x108 /* RW - Software Interrupt Request */
+#define astrr 0x109 /* RW - Async. System Trap Request */
+#define aster 0x10A /* RW - Async. System Trap Enable */
+#define excAddr 0x10B /* RW - Exception Address */
+#define excSum 0x10C /* RW - Exception Summary */
+#define excMask 0x10D /* RO - Exception Mask */
+#define palBase 0x10E /* RW - PAL Base */
+#define ips 0x10F /* RW - Processor Status */
+#define ipl 0x110 /* RW - Interrupt Priority Level */
+#define intId 0x111 /* RO - Interrupt ID */
+#define iFaultVaForm 0x112 /* RO - Formatted Faulting VA */
+#define iVptBr 0x113 /* RW - I-Stream Virtual Page Table Base */
+#define hwIntClr 0x115 /* WO - Hardware Interrupt Clear */
+#define slXmit 0x116 /* WO - Serial Line Transmit */
+#define slRcv 0x117 /* RO - Serial Line Receive */
+#define icsr 0x118 /* RW - Ibox Control/Status */
+#define icFlush 0x119 /* WO - I-Cache Flush Control */
+#define flushIc 0x119 /* WO - I-Cache Flush Control (DC21064 Symbol) */
+#define icPerr 0x11A /* RW - I-Cache Parity Error Status */
+#define PmCtr 0x11C /* RW - Performance Counter */
+
+/*
+**
+** Ibox Control/Status Register (ICSR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <39> 1 TST RW,0 Assert Test Status
+** <38> 1 ISTA RO I-Cache BIST Status
+** <37> 1 DBS RW,1 Debug Port Select
+** <36> 1 FBD RW,0 Force Bad I-Cache Data Parity
+** <35> 1 FBT RW,0 Force Bad I-Cache Tag Parity
+** <34> 1 FMS RW,0 Force I-Cache Miss
+** <33> 1 SLE RW,0 Enable Serial Line Interrupts
+** <32> 1 CRDE RW,0 Enable Correctable Error Interrupts
+** <30> 1 SDE RW,0 Enable PAL Shadow Registers
+** <29:28> 2 SPE RW,0 Enable I-Stream Super Page Mode
+** <27> 1 HWE RW,0 Enable PALRES Instrs in Kernel Mode
+** <26> 1 FPE RW,0 Enable Floating Point Instructions
+** <25> 1 TMD RW,0 Disable Ibox Timeout Counter
+** <24> 1 TMM RW,0 Timeout Counter Mode
+**
+*/
+
+#define ICSR_V_TST 39
+#define ICSR_M_TST (1<<ICSR_V_TST)
+#define ICSR_V_ISTA 38
+#define ICSR_M_ISTA (1<<ICSR_V_ISTA)
+#define ICSR_V_DBS 37
+#define ICSR_M_DBS (1<<ICSR_V_DBS)
+#define ICSR_V_FBD 36
+#define ICSR_M_FBD (1<<ICSR_V_FBD)
+#define ICSR_V_FBT 35
+#define ICSR_M_FBT (1<<ICSR_V_FBT)
+#define ICSR_V_FMS 34
+#define ICSR_M_FMS (1<<ICSR_V_FMS)
+#define ICSR_V_SLE 33
+#define ICSR_M_SLE (1<<ICSR_V_SLE)
+#define ICSR_V_CRDE 32
+#define ICSR_M_CRDE (1<<ICSR_V_CRDE)
+#define ICSR_V_SDE 30
+#define ICSR_M_SDE (1<<ICSR_V_SDE)
+#define ICSR_V_SPE 28
+#define ICSR_M_SPE (3<<ICSR_V_SPE)
+#define ICSR_V_HWE 27
+#define ICSR_M_HWE (1<<ICSR_V_HWE)
+#define ICSR_V_FPE 26
+#define ICSR_M_FPE (1<<ICSR_V_FPE)
+#define ICSR_V_TMD 25
+#define ICSR_M_TMD (1<<ICSR_V_TMD)
+#define ICSR_V_TMM 24
+#define ICSR_M_TMM (1<<ICSR_V_TMM)
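As an illustrative aside (not part of this patch), the _V_/_M_ pairs above follow the usual bit-position/bit-mask convention, so a field is tested or set by masking a register image. A minimal C sketch, assuming the ICSR image is held in a uint64_t and using only fields below bit 31, where the header's plain (1<<n) masks are safe in C (the assembler evaluates them in 64-bit arithmetic):

    #include <stdint.h>

    /* Hypothetical helper: turn on the PAL shadow register (SDE) and
       floating point (FPE) enables in a software copy of ICSR. */
    static inline uint64_t
    icsr_enable_sde_fpe(uint64_t icsr_val)
    {
        icsr_val |= ICSR_M_SDE;    /* <30> enable PAL shadow registers */
        icsr_val |= ICSR_M_FPE;    /* <26> enable floating point instructions */
        return icsr_val;
    }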
+
+/*
+**
+** Serial Line Transmit Register (SL_XMIT)
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <7> 1 TMT WO,1 Serial line transmit data
+**
+*/
+
+#define SLXMIT_V_TMT 7
+#define SLXMIT_M_TMT (1<<SLXMIT_V_TMT)
+
+/*
+**
+** Serial Line Receive Register (SL_RCV)
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <6> 1 RCV RO Serial line receive data
+**
+*/
+
+#define SLRCV_V_RCV 6
+#define SLRCV_M_RCV (1<<SLRCV_V_RCV)
+
+/*
+**
+** Icache Parity Error Status Register (ICPERR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <13> 1 TMR W1C Timeout reset error
+** <12> 1 TPE W1C Tag parity error
+** <11> 1 DPE W1C Data parity error
+**
+*/
+
+#define ICPERR_V_TMR 13
+#define ICPERR_M_TMR (1<<ICPERR_V_TMR)
+#define ICPERR_V_TPE 12
+#define ICPERR_M_TPE (1<<ICPERR_V_TPE)
+#define ICPERR_V_DPE 11
+#define ICPERR_M_DPE (1<<ICPERR_V_DPE)
+
+#define ICPERR_M_ALL (ICPERR_M_TMR | ICPERR_M_TPE | ICPERR_M_DPE)
+
+/*
+**
+** Exception Summary Register (EXC_SUM) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <16> 1 IOV WA Integer overflow
+** <15> 1 INE WA Inexact result
+** <14> 1 UNF WA Underflow
+** <13> 1 FOV WA Overflow
+** <12> 1 DZE WA Division by zero
+** <11> 1 INV WA Invalid operation
+** <10> 1 SWC WA Software completion
+**
+*/
+
+#define EXC_V_IOV 16
+#define EXC_M_IOV (1<<EXC_V_IOV)
+#define EXC_V_INE 15
+#define EXC_M_INE (1<<EXC_V_INE)
+#define EXC_V_UNF 14
+#define EXC_M_UNF (1<<EXC_V_UNF)
+#define EXC_V_FOV 13
+#define EXC_M_FOV (1<<EXC_V_FOV)
+#define EXC_V_DZE 12
+#define EXC_M_DZE (1<<EXC_V_DZE)
+#define EXC_V_INV 11
+#define EXC_M_INV (1<<EXC_V_INV)
+#define EXC_V_SWC 10
+#define EXC_M_SWC (1<<EXC_V_SWC)
+
+/*
+**
+** Hardware Interrupt Clear Register (HWINT_CLR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <33> 1 SLC W1C Clear Serial Line interrupt
+** <32> 1 CRDC W1C Clear Correctable Read Data interrupt
+** <29> 1 PC2C W1C Clear Performance Counter 2 interrupt
+** <28> 1 PC1C W1C Clear Performance Counter 1 interrupt
+** <27> 1 PC0C W1C Clear Performance Counter 0 interrupt
+**
+*/
+
+#define HWINT_V_SLC 33
+#define HWINT_M_SLC (1<<HWINT_V_SLC)
+#define HWINT_V_CRDC 32
+#define HWINT_M_CRDC (1<<HWINT_V_CRDC)
+#define HWINT_V_PC2C 29
+#define HWINT_M_PC2C (1<<HWINT_V_PC2C)
+#define HWINT_V_PC1C 28
+#define HWINT_M_PC1C (1<<HWINT_V_PC1C)
+#define HWINT_V_PC0C 27
+#define HWINT_M_PC0C (1<<HWINT_V_PC0C)
+
+/*
+**
+** Interrupt Summary Register (ISR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <34> 1 HLT RO External Halt interrupt
+** <33> 1 SLI RO Serial Line interrupt
+** <32> 1 CRD RO Correctable ECC errors
+** <31> 1 MCK RO System Machine Check
+** <30> 1 PFL RO Power Fail
+** <29> 1 PC2 RO Performance Counter 2 interrupt
+** <28> 1 PC1 RO Performance Counter 1 interrupt
+** <27> 1 PC0 RO Performance Counter 0 interrupt
+** <23> 1 I23 RO External Hardware interrupt
+** <22> 1 I22 RO External Hardware interrupt
+** <21> 1 I21 RO External Hardware interrupt
+** <20> 1 I20 RO External Hardware interrupt
+** <19> 1 ATR RO Async. System Trap request
+** <18:4> 15 SIRR RO,0 Software Interrupt request
+** <3:0> 4 ASTRR RO Async. System Trap request (USEK)
+**
+**/
+
+#define ISR_V_HLT 34
+#define ISR_M_HLT (1<<ISR_V_HLT)
+#define ISR_V_SLI 33
+#define ISR_M_SLI (1<<ISR_V_SLI)
+#define ISR_V_CRD 32
+#define ISR_M_CRD (1<<ISR_V_CRD)
+#define ISR_V_MCK 31
+#define ISR_M_MCK (1<<ISR_V_MCK)
+#define ISR_V_PFL 30
+#define ISR_M_PFL (1<<ISR_V_PFL)
+#define ISR_V_PC2 29
+#define ISR_M_PC2 (1<<ISR_V_PC2)
+#define ISR_V_PC1 28
+#define ISR_M_PC1 (1<<ISR_V_PC1)
+#define ISR_V_PC0 27
+#define ISR_M_PC0 (1<<ISR_V_PC0)
+#define ISR_V_I23 23
+#define ISR_M_I23 (1<<ISR_V_I23)
+#define ISR_V_I22 22
+#define ISR_M_I22 (1<<ISR_V_I22)
+#define ISR_V_I21 21
+#define ISR_M_I21 (1<<ISR_V_I21)
+#define ISR_V_I20 20
+#define ISR_M_I20 (1<<ISR_V_I20)
+#define ISR_V_ATR 19
+#define ISR_M_ATR (1<<ISR_V_ATR)
+#define ISR_V_SIRR 4
+#define ISR_M_SIRR (0x7FFF<<ISR_V_SIRR)
+#define ISR_V_ASTRR 0
+#define ISR_M_ASTRR (0xF<<ISR_V_ASTRR)
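Purely as an illustration (not part of this patch), the masks above can pull the software-interrupt and AST request fields out of a saved ISR image; a minimal C sketch, assuming the raw register value is available as a uint64_t:

    #include <stdint.h>

    /* Extract the SIRR<18:4> and ASTRR<3:0> request fields from an ISR image. */
    static inline unsigned
    isr_sirr(uint64_t isr_val)  { return (unsigned)((isr_val & ISR_M_SIRR) >> ISR_V_SIRR); }

    static inline unsigned
    isr_astrr(uint64_t isr_val) { return (unsigned)((isr_val & ISR_M_ASTRR) >> ISR_V_ASTRR); }

    /* Bits at or above <32> (e.g. HLT, SLI, CRD) are tested via the _V_
       positions to avoid 32-bit shift overflow in C. */
    static inline int
    isr_halt_pending(uint64_t isr_val) { return (int)((isr_val >> ISR_V_HLT) & 1); }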
+
+/*
+**
+** Mbox and D-Cache IPR Definitions:
+**
+*/
+
+#define dtbAsn 0x200 /* WO - DTB Address Space Number */
+#define dtbCm 0x201 /* WO - DTB Current Mode */
+#define dtbTag 0x202 /* WO - DTB Tag */
+#define dtbPte 0x203 /* RW - DTB Page Table Entry */
+#define dtbPteTemp 0x204 /* RO - DTB Page Table Entry Temporary */
+#define mmStat 0x205 /* RO - D-Stream MM Fault Status */
+#define va 0x206 /* RO - Faulting Virtual Address */
+#define vaForm 0x207 /* RO - Formatted Virtual Address */
+#define mVptBr 0x208 /* WO - Mbox Virtual Page Table Base */
+#define dtbIap 0x209 /* WO - DTB Invalidate All Process */
+#define dtbIa 0x20A /* WO - DTB Invalidate All */
+#define dtbIs 0x20B /* WO - DTB Invalidate Single */
+#define altMode 0x20C /* WO - Alternate Mode */
+#define cc 0x20D /* WO - Cycle Counter */
+#define ccCtl 0x20E /* WO - Cycle Counter Control */
+#define mcsr 0x20F /* RW - Mbox Control Register */
+#define dcFlush 0x210 /* WO - Dcache Flush */
+#define dcPerr 0x212 /* RW - Dcache Parity Error Status */
+#define dcTestCtl 0x213 /* RW - Dcache Test Tag Control */
+#define dcTestTag 0x214 /* RW - Dcache Test Tag */
+#define dcTestTagTemp 0x215 /* RW - Dcache Test Tag Temporary */
+#define dcMode 0x216 /* RW - Dcache Mode */
+#define mafMode 0x217 /* RW - Miss Address File Mode */
+
+/*
+**
+** D-Stream MM Fault Status Register (MM_STAT) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <16:11> 6 OPCODE RO Opcode of faulting instruction
+** <10:06> 5 RA RO Ra field of faulting instruction
+** <5> 1 BAD_VA RO Bad virtual address
+** <4> 1 DTB_MISS RO Reference resulted in DTB miss
+** <3> 1 FOW RO Fault on write
+** <2> 1 FOR RO Fault on read
+** <1> 1 ACV RO Access violation
+** <0> 1 WR RO Reference type
+**
+*/
+
+#define MMSTAT_V_OPC 11
+#define MMSTAT_M_OPC (0x3F<<MMSTAT_V_OPC)
+#define MMSTAT_V_RA 6
+#define MMSTAT_M_RA (0x1F<<MMSTAT_V_RA)
+#define MMSTAT_V_BAD_VA 5
+#define MMSTAT_M_BAD_VA (1<<MMSTAT_V_BAD_VA)
+#define MMSTAT_V_DTB_MISS 4
+#define MMSTAT_M_DTB_MISS (1<<MMSTAT_V_DTB_MISS)
+#define MMSTAT_V_FOW 3
+#define MMSTAT_M_FOW (1<<MMSTAT_V_FOW)
+#define MMSTAT_V_FOR 2
+#define MMSTAT_M_FOR (1<<MMSTAT_V_FOR)
+#define MMSTAT_V_ACV 1
+#define MMSTAT_M_ACV (1<<MMSTAT_V_ACV)
+#define MMSTAT_V_WR 0
+#define MMSTAT_M_WR (1<<MMSTAT_V_WR)
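As an illustrative aside (not part of this patch), a minimal C sketch that decodes an MM_STAT image into its opcode, Ra, and fault-type fields using the masks above:

    #include <stdint.h>

    struct mm_stat_decoded {
        unsigned opcode;     /* <16:11> opcode of the faulting instruction */
        unsigned ra;         /* <10:6>  Ra field of the faulting instruction */
        int is_write;        /* <0>     reference type (1 = write) */
        int is_acv;          /* <1>     access violation */
        int is_dtb_miss;     /* <4>     reference resulted in a DTB miss */
    };

    static inline struct mm_stat_decoded
    mm_stat_decode(uint64_t mm_stat_val)
    {
        struct mm_stat_decoded d;
        d.opcode      = (unsigned)((mm_stat_val & MMSTAT_M_OPC) >> MMSTAT_V_OPC);
        d.ra          = (unsigned)((mm_stat_val & MMSTAT_M_RA) >> MMSTAT_V_RA);
        d.is_write    = (mm_stat_val & MMSTAT_M_WR) != 0;
        d.is_acv      = (mm_stat_val & MMSTAT_M_ACV) != 0;
        d.is_dtb_miss = (mm_stat_val & MMSTAT_M_DTB_MISS) != 0;
        return d;
    }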
+
+
+/*
+**
+** Mbox Control Register (MCSR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <5> 1 DBG1 RW,0 Mbox Debug Packet Select
+** <4> 1 E_BE RW,0 Ebox Big Endian mode enable
+** <3> 1 DBG0 RW,0 Debug Test Select
+** <2:1> 2 SP RW,0 Superpage mode enable
+** <0> 1 M_BE RW,0 Mbox Big Endian mode enable
+**
+*/
+
+#define MCSR_V_DBG1 5
+#define MCSR_M_DBG1 (1<<MCSR_V_DBG1)
+#define MCSR_V_E_BE 4
+#define MCSR_M_E_BE (1<<MCSR_V_E_BE)
+#define MCSR_V_DBG0 3
+#define MCSR_M_DBG0 (1<<MCSR_V_DBG0)
+#define MCSR_V_SP 1
+#define MCSR_M_SP (3<<MCSR_V_SP)
+#define MCSR_V_M_BE 0
+#define MCSR_M_M_BE (1<<MCSR_V_M_BE)
+
+/*
+**
+** Dcache Parity Error Status Register (DCPERR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <5> 1 TP1 RO Dcache bank 1 tag parity error
+** <4> 1 TP0 RO Dcache bank 0 tag parity error
+** <3> 1 DP1 RO Dcache bank 1 data parity error
+** <2> 1 DP0 RO Dcache bank 0 data parity error
+** <1> 1 LOCK W1C Locks/clears bits <5:2>
+** <0> 1 SEO W1C Second Dcache parity error occurred
+**
+*/
+
+#define DCPERR_V_TP1 5
+#define DCPERR_M_TP1 (1<<DCPERR_V_TP1)
+#define DCPERR_V_TP0 4
+#define DCPERR_M_TP0 (1<<DCPERR_V_TP0)
+#define DCPERR_V_DP1 3
+#define DCPERR_M_DP1 (1<<DCPERR_V_DP1)
+#define DCPERR_V_DP0 2
+#define DCPERR_M_DP0 (1<<DCPERR_V_DP0)
+#define DCPERR_V_LOCK 1
+#define DCPERR_M_LOCK (1<<DCPERR_V_LOCK)
+#define DCPERR_V_SEO 0
+#define DCPERR_M_SEO (1<<DCPERR_V_SEO)
+
+#define DCPERR_M_ALL (DCPERR_M_LOCK | DCPERR_M_SEO)
+
+/*
+**
+** Dcache Mode Register (DC_MODE) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <4> 1 DOA RO Hardware Dcache Disable
+** <3> 1 PERR_DIS RW,0 Disable Dcache Parity Error reporting
+** <2> 1 BAD_DP RW,0 Force Dcache data bad parity
+** <1> 1 FHIT RW,0 Force Dcache hit
+** <0> 1 ENA RW,0 Software Dcache Enable
+**
+*/
+
+#define DC_V_DOA 4
+#define DC_M_DOA (1<<DC_V_DOA)
+#define DC_V_PERR_DIS 3
+#define DC_M_PERR_DIS (1<<DC_V_PERR_DIS)
+#define DC_V_BAD_DP 2
+#define DC_M_BAD_DP (1<<DC_V_BAD_DP)
+#define DC_V_FHIT 1
+#define DC_M_FHIT (1<<DC_V_FHIT)
+#define DC_V_ENA 0
+#define DC_M_ENA (1<<DC_V_ENA)
+
+/*
+**
+** Miss Address File Mode Register (MAF_MODE) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <7> 1 WB RO,0 If set, pending WB request
+** <6> 1 DREAD RO,0 If set, pending D-read request
+**
+*/
+
+#define MAF_V_WB_PENDING 7
+#define MAF_M_WB_PENDING (1<<MAF_V_WB_PENDING)
+#define MAF_V_DREAD_PENDING 6
+#define MAF_M_DREAD_PENDING (1<<MAF_V_DREAD_PENDING)
+
+/*
+**
+** Cbox IPR Definitions:
+**
+*/
+
+#define scCtl 0x0A8 /* RW - Scache Control */
+#define scStat 0x0E8 /* RO - Scache Error Status */
+#define scAddr 0x188 /* RO - Scache Error Address */
+#define bcCtl 0x128 /* WO - Bcache/System Interface Control */
+#define bcCfg 0x1C8 /* WO - Bcache Configuration Parameters */
+#define bcTagAddr 0x108 /* RO - Bcache Tag */
+#define eiStat 0x168 /* RO - Bcache/System Error Status */
+#define eiAddr 0x148 /* RO - Bcache/System Error Address */
+#define fillSyn 0x068 /* RO - Fill Syndrome */
+#define ldLock 0x1E8 /* RO - LDx_L Address */
+
+/*
+**
+** Scache Control Register (SC_CTL) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <15:13> 3 SET_EN RW,1 Set enable
+** <12> 1 BLK_SIZE RW,1 Scache/Bcache block size select
+** <11:08> 4 FB_DP RW,0 Force bad data parity
+** <07:02> 6 TAG_STAT RW Tag status and parity
+** <1> 1 FLUSH RW,0 If set, clear all tag valid bits
+** <0> 1 FHIT RW,0 Force hits
+**
+*/
+
+#define SC_V_SET_EN 13
+#define SC_M_SET_EN (7<<SC_V_SET_EN)
+#define SC_V_BLK_SIZE 12
+#define SC_M_BLK_SIZE (1<<SC_V_BLK_SIZE)
+#define SC_V_FB_DP 8
+#define SC_M_FB_DP (0xF<<SC_V_FB_DP)
+#define SC_V_TAG_STAT 2
+#define SC_M_TAG_STAT (0x3F<<SC_V_TAG_STAT)
+#define SC_V_FLUSH 1
+#define SC_M_FLUSH (1<<SC_V_FLUSH)
+#define SC_V_FHIT 0
+#define SC_M_FHIT (1<<SC_V_FHIT)
+
+/*
+**
+** Bcache Control Register (BC_CTL) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <27> 1 DIS_VIC_BUF WO,0 Disable Scache victim buffer
+** <26> 1 DIS_BAF_BYP WO,0 Disable speculative Bcache reads
+** <25> 1 DBG_MUX_SEL WO,0 Debug MUX select
+** <24:19> 6 PM_MUX_SEL WO,0 Performance counter MUX select
+** <18:17> 2 BC_WAVE WO,0 Number of cycles of wave pipelining
+** <16> 1 TL_PIPE_LATCH WO,0 Pipe system control pins
+** <15> 1 EI_DIS_ERR WO,1 Disable ECC (parity) error
+** <14:13> 2 BC_BAD_DAT WO,0 Force bad data
+** <12:08> 5 BC_TAG_STAT WO Bcache tag status and parity
+** <7> 1 BC_FHIT WO,0 Bcache force hit
+** <6> 1 EI_ECC WO,1 ECC or byte parity mode
+** <5> 1 VTM_FIRST WO,1 Drive out victim block address first
+** <4> 1 CORR_FILL_DAT WO,1 Correct fill data
+** <3> 1 EI_CMD_GRP3 WO,0 Drive MB command to external pins
+** <2> 1 EI_CMD_GRP2 WO,0 Drive LOCK & SET_DIRTY to ext. pins
+** <1> 1 ALLOC_CYC WO,0 Allocate cycle for non-cached LDs.
+** <0> 1 BC_ENA W0,0 Bcache enable
+**
+*/
+#define BC_V_DIS_SC_VIC_BUF 27
+#define BC_M_DIS_SC_VIC_BUF (1<<BC_V_DIS_SC_VIC_BUF)
+#define BC_V_DIS_BAF_BYP 26
+#define BC_M_DIS_BAF_BYP (1<<BC_V_DIS_BAF_BYP)
+#define BC_V_DBG_MUX_SEL 25
+#define BC_M_DBG_MUX_SEL (1<<BC_V_DBG_MUX_SEL)
+#define BC_V_PM_MUX_SEL 19
+#define BC_M_PM_MUX_SEL (0x3F<<BC_V_PM_MUX_SEL)
+#define BC_V_BC_WAVE 17
+#define BC_M_BC_WAVE (3<<BC_V_BC_WAVE)
+#define BC_V_TL_PIPE_LATCH 16
+#define BC_M_TL_PIPE_LATCH (1<<BC_V_TL_PIPE_LATCH)
+#define BC_V_EI_DIS_ERR 15
+#define BC_M_EI_DIS_ERR (1<<BC_V_EI_DIS_ERR)
+#define BC_V_BC_BAD_DAT 13
+#define BC_M_BC_BAD_DAT (3<<BC_V_BC_BAD_DAT)
+#define BC_V_BC_TAG_STAT 8
+#define BC_M_BC_TAG_STAT (0x1F<<BC_V_BC_TAG_STAT)
+#define BC_V_BC_FHIT 7
+#define BC_M_BC_FHIT (1<<BC_V_BC_FHIT)
+#define BC_V_EI_ECC_OR_PARITY 6
+#define BC_M_EI_ECC_OR_PARITY (1<<BC_V_EI_ECC_OR_PARITY)
+#define BC_V_VTM_FIRST 5
+#define BC_M_VTM_FIRST (1<<BC_V_VTM_FIRST)
+#define BC_V_CORR_FILL_DAT 4
+#define BC_M_CORR_FILL_DAT (1<<BC_V_CORR_FILL_DAT)
+#define BC_V_EI_CMD_GRP3 3
+#define BC_M_EI_CMD_GRP3 (1<<BC_V_EI_CMD_GRP3)
+#define BC_V_EI_CMD_GRP2 2
+#define BC_M_EI_CMD_GRP2 (1<<BC_V_EI_CMD_GRP2)
+#define BC_V_ALLOC_CYC 1
+#define BC_M_ALLOC_CYC (1<<BC_V_ALLOC_CYC)
+#define BC_V_BC_ENA 0
+#define BC_M_BC_ENA (1<<BC_V_BC_ENA)
+
+#define BC_K_DFAULT \
+ (((BC_M_EI_DIS_ERR) | \
+ (BC_M_EI_ECC_OR_PARITY) | \
+ (BC_M_VTM_FIRST) | \
+ (BC_M_CORR_FILL_DAT))>>1)
+/*
+**
+** Bcache Configuration Register (BC_CONFIG) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <35:29> 7 RSVD WO Reserved - Must Be Zero
+** <28:20> 9 WE_CTL WO,0 Bcache write enable control
+** <19:19> 1 RSVD WO,0 Reserved - Must Be Zero
+** <18:16> 3 WE_OFF WO,1 Bcache fill write enable pulse offset
+** <15:15> 1 RSVD WO,0 Reserved - Must Be Zero
+** <14:12> 3 RD_WR_SPC WO,7 Bcache private read/write spacing
+** <11:08> 4 WR_SPD WO,4 Bcache write speed in CPU cycles
+** <07:04> 4 RD_SPD WO,4 Bcache read speed in CPU cycles
+** <03:03> 1 RSVD WO,0 Reserved - Must Be Zero
+** <02:00> 3 SIZE WO,1 Bcache size
+*/
+#define BC_V_WE_CTL 20
+#define BC_M_WE_CTL (0x1FF<<BC_V_WE_CTL)
+#define BC_V_WE_OFF 16
+#define BC_M_WE_OFF (0x7<<BC_V_WE_OFF)
+#define BC_V_RD_WR_SPC 12
+#define BC_M_RD_WR_SPC (0x7<<BC_V_RD_WR_SPC)
+#define BC_V_WR_SPD 8
+#define BC_M_WR_SPD (0xF<<BC_V_WR_SPD)
+#define BC_V_RD_SPD 4
+#define BC_M_RD_SPD (0xF<<BC_V_RD_SPD)
+#define BC_V_SIZE 0
+#define BC_M_SIZE (0x7<<BC_V_SIZE)
+
+#define BC_K_CONFIG \
+ ((0x1<<BC_V_WE_OFF) | \
+ (0x7<<BC_V_RD_WR_SPC) | \
+ (0x4<<BC_V_WR_SPD) | \
+ (0x4<<BC_V_RD_SPD) | \
+ (0x1<<BC_V_SIZE))
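For reference (not part of this patch), the default above assembles to WE_OFF=1, RD_WR_SPC=7, WR_SPD=4, RD_SPD=4, SIZE=1, i.e. (0x1<<16)|(0x7<<12)|(0x4<<8)|(0x4<<4)|0x1 = 0x17441; a one-line C11 check of that arithmetic:

    _Static_assert(BC_K_CONFIG == 0x17441, "BC_K_CONFIG default value");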
+
+/*
+**
+** DECchip 21164 Privileged Architecture Library Entry Offsets:
+**
+** Entry Name Offset (Hex)
+**
+** RESET 0000
+** IACCVIO 0080
+** INTERRUPT 0100
+** ITB_MISS 0180
+** DTB_MISS (Single) 0200
+** DTB_MISS (Double) 0280
+** UNALIGN 0300
+** D_FAULT 0380
+** MCHK 0400
+** OPCDEC 0480
+** ARITH 0500
+** FEN 0580
+** CALL_PAL (Privileged) 2000
+** CALL_PAL (Unprivileged) 3000
+**
+*/
+
+#define PAL_RESET_ENTRY 0x0000
+#define PAL_IACCVIO_ENTRY 0x0080
+#define PAL_INTERRUPT_ENTRY 0x0100
+#define PAL_ITB_MISS_ENTRY 0x0180
+#define PAL_DTB_MISS_ENTRY 0x0200
+#define PAL_DOUBLE_MISS_ENTRY 0x0280
+#define PAL_UNALIGN_ENTRY 0x0300
+#define PAL_D_FAULT_ENTRY 0x0380
+#define PAL_MCHK_ENTRY 0x0400
+#define PAL_OPCDEC_ENTRY 0x0480
+#define PAL_ARITH_ENTRY 0x0500
+#define PAL_FEN_ENTRY 0x0580
+#define PAL_CALL_PAL_PRIV_ENTRY 0x2000
+#define PAL_CALL_PAL_UNPRIV_ENTRY 0x3000
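As an illustrative aside (not part of this patch), each handler lives at the PAL base plus the fixed offset above, so a simulator or debugger can compute dispatch targets directly; a minimal C sketch, with pal_base standing in for the value held in the palBase IPR:

    #include <stdint.h>

    /* Hypothetical helper: compute the dispatch address of a PALcode
       entry point from the PAL base and one of the offsets above. */
    static inline uint64_t
    pal_entry_address(uint64_t pal_base, uint64_t entry_offset)
    {
        return pal_base + entry_offset;   /* e.g. pal_base + PAL_DTB_MISS_ENTRY */
    }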
+
+/*
+**
+** Architecturally Reserved Opcode (PALRES) Definitions:
+**
+*/
+
+#define mtpr hw_mtpr
+#define mfpr hw_mfpr
+
+#define ldl_a hw_ldl/a
+#define ldq_a hw_ldq/a
+#define stq_a hw_stq/a
+#define stl_a hw_stl/a
+
+#define ldl_p hw_ldl/p
+#define ldq_p hw_ldq/p
+#define stl_p hw_stl/p
+#define stq_p hw_stq/p
+
+/*
+** Virtual PTE fetch variants of HW_LD.
+*/
+#define ld_vpte hw_ldq/v
+
+/*
+** Physical mode load-lock and store-conditional variants of
+** HW_LD and HW_ST.
+*/
+
+#define ldq_lp hw_ldq/pl
+#define stq_cp hw_stq/pc
+
+/*
+**
+** General Purpose Register Definitions:
+**
+*/
+
+#define r0 $0
+#define r1 $1
+#define r2 $2
+#define r3 $3
+#define r4 $4
+#define r5 $5
+#define r6 $6
+#define r7 $7
+#define r8 $8
+#define r9 $9
+#define r10 $10
+#define r11 $11
+#define r12 $12
+#define r13 $13
+#define r14 $14
+#define r15 $15
+#define r16 $16
+#define r17 $17
+#define r18 $18
+#define r19 $19
+#define r20 $20
+#define r21 $21
+#define r22 $22
+#define r23 $23
+#define r24 $24
+#define r25 $25
+#define r26 $26
+#define r27 $27
+#define r28 $28
+#define r29 $29
+#define r30 $30
+#define r31 $31
+
+/*
+**
+** Floating Point Register Definitions:
+**
+*/
+
+#define f0 $f0
+#define f1 $f1
+#define f2 $f2
+#define f3 $f3
+#define f4 $f4
+#define f5 $f5
+#define f6 $f6
+#define f7 $f7
+#define f8 $f8
+#define f9 $f9
+#define f10 $f10
+#define f11 $f11
+#define f12 $f12
+#define f13 $f13
+#define f14 $f14
+#define f15 $f15
+#define f16 $f16
+#define f17 $f17
+#define f18 $f18
+#define f19 $f19
+#define f20 $f20
+#define f21 $f21
+#define f22 $f22
+#define f23 $f23
+#define f24 $f24
+#define f25 $f25
+#define f26 $f26
+#define f27 $f27
+#define f28 $f28
+#define f29 $f29
+#define f30 $f30
+#define f31 $f31
+
+/*
+**
+** PAL Temporary Register Definitions:
+**
+*/
+
+#define pt0 0x140
+#define pt1 0x141
+#define pt2 0x142
+#define pt3 0x143
+#define pt4 0x144
+#define pt5 0x145
+#define pt6 0x146
+#define pt7 0x147
+#define pt8 0x148
+#define pt9 0x149
+#define pt10 0x14A
+#define pt11 0x14B
+#define pt12 0x14C
+#define pt13 0x14D
+#define pt14 0x14E
+#define pt15 0x14F
+#define pt16 0x150
+#define pt17 0x151
+#define pt18 0x152
+#define pt19 0x153
+#define pt20 0x154
+#define pt21 0x155
+#define pt22 0x156
+#define pt23 0x157
+
+/*
+** PAL Shadow Registers:
+**
+** The DECchip 21164 shadows r8-r14 and r25 when in PALmode and
+** ICSR<SDE> = 1.
+*/
+
+#define p0 r8 /* ITB/DTB Miss Scratch */
+#define p1 r9 /* ITB/DTB Miss Scratch */
+#define p2 r10 /* ITB/DTB Miss Scratch */
+#define p3 r11
+#define ps r11 /* Processor Status */
+#define p4 r12 /* Local Scratch */
+#define p5 r13 /* Local Scratch */
+#define p6 r14 /* Local Scratch */
+#define p7 r25 /* Local Scratch */
+
+/*
+** SRM Defined State Definitions:
+*/
+
+/*
+** This table is an accounting of the DECchip 21164 storage used to
+** implement the SRM defined state for OSF/1.
+**
+** IPR Name Internal Storage
+** -------- ----------------
+** Processor Status ps, dtbCm, ipl, r11
+** Program Counter Ibox
+** Interrupt Entry ptEntInt
+** Arith Trap Entry ptEntArith
+** MM Fault Entry ptEntMM
+** Unaligned Access Entry ptEntUna
+** Instruction Fault Entry ptEntIF
+** Call System Entry ptEntSys
+** User Stack Pointer ptUsp
+** Kernel Stack Pointer ptKsp
+** Kernel Global Pointer ptKgp
+** System Value ptSysVal
+** Page Table Base Register ptPtbr
+** Virtual Page Table Base iVptBr, mVptBr
+** Process Control Block Base ptPcbb
+** Address Space Number itbAsn, dtbAsn
+** Cycle Counter cc, ccCtl
+** Float Point Enable icsr
+** Lock Flag Cbox/System
+** Unique PCB
+** Who-Am-I ptWhami
+*/
+
+#define ptEntUna pt2 /* Unaligned Access Dispatch Entry */
+#define ptImpure pt3 /* Pointer To PAL Scratch Area */
+#define ptEntIF pt7 /* Instruction Fault Dispatch Entry */
+#define ptIntMask pt8 /* Interrupt Enable Mask */
+#define ptEntSys pt9 /* Call System Dispatch Entry */
+#define ptTrap pt11
+#define ptEntInt pt11 /* Hardware Interrupt Dispatch Entry */
+#define ptEntArith pt12 /* Arithmetic Trap Dispatch Entry */
+#if defined(KDEBUG)
+#define ptEntDbg pt13 /* Kernel Debugger Dispatch Entry */
+#endif /* KDEBUG */
+#define ptMisc pt16 /* Miscellaneous Flags */
+#define ptWhami pt16 /* Who-Am-I Register Pt16<15:8> */
+#define ptMces pt16 /* Machine Check Error Summary Pt16<4:0> */
+#define ptSysVal pt17 /* Per-Processor System Value */
+#define ptUsp pt18 /* User Stack Pointer */
+#define ptKsp pt19 /* Kernel Stack Pointer */
+#define ptPtbr pt20 /* Page Table Base Register */
+#define ptEntMM pt21 /* MM Fault Dispatch Entry */
+#define ptKgp pt22 /* Kernel Global Pointer */
+#define ptPcbb pt23 /* Process Control Block Base */
+
+/*
+**
+** Miscellaneous PAL State Flags (ptMisc) Bit Summary
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <55:48> 8 SWAP Swap PALcode flag -- character 'S'
+** <47:32> 16 MCHK Machine Check Error code
+** <31:16> 16 SCB System Control Block vector
+** <15:08> 8 WHAMI Who-Am-I identifier
+** <04:00> 5 MCES Machine Check Error Summary bits
+**
+*/
+
+#define PT16_V_MCES 0
+#define PT16_V_WHAMI 8
+#define PT16_V_SCB 16
+#define PT16_V_MCHK 32
+#define PT16_V_SWAP 48
+
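As an illustrative aside (not part of this patch), the ptMisc layout above packs several small fields into one 64-bit PAL temporary; a minimal C sketch of the corresponding extraction, where the field-width masks are hypothetical helpers (only the _V_ positions are defined above):

    #include <stdint.h>

    /* Hypothetical width masks matching the ptMisc bit summary above. */
    #define PT16_M_MCES  (0x1Full   << PT16_V_MCES)   /* <04:00> */
    #define PT16_M_WHAMI (0xFFull   << PT16_V_WHAMI)  /* <15:08> */
    #define PT16_M_SCB   (0xFFFFull << PT16_V_SCB)    /* <31:16> */
    #define PT16_M_MCHK  (0xFFFFull << PT16_V_MCHK)   /* <47:32> */
    #define PT16_M_SWAP  (0xFFull   << PT16_V_SWAP)   /* <55:48> */

    static inline unsigned pt16_whami(uint64_t pt16) { return (unsigned)((pt16 & PT16_M_WHAMI) >> PT16_V_WHAMI); }
    static inline unsigned pt16_mces(uint64_t pt16)  { return (unsigned)((pt16 & PT16_M_MCES)  >> PT16_V_MCES); }
    static inline unsigned pt16_mchk(uint64_t pt16)  { return (unsigned)((pt16 & PT16_M_MCHK)  >> PT16_V_MCHK); }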
diff --git a/system/alpha/palcode/dc21164FromGasSources.h b/system/alpha/palcode/dc21164FromGasSources.h
new file mode 100644
index 000000000..9b3389269
--- /dev/null
+++ b/system/alpha/palcode/dc21164FromGasSources.h
@@ -0,0 +1,965 @@
+#ifndef DC21164FROMGASSOURCES_INCLUDED
+#define DC21164FROMGASSOURCES_INCLUDED 1
+
+/*
+*****************************************************************************
+** *
+** Copyright © 1993, 1994 *
+** by Digital Equipment Corporation, Maynard, Massachusetts. *
+** *
+** All Rights Reserved *
+** *
+** Permission is hereby granted to use, copy, modify and distribute *
+** this software and its documentation, in both source code and *
+** object code form, and without fee, for the purpose of distribution *
+** of this software or modifications of this software within products *
+** incorporating an integrated circuit implementing Digital's AXP *
+** architecture, regardless of the source of such integrated circuit, *
+** provided that the above copyright notice and this permission notice *
+** appear in all copies, and that the name of Digital Equipment *
+** Corporation not be used in advertising or publicity pertaining to *
+** distribution of the document or software without specific, written *
+** prior permission. *
+** *
+** Digital Equipment Corporation disclaims all warranties and/or *
+** guarantees with regard to this software, including all implied *
+** warranties of fitness for a particular purpose and merchantability, *
+** and makes no representations regarding the use of, or the results *
+** of the use of, the software and documentation in terms of correctness, *
+** accuracy, reliability, currentness or otherwise; and you rely on *
+** the software, documentation and results solely at your own risk. *
+** *
+** AXP is a trademark of Digital Equipment Corporation. *
+** *
+*****************************************************************************
+**
+** FACILITY:
+**
+** DECchip 21164 PALcode
+**
+** MODULE:
+**
+** dc21164.h
+**
+** MODULE DESCRIPTION:
+**
+** DECchip 21164 specific definitions
+**
+** AUTHOR: ER
+**
+** CREATION DATE: 24-Nov-1993
+**
+** $Id: dc21164FromGasSources.h,v 1.1.1.1 1997/10/30 23:27:19 verghese Exp $
+**
+** MODIFICATION HISTORY:
+**
+** $Log: dc21164FromGasSources.h,v $
+** Revision 1.1.1.1 1997/10/30 23:27:19 verghese
+** current 10/29/97
+**
+** Revision 1.1 1995/11/18 01:45:46 boyle
+** Initial revision
+**
+** Revision 1.15 1995/04/21 02:06:30 fdh
+** Replaced C++ style comments with Standard C style comments.
+**
+** Revision 1.14 1995/03/20 14:55:23 samberg
+** Add flushIc to make Roger Cruz's life easier.
+**
+** Revision 1.13 1994/12/14 15:52:48 samberg
+** Add slXmit and slRcv bit definitions
+**
+** Revision 1.12 1994/09/07 15:43:49 samberg
+** Changes for Makefile.vpp, take out OSF definition
+**
+** Revision 1.11 1994/07/26 17:38:35 samberg
+** Changes for SD164.
+**
+** Revision 1.10 1994/07/08 17:02:12 samberg
+** Changes to support platform specific additions
+**
+** Revision 1.8 1994/05/31 15:49:21 ericr
+** Moved ptKdebug from pt10 to pt13; pt10 is used in MCHK flows
+**
+** Revision 1.7 1994/05/26 19:29:51 ericr
+** Added BC_CONFIG definitions
+**
+** Revision 1.6 1994/05/25 14:27:25 ericr
+** Added physical bit to ldq_lp and stq_cp macros
+**
+** Revision 1.5 1994/05/20 18:07:50 ericr
+** Changed line comments to C++ style comment character
+**
+** Revision 1.4 1994/01/17 21:46:54 ericr
+** Added floating point register definitions
+**
+** Revision 1.3 1994/01/03 19:31:49 ericr
+** Added cache parity error status register definitions
+**
+** Revision 1.2 1993/12/22 20:42:35 eric
+** Added ptTrap, ptMisc and flag definitions
+** Added PAL shadow register definitions
+**
+** Revision 1.1 1993/12/16 21:55:05 eric
+** Initial revision
+**
+**
+**--
+*/
+
+
+/*
+**
+** INTERNAL PROCESSOR REGISTER DEFINITIONS
+**
+** The internal processor register definitions below are annotated
+** with one of the following symbols:
+**
+** RW - The register may be read and written
+** RO - The register may only be read
+** WO - The register may only be written
+**
+** For RO and WO registers, all bits and fields within the register are
+** also read-only or write-only. For RW registers, each bit or field
+** within the register is annotated with one of the following:
+**
+** RW - The bit/field may be read and written
+** RO - The bit/field may be read; writes are ignored
+** WO - The bit/field may be written; reads return UNPREDICTABLE
+** WZ - The bit/field may be written; reads return a zero value
+** W0C - The bit/field may be read; write-zero-to-clear
+** W1C - The bit/field may be read; write-one-to-clear
+** WA - The bit/field may be read; write-anything-to-clear
+** RC - The bit/field may be read, causing state to clear;
+** writes are ignored
+**
+*/
+
+
+/*
+**
+** Ibox IPR Definitions:
+**
+*/
+
+// replaced by ev5_defs.h #define isr 0x100 /* RO - Interrupt Summary */
+#define itbTag 0x101 /* WO - ITB Tag */
+#define itbPte 0x102 /* RW - ITB Page Table Entry */
+#define itbAsn 0x103 /* RW - ITB Address Space Number */
+#define itbPteTemp 0x104 /* RO - ITB Page Table Entry Temporary */
+#define itbIa 0x105 /* WO - ITB Invalidate All */
+#define itbIap 0x106 /* WO - ITB Invalidate All Process */
+#define itbIs 0x107 /* WO - ITB Invalidate Single */
+// replaced by ev5_defs.h #define sirr 0x108 /* RW - Software Interrupt Request */
+// replaced by ev5_defs.h #define astrr 0x109 /* RW - Async. System Trap Request */
+// replaced by ev5_defs.h #define aster 0x10A /* RW - Async. System Trap Enable */
+#define excAddr 0x10B /* RW - Exception Address */
+#define excSum 0x10C /* RW - Exception Summary */
+#define excMask 0x10D /* RO - Exception Mask */
+#define palBase 0x10E /* RW - PAL Base */
+#define ips 0x10F /* RW - Processor Status */
+// replaced by ev5_defs.h #define ipl 0x110 /* RW - Interrupt Priority Level */
+#define intId 0x111 /* RO - Interrupt ID */
+#define iFaultVaForm 0x112 /* RO - Formatted Faulting VA */
+#define iVptBr 0x113 /* RW - I-Stream Virtual Page Table Base */
+#define hwIntClr 0x115 /* WO - Hardware Interrupt Clear */
+#define slXmit 0x116 /* WO - Serial Line Transmit */
+#define slRcv 0x117 /* RO - Serial Line Receive */
+// replaced by ev5_defs.h #define icsr 0x118 /* RW - Ibox Control/Status */
+#define icFlush 0x119 /* WO - I-Cache Flush Control */
+#define flushIc 0x119 /* WO - I-Cache Flush Control (DC21064 Symbol) */
+#define icPerr 0x11A /* RW - I-Cache Parity Error Status */
+#define PmCtr 0x11C /* RW - Performance Counter */
+
+/*
+**
+** Ibox Control/Status Register (ICSR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <39> 1 TST RW,0 Assert Test Status
+** <38> 1 ISTA RO I-Cache BIST Status
+** <37> 1 DBS RW,1 Debug Port Select
+** <36> 1 FBD RW,0 Force Bad I-Cache Data Parity
+** <35> 1 FBT RW,0 Force Bad I-Cache Tag Parity
+** <34> 1 FMS RW,0 Force I-Cache Miss
+** <33> 1 SLE RW,0 Enable Serial Line Interrupts
+** <32> 1 CRDE RW,0 Enable Correctable Error Interrupts
+** <30> 1 SDE RW,0 Enable PAL Shadow Registers
+** <29:28> 2 SPE RW,0 Enable I-Stream Super Page Mode
+** <27> 1 HWE RW,0 Enable PALRES Instrs in Kernel Mode
+** <26> 1 FPE RW,0 Enable Floating Point Instructions
+** <25> 1 TMD RW,0 Disable Ibox Timeout Counter
+** <24> 1 TMM RW,0 Timeout Counter Mode
+**
+*/
+
+#define ICSR_V_TST 39
+#define ICSR_M_TST (1<<ICSR_V_TST)
+#define ICSR_V_ISTA 38
+#define ICSR_M_ISTA (1<<ICSR_V_ISTA)
+#define ICSR_V_DBS 37
+#define ICSR_M_DBS (1<<ICSR_V_DBS)
+#define ICSR_V_FBD 36
+#define ICSR_M_FBD (1<<ICSR_V_FBD)
+#define ICSR_V_FBT 35
+#define ICSR_M_FBT (1<<ICSR_V_FBT)
+#define ICSR_V_FMS 34
+#define ICSR_M_FMS (1<<ICSR_V_FMS)
+#define ICSR_V_SLE 33
+#define ICSR_M_SLE (1<<ICSR_V_SLE)
+#define ICSR_V_CRDE 32
+#define ICSR_M_CRDE (1<<ICSR_V_CRDE)
+#define ICSR_V_SDE 30
+#define ICSR_M_SDE (1<<ICSR_V_SDE)
+#define ICSR_V_SPE 28
+#define ICSR_M_SPE (3<<ICSR_V_SPE)
+#define ICSR_V_HWE 27
+#define ICSR_M_HWE (1<<ICSR_V_HWE)
+#define ICSR_V_FPE 26
+#define ICSR_M_FPE (1<<ICSR_V_FPE)
+#define ICSR_V_TMD 25
+#define ICSR_M_TMD (1<<ICSR_V_TMD)
+#define ICSR_V_TMM 24
+#define ICSR_M_TMM (1<<ICSR_V_TMM)
+
+/*
+**
+** Serial Line Transmit Register (SL_XMIT)
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <7> 1 TMT WO,1 Serial line transmit data
+**
+*/
+
+#define SLXMIT_V_TMT 7
+#define SLXMIT_M_TMT (1<<SLXMIT_V_TMT)
+
+/*
+**
+** Serial Line Receive Register (SL_RCV)
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <6> 1 RCV RO Serial line receive data
+**
+*/
+
+#define SLRCV_V_RCV 6
+#define SLRCV_M_RCV (1<<SLRCV_V_RCV)
+
+/*
+**
+** Icache Parity Error Status Register (ICPERR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <13> 1 TMR W1C Timeout reset error
+** <12> 1 TPE W1C Tag parity error
+** <11> 1 DPE W1C Data parity error
+**
+*/
+
+#define ICPERR_V_TMR 13
+#define ICPERR_M_TMR (1<<ICPERR_V_TMR)
+#define ICPERR_V_TPE 12
+#define ICPERR_M_TPE (1<<ICPERR_V_TPE)
+#define ICPERR_V_DPE 11
+#define ICPERR_M_DPE (1<<ICPERR_V_DPE)
+
+#define ICPERR_M_ALL (ICPERR_M_TMR | ICPERR_M_TPE | ICPERR_M_DPE)
+
+/*
+**
+** Exception Summary Register (EXC_SUM) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <16> 1 IOV WA Integer overflow
+** <15> 1 INE WA Inexact result
+** <14> 1 UNF WA Underflow
+** <13> 1 FOV WA Overflow
+** <12> 1 DZE WA Division by zero
+** <11> 1 INV WA Invalid operation
+** <10> 1 SWC WA Software completion
+**
+*/
+
+#define EXC_V_IOV 16
+#define EXC_M_IOV (1<<EXC_V_IOV)
+#define EXC_V_INE 15
+#define EXC_M_INE (1<<EXC_V_INE)
+#define EXC_V_UNF 14
+#define EXC_M_UNF (1<<EXC_V_UNF)
+#define EXC_V_FOV 13
+#define EXC_M_FOV (1<<EXC_V_FOV)
+#define EXC_V_DZE 12
+#define EXC_M_DZE (1<<EXC_V_DZE)
+#define EXC_V_INV 11
+#define EXC_M_INV (1<<EXC_V_INV)
+#define EXC_V_SWC 10
+#define EXC_M_SWC (1<<EXC_V_SWC)
+
+/*
+**
+** Hardware Interrupt Clear Register (HWINT_CLR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <33> 1 SLC W1C Clear Serial Line interrupt
+** <32> 1 CRDC W1C Clear Correctable Read Data interrupt
+** <29> 1 PC2C W1C Clear Performance Counter 2 interrupt
+** <28> 1 PC1C W1C Clear Performance Counter 1 interrupt
+** <27> 1 PC0C W1C Clear Performance Counter 0 interrupt
+**
+*/
+
+#define HWINT_V_SLC 33
+#define HWINT_M_SLC (1<<HWINT_V_SLC)
+#define HWINT_V_CRDC 32
+#define HWINT_M_CRDC (1<<HWINT_V_CRDC)
+#define HWINT_V_PC2C 29
+#define HWINT_M_PC2C (1<<HWINT_V_PC2C)
+#define HWINT_V_PC1C 28
+#define HWINT_M_PC1C (1<<HWINT_V_PC1C)
+#define HWINT_V_PC0C 27
+#define HWINT_M_PC0C (1<<HWINT_V_PC0C)
+
+/*
+**
+** Interrupt Summary Register (ISR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <34> 1 HLT RO External Halt interrupt
+** <33> 1 SLI RO Serial Line interrupt
+** <32> 1 CRD RO Correctable ECC errors
+** <31> 1 MCK RO System Machine Check
+** <30> 1 PFL RO Power Fail
+** <29> 1 PC2 RO Performance Counter 2 interrupt
+** <28> 1 PC1 RO Performance Counter 1 interrupt
+** <27> 1 PC0 RO Performance Counter 0 interrupt
+** <23> 1 I23 RO External Hardware interrupt
+** <22> 1 I22 RO External Hardware interrupt
+** <21> 1 I21 RO External Hardware interrupt
+** <20> 1 I20 RO External Hardware interrupt
+** <19> 1 ATR RO Async. System Trap request
+** <18:4> 15 SIRR RO,0 Software Interrupt request
+** <3:0> 4 ASTRR RO Async. System Trap request (USEK)
+**
+**/
+
+#define ISR_V_HLT 34
+#define ISR_M_HLT (1<<ISR_V_HLT)
+#define ISR_V_SLI 33
+#define ISR_M_SLI (1<<ISR_V_SLI)
+#define ISR_V_CRD 32
+#define ISR_M_CRD (1<<ISR_V_CRD)
+#define ISR_V_MCK 31
+#define ISR_M_MCK (1<<ISR_V_MCK)
+#define ISR_V_PFL 30
+#define ISR_M_PFL (1<<ISR_V_PFL)
+#define ISR_V_PC2 29
+#define ISR_M_PC2 (1<<ISR_V_PC2)
+#define ISR_V_PC1 28
+#define ISR_M_PC1 (1<<ISR_V_PC1)
+#define ISR_V_PC0 27
+#define ISR_M_PC0 (1<<ISR_V_PC0)
+#define ISR_V_I23 23
+#define ISR_M_I23 (1<<ISR_V_I23)
+#define ISR_V_I22 22
+#define ISR_M_I22 (1<<ISR_V_I22)
+#define ISR_V_I21 21
+#define ISR_M_I21 (1<<ISR_V_I21)
+#define ISR_V_I20 20
+#define ISR_M_I20 (1<<ISR_V_I20)
+#define ISR_V_ATR 19
+#define ISR_M_ATR (1<<ISR_V_ATR)
+#define ISR_V_SIRR 4
+#define ISR_M_SIRR (0x7FFF<<ISR_V_SIRR)
+#define ISR_V_ASTRR 0
+#define ISR_M_ASTRR (0xF<<ISR_V_ASTRR)
+
+/*
+**
+** Mbox and D-Cache IPR Definitions:
+**
+*/
+
+#define dtbAsn 0x200 /* WO - DTB Address Space Number */
+#define dtbCm 0x201 /* WO - DTB Current Mode */
+#define dtbTag 0x202 /* WO - DTB Tag */
+#define dtbPte 0x203 /* RW - DTB Page Table Entry */
+#define dtbPteTemp 0x204 /* RO - DTB Page Table Entry Temporary */
+#define mmStat 0x205 /* RO - D-Stream MM Fault Status */
+// replaced by ev5_defs.h #define va 0x206 /* RO - Faulting Virtual Address */
+#define vaForm 0x207 /* RO - Formatted Virtual Address */
+#define mVptBr 0x208 /* WO - Mbox Virtual Page Table Base */
+#define dtbIap 0x209 /* WO - DTB Invalidate All Process */
+#define dtbIa 0x20A /* WO - DTB Invalidate All */
+#define dtbIs 0x20B /* WO - DTB Invalidate Single */
+#define altMode 0x20C /* WO - Alternate Mode */
+// replaced by ev5_defs.h #define cc 0x20D /* WO - Cycle Counter */
+#define ccCtl 0x20E /* WO - Cycle Counter Control */
+// replaced by ev5_defs.h #define mcsr 0x20F /* RW - Mbox Control Register */
+#define dcFlush 0x210 /* WO - Dcache Flush */
+#define dcPerr 0x212 /* RW - Dcache Parity Error Status */
+#define dcTestCtl 0x213 /* RW - Dcache Test Tag Control */
+#define dcTestTag 0x214 /* RW - Dcache Test Tag */
+#define dcTestTagTemp 0x215 /* RW - Dcache Test Tag Temporary */
+#define dcMode 0x216 /* RW - Dcache Mode */
+#define mafMode 0x217 /* RW - Miss Address File Mode */
+
+/*
+**
+** D-Stream MM Fault Status Register (MM_STAT) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <16:11> 6 OPCODE RO Opcode of faulting instruction
+** <10:06> 5 RA RO Ra field of faulting instruction
+** <5> 1 BAD_VA RO Bad virtual address
+** <4> 1 DTB_MISS RO Reference resulted in DTB miss
+** <3> 1 FOW RO Fault on write
+** <2> 1 FOR RO Fault on read
+** <1> 1 ACV RO Access violation
+** <0> 1 WR RO Reference type
+**
+*/
+
+#define MMSTAT_V_OPC 11
+#define MMSTAT_M_OPC (0x3F<<MMSTAT_V_OPC)
+#define MMSTAT_V_RA 6
+#define MMSTAT_M_RA (0x1F<<MMSTAT_V_RA)
+#define MMSTAT_V_BAD_VA 5
+#define MMSTAT_M_BAD_VA (1<<MMSTAT_V_BAD_VA)
+#define MMSTAT_V_DTB_MISS 4
+#define MMSTAT_M_DTB_MISS (1<<MMSTAT_V_DTB_MISS)
+#define MMSTAT_V_FOW 3
+#define MMSTAT_M_FOW (1<<MMSTAT_V_FOW)
+#define MMSTAT_V_FOR 2
+#define MMSTAT_M_FOR (1<<MMSTAT_V_FOR)
+#define MMSTAT_V_ACV 1
+#define MMSTAT_M_ACV (1<<MMSTAT_V_ACV)
+#define MMSTAT_V_WR 0
+#define MMSTAT_M_WR (1<<MMSTAT_V_WR)
+
+
+/*
+**
+** Mbox Control Register (MCSR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <5> 1 DBG1 RW,0 Mbox Debug Packet Select
+** <4> 1 E_BE RW,0 Ebox Big Endian mode enable
+** <3> 1 DBG0 RW,0 Debug Test Select
+** <2:1> 2 SP RW,0 Superpage mode enable
+** <0> 1 M_BE RW,0 Mbox Big Endian mode enable
+**
+*/
+
+#define MCSR_V_DBG1 5
+#define MCSR_M_DBG1 (1<<MCSR_V_DBG1)
+#define MCSR_V_E_BE 4
+#define MCSR_M_E_BE (1<<MCSR_V_E_BE)
+#define MCSR_V_DBG0 3
+#define MCSR_M_DBG0 (1<<MCSR_V_DBG0)
+#define MCSR_V_SP 1
+#define MCSR_M_SP (3<<MCSR_V_SP)
+#define MCSR_V_M_BE 0
+#define MCSR_M_M_BE (1<<MCSR_V_M_BE)
+
+/*
+**
+** Dcache Parity Error Status Register (DCPERR) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ------------------------------------
+** <5> 1 TP1 RO Dcache bank 1 tag parity error
+** <4> 1 TP0 RO Dcache bank 0 tag parity error
+** <3> 1 DP1 RO Dcache bank 1 data parity error
+** <2> 1 DP0 RO Dcache bank 0 data parity error
+** <1> 1 LOCK W1C Locks/clears bits <5:2>
+** <0> 1 SEO W1C Second Dcache parity error occurred
+**
+*/
+
+#define DCPERR_V_TP1 5
+#define DCPERR_M_TP1 (1<<DCPERR_V_TP1)
+#define DCPERR_V_TP0 4
+#define DCPERR_M_TP0 (1<<DCPERR_V_TP0)
+#define DCPERR_V_DP1 3
+#define DCPERR_M_DP1 (1<<DCPERR_V_DP1)
+#define DCPERR_V_DP0 2
+#define DCPERR_M_DP0 (1<<DCPERR_V_DP0)
+#define DCPERR_V_LOCK 1
+#define DCPERR_M_LOCK (1<<DCPERR_V_LOCK)
+#define DCPERR_V_SEO 0
+#define DCPERR_M_SEO (1<<DCPERR_V_SEO)
+
+#define DCPERR_M_ALL (DCPERR_M_LOCK | DCPERR_M_SEO)
+
+/*
+**
+** Dcache Mode Register (DC_MODE) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <4> 1 DOA RO Hardware Dcache Disable
+** <3> 1 PERR_DIS RW,0 Disable Dcache Parity Error reporting
+** <2> 1 BAD_DP RW,0 Force Dcache data bad parity
+** <1> 1 FHIT RW,0 Force Dcache hit
+** <0> 1 ENA RW,0 Software Dcache Enable
+**
+*/
+
+#define DC_V_DOA 4
+#define DC_M_DOA (1<<DC_V_DOA)
+#define DC_V_PERR_DIS 3
+#define DC_M_PERR_DIS (1<<DC_V_PERR_DIS)
+#define DC_V_BAD_DP 2
+#define DC_M_BAD_DP (1<<DC_V_BAD_DP)
+#define DC_V_FHIT 1
+#define DC_M_FHIT (1<<DC_V_FHIT)
+#define DC_V_ENA 0
+#define DC_M_ENA (1<<DC_V_ENA)
+
+/*
+**
+** Miss Address File Mode Register (MAF_MODE) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <7> 1 WB RO,0 If set, pending WB request
+** <6> 1 DREAD RO,0 If set, pending D-read request
+**
+*/
+
+#define MAF_V_WB_PENDING 7
+#define MAF_M_WB_PENDING (1<<MAF_V_WB_PENDING)
+#define MAF_V_DREAD_PENDING 6
+#define MAF_M_DREAD_PENDING (1<<MAF_V_DREAD_PENDING)
+
+/*
+**
+** Cbox IPR Definitions:
+**
+*/
+
+#define scCtl 0x0A8 /* RW - Scache Control */
+#define scStat 0x0E8 /* RO - Scache Error Status */
+#define scAddr 0x188 /* RO - Scache Error Address */
+#define bcCtl 0x128 /* WO - Bcache/System Interface Control */
+#define bcCfg 0x1C8 /* WO - Bcache Configuration Parameters */
+#define bcTagAddr 0x108 /* RO - Bcache Tag */
+#define eiStat 0x168 /* RO - Bcache/System Error Status */
+#define eiAddr 0x148 /* RO - Bcache/System Error Address */
+#define fillSyn 0x068 /* RO - Fill Syndrome */
+#define ldLock 0x1E8 /* RO - LDx_L Address */
+
+/*
+**
+** Scache Control Register (SC_CTL) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <15:13> 3 SET_EN RW,1 Set enable
+** <12> 1 BLK_SIZE RW,1 Scache/Bcache block size select
+** <11:08> 4 FB_DP RW,0 Force bad data parity
+** <07:02> 6 TAG_STAT RW Tag status and parity
+** <1> 1 FLUSH RW,0 If set, clear all tag valid bits
+** <0> 1 FHIT RW,0 Force hits
+**
+*/
+
+#define SC_V_SET_EN 13
+#define SC_M_SET_EN (7<<SC_V_SET_EN)
+#define SC_V_BLK_SIZE 12
+#define SC_M_BLK_SIZE (1<<SC_V_BLK_SIZE)
+#define SC_V_FB_DP 8
+#define SC_M_FB_DP (0xF<<SC_V_FB_DP)
+#define SC_V_TAG_STAT 2
+#define SC_M_TAG_STAT (0x3F<<SC_V_TAG_STAT)
+#define SC_V_FLUSH 1
+#define SC_M_FLUSH (1<<SC_V_FLUSH)
+#define SC_V_FHIT 0
+#define SC_M_FHIT (1<<SC_V_FHIT)
+
+/*
+**
+** Bcache Control Register (BC_CTL) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <27> 1 DIS_VIC_BUF WO,0 Disable Scache victim buffer
+** <26> 1 DIS_BAF_BYP WO,0 Disable speculative Bcache reads
+** <25> 1 DBG_MUX_SEL WO,0 Debug MUX select
+** <24:19> 6 PM_MUX_SEL WO,0 Performance counter MUX select
+** <18:17> 2 BC_WAVE WO,0 Number of cycles of wave pipelining
+** <16> 1 TL_PIPE_LATCH WO,0 Pipe system control pins
+** <15> 1 EI_DIS_ERR WO,1 Disable ECC (parity) error
+** <14:13> 2 BC_BAD_DAT WO,0 Force bad data
+** <12:08> 5 BC_TAG_STAT WO Bcache tag status and parity
+** <7> 1 BC_FHIT WO,0 Bcache force hit
+** <6> 1 EI_ECC WO,1 ECC or byte parity mode
+** <5> 1 VTM_FIRST WO,1 Drive out victim block address first
+** <4> 1 CORR_FILL_DAT WO,1 Correct fill data
+** <3> 1 EI_CMD_GRP3 WO,0 Drive MB command to external pins
+** <2> 1 EI_CMD_GRP2 WO,0 Drive LOCK & SET_DIRTY to ext. pins
+** <1> 1 ALLOC_CYC WO,0 Allocate cycle for non-cached LDs.
+** <0> 1 BC_ENA WO,0 Bcache enable
+**
+*/
+#define BC_V_DIS_SC_VIC_BUF 27
+#define BC_M_DIS_SC_VIC_BUF (1<<BC_V_DIS_SC_VIC_BUF)
+#define BC_V_DIS_BAF_BYP 26
+#define BC_M_DIS_BAF_BYP (1<<BC_V_DIS_BAF_BYP)
+#define BC_V_DBG_MUX_SEL 25
+#define BC_M_DBG_MUX_SEL (1<<BC_V_DBG_MUX_SEL)
+#define BC_V_PM_MUX_SEL 19
+#define BC_M_PM_MUX_SEL (0x3F<<BC_V_PM_MUX_SEL)
+#define BC_V_BC_WAVE 17
+#define BC_M_BC_WAVE (3<<BC_V_BC_WAVE)
+#define BC_V_TL_PIPE_LATCH 16
+#define BC_M_TL_PIPE_LATCH (1<<BC_V_TL_PIPE_LATCH)
+#define BC_V_EI_DIS_ERR 15
+#define BC_M_EI_DIS_ERR (1<<BC_V_EI_DIS_ERR)
+#define BC_V_BC_BAD_DAT 13
+#define BC_M_BC_BAD_DAT (3<<BC_V_BC_BAD_DAT)
+#define BC_V_BC_TAG_STAT 8
+#define BC_M_BC_TAG_STAT (0x1F<<BC_V_BC_TAG_STAT)
+#define BC_V_BC_FHIT 7
+#define BC_M_BC_FHIT (1<<BC_V_BC_FHIT)
+#define BC_V_EI_ECC_OR_PARITY 6
+#define BC_M_EI_ECC_OR_PARITY (1<<BC_V_EI_ECC_OR_PARITY)
+#define BC_V_VTM_FIRST 5
+#define BC_M_VTM_FIRST (1<<BC_V_VTM_FIRST)
+#define BC_V_CORR_FILL_DAT 4
+#define BC_M_CORR_FILL_DAT (1<<BC_V_CORR_FILL_DAT)
+#define BC_V_EI_CMD_GRP3 3
+#define BC_M_EI_CMD_GRP3 (1<<BC_V_EI_CMD_GRP3)
+#define BC_V_EI_CMD_GRP2 2
+#define BC_M_EI_CMD_GRP2 (1<<BC_V_EI_CMD_GRP2)
+#define BC_V_ALLOC_CYC 1
+#define BC_M_ALLOC_CYC (1<<BC_V_ALLOC_CYC)
+#define BC_V_BC_ENA 0
+#define BC_M_BC_ENA (1<<BC_V_BC_ENA)
+
+#define BC_K_DFAULT \
+ (((BC_M_EI_DIS_ERR) | \
+ (BC_M_EI_ECC_OR_PARITY) | \
+ (BC_M_VTM_FIRST) | \
+ (BC_M_CORR_FILL_DAT))>>1)
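+
+/*
+** For reference (derived from the definitions above): the four masks being
+** OR'd together are bits <15>, <6>, <5> and <4>, i.e. 0x8070, so
+** BC_K_DFAULT evaluates to 0x8070 >> 1 = 0x4038.
+*/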
+/*
+**
+** Bcache Configuration Register (BC_CONFIG) Bit Summary
+**
+** Extent Size Name Type Function
+** ------ ---- ---- ---- ---------------------------------
+** <35:29> 7 RSVD WO Reserved - Must Be Zero
+** <28:20> 9 WE_CTL WO,0 Bcache write enable control
+** <19:19> 1 RSVD WO,0 Reserved - Must Be Zero
+** <18:16> 3 WE_OFF WO,1 Bcache fill write enable pulse offset
+** <15:15> 1 RSVD WO,0 Reserved - Must Be Zero
+** <14:12> 3 RD_WR_SPC WO,7 Bcache private read/write spacing
+** <11:08> 4 WR_SPD WO,4 Bcache write speed in CPU cycles
+** <07:04> 4 RD_SPD WO,4 Bcache read speed in CPU cycles
+** <03:03> 1 RSVD WO,0 Reserved - Must Be Zero
+** <02:00> 3 SIZE WO,1 Bcache size
+*/
+#define BC_V_WE_CTL 20
+#define BC_M_WE_CTL (0x1FF<<BC_V_WE_CTL)
+#define BC_V_WE_OFF 16
+#define BC_M_WE_OFF (0x7<<BC_V_WE_OFF)
+#define BC_V_RD_WR_SPC 12
+#define BC_M_RD_WR_SPC (0x7<<BC_V_RD_WR_SPC)
+#define BC_V_WR_SPD 8
+#define BC_M_WR_SPD (0xF<<BC_V_WR_SPD)
+#define BC_V_RD_SPD 4
+#define BC_M_RD_SPD (0xF<<BC_V_RD_SPD)
+#define BC_V_SIZE 0
+#define BC_M_SIZE (0x7<<BC_V_SIZE)
+
+#define BC_K_CONFIG \
+ ((0x1<<BC_V_WE_OFF) | \
+ (0x7<<BC_V_RD_WR_SPC) | \
+ (0x4<<BC_V_WR_SPD) | \
+ (0x4<<BC_V_RD_SPD) | \
+ (0x1<<BC_V_SIZE))
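+
+/*
+** For reference (derived from the definitions above):
+**
+**   BC_K_CONFIG = (0x1<<16) | (0x7<<12) | (0x4<<8) | (0x4<<4) | (0x1<<0)
+**               = 0x17441
+**
+** i.e. the reset values from the table above: WE_OFF=1, RD_WR_SPC=7,
+** WR_SPD=4, RD_SPD=4 and SIZE=1 (a 1MB Bcache per the bc_config_k_size
+** encoding).
+*/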
+
+/*
+**
+** DECchip 21164 Privileged Architecture Library Entry Offsets:
+**
+** Entry Name Offset (Hex)
+**
+** RESET 0000
+** IACCVIO 0080
+** INTERRUPT 0100
+** ITB_MISS 0180
+** DTB_MISS (Single) 0200
+** DTB_MISS (Double) 0280
+** UNALIGN 0300
+** D_FAULT 0380
+** MCHK 0400
+** OPCDEC 0480
+** ARITH 0500
+** FEN 0580
+** CALL_PAL (Privileged) 2000
+** CALL_PAL (Unprivileged) 3000
+**
+*/
+
+#define PAL_RESET_ENTRY 0x0000
+#define PAL_IACCVIO_ENTRY 0x0080
+#define PAL_INTERRUPT_ENTRY 0x0100
+#define PAL_ITB_MISS_ENTRY 0x0180
+#define PAL_DTB_MISS_ENTRY 0x0200
+#define PAL_DOUBLE_MISS_ENTRY 0x0280
+#define PAL_UNALIGN_ENTRY 0x0300
+#define PAL_D_FAULT_ENTRY 0x0380
+#define PAL_MCHK_ENTRY 0x0400
+#define PAL_OPCDEC_ENTRY 0x0480
+#define PAL_ARITH_ENTRY 0x0500
+#define PAL_FEN_ENTRY 0x0580
+#define PAL_CALL_PAL_PRIV_ENTRY 0x2000
+#define PAL_CALL_PAL_UNPRIV_ENTRY 0x3000
+
+/*
+**
+** Architecturally Reserved Opcode (PALRES) Definitions:
+**
+*/
+
+#define mtpr hw_mtpr
+#define mfpr hw_mfpr
+
+#define ldl_a hw_ldl/a
+#define ldq_a hw_ldq/a
+#define stq_a hw_stq/a
+#define stl_a hw_stl/a
+
+#define ldl_p hw_ldl/p
+#define ldq_p hw_ldq/p
+#define stl_p hw_stl/p
+#define stq_p hw_stq/p
+
+/*
+** Virtual PTE fetch variants of HW_LD.
+*/
+#define ld_vpte hw_ldq/v
+
+/*
+** Physical mode load-lock and store-conditional variants of
+** HW_LD and HW_ST.
+*/
+
+#define ldq_lp hw_ldq/pl
+#define stq_cp hw_stq/pc
+
+/*
+**
+** General Purpose Register Definitions:
+**
+*/
+
+#define r0 $0
+#define r1 $1
+#define r2 $2
+#define r3 $3
+#define r4 $4
+#define r5 $5
+#define r6 $6
+#define r7 $7
+#define r8 $8
+#define r9 $9
+#define r10 $10
+#define r11 $11
+#define r12 $12
+#define r13 $13
+#define r14 $14
+#define r15 $15
+#define r16 $16
+#define r17 $17
+#define r18 $18
+#define r19 $19
+#define r20 $20
+#define r21 $21
+#define r22 $22
+#define r23 $23
+#define r24 $24
+#define r25 $25
+#define r26 $26
+#define r27 $27
+#define r28 $28
+#define r29 $29
+#define r30 $30
+#define r31 $31
+
+/*
+**
+** Floating Point Register Definitions:
+**
+*/
+
+#define f0 $f0
+#define f1 $f1
+#define f2 $f2
+#define f3 $f3
+#define f4 $f4
+#define f5 $f5
+#define f6 $f6
+#define f7 $f7
+#define f8 $f8
+#define f9 $f9
+#define f10 $f10
+#define f11 $f11
+#define f12 $f12
+#define f13 $f13
+#define f14 $f14
+#define f15 $f15
+#define f16 $f16
+#define f17 $f17
+#define f18 $f18
+#define f19 $f19
+#define f20 $f20
+#define f21 $f21
+#define f22 $f22
+#define f23 $f23
+#define f24 $f24
+#define f25 $f25
+#define f26 $f26
+#define f27 $f27
+#define f28 $f28
+#define f29 $f29
+#define f30 $f30
+#define f31 $f31
+
+/*
+**
+** PAL Temporary Register Definitions:
+**
+*/
+
+// covered by fetch distribution..pb Nov/95
+
+// #define pt0 0x140
+// #define pt1 0x141
+// #define pt2 0x142
+// #define pt3 0x143
+// #define pt4 0x144
+// #define pt5 0x145
+// #define pt6 0x146
+// #define pt7 0x147
+// #define pt8 0x148
+// #define pt9 0x149
+// #define pt10 0x14A
+// #define pt11 0x14B
+// #define pt12 0x14C
+// #define pt13 0x14D
+// #define pt14 0x14E
+// #define pt15 0x14F
+// #define pt16 0x150
+// #define pt17 0x151
+// #define pt18 0x152
+// #define pt19 0x153
+// #define pt20 0x154
+// #define pt21 0x155
+// #define pt22 0x156
+// #define pt23 0x157
+
+/*
+** PAL Shadow Registers:
+**
+** The DECchip 21164 shadows r8-r14 and r25 when in PALmode and
+** ICSR<SDE> = 1.
+*/
+
+#define p0 r8 /* ITB/DTB Miss Scratch */
+#define p1 r9 /* ITB/DTB Miss Scratch */
+#define p2 r10 /* ITB/DTB Miss Scratch */
+#define p3 r11
+// #define ps r11 /* Processor Status */
+#define p4 r12 /* Local Scratch */
+#define p5 r13 /* Local Scratch */
+#define p6 r14 /* Local Scratch */
+#define p7 r25 /* Local Scratch */
+
+/*
+** SRM Defined State Definitions:
+*/
+
+/*
+** This table is an accounting of the DECchip 21164 storage used to
+** implement the SRM defined state for OSF/1.
+**
+** IPR Name Internal Storage
+** -------- ----------------
+** Processor Status ps, dtbCm, ipl, r11
+** Program Counter Ibox
+** Interrupt Entry ptEntInt
+** Arith Trap Entry ptEntArith
+** MM Fault Entry ptEntMM
+** Unaligned Access Entry ptEntUna
+** Instruction Fault Entry ptEntIF
+** Call System Entry ptEntSys
+** User Stack Pointer ptUsp
+** Kernel Stack Pointer ptKsp
+** Kernel Global Pointer ptKgp
+** System Value ptSysVal
+** Page Table Base Register ptPtbr
+** Virtual Page Table Base iVptBr, mVptBr
+** Process Control Block Base ptPcbb
+** Address Space Number itbAsn, dtbAsn
+** Cycle Counter cc, ccCtl
+** Float Point Enable icsr
+** Lock Flag Cbox/System
+** Unique PCB
+** Who-Am-I ptWhami
+*/
+
+#define ptEntUna pt2 /* Unaligned Access Dispatch Entry */
+#define ptImpure pt3 /* Pointer To PAL Scratch Area */
+#define ptEntIF pt7 /* Instruction Fault Dispatch Entry */
+#define ptIntMask pt8 /* Interrupt Enable Mask */
+#define ptEntSys pt9 /* Call System Dispatch Entry */
+#define ptTrap pt11
+#define ptEntInt pt11 /* Hardware Interrupt Dispatch Entry */
+#define ptEntArith pt12 /* Arithmetic Trap Dispatch Entry */
+#if defined(KDEBUG)
+#define ptEntDbg pt13 /* Kernel Debugger Dispatch Entry */
+#endif /* KDEBUG */
+#define ptMisc pt16 /* Miscellaneous Flags */
+#define ptWhami pt16 /* Who-Am-I Register Pt16<15:8> */
+#define ptMces pt16 /* Machine Check Error Summary Pt16<4:0> */
+#define ptSysVal pt17 /* Per-Processor System Value */
+#define ptUsp pt18 /* User Stack Pointer */
+#define ptKsp pt19 /* Kernel Stack Pointer */
+#define ptPtbr pt20 /* Page Table Base Register */
+#define ptEntMM pt21 /* MM Fault Dispatch Entry */
+#define ptKgp pt22 /* Kernel Global Pointer */
+#define ptPcbb pt23 /* Process Control Block Base */
+
+/*
+**
+** Miscellaneous PAL State Flags (ptMisc) Bit Summary
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <55:48> 8 SWAP Swap PALcode flag -- character 'S'
+** <47:32> 16 MCHK Machine Check Error code
+** <31:16> 16 SCB System Control Block vector
+** <15:08> 8 WHAMI Who-Am-I identifier
+** <04:00> 5 MCES Machine Check Error Summary bits
+**
+*/
+
+#define PT16_V_MCES 0
+#define PT16_V_WHAMI 8
+#define PT16_V_SCB 16
+#define PT16_V_MCHK 32
+#define PT16_V_SWAP 48
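+
+/*
+** Illustrative only, not part of the original definitions: hypothetical
+** helpers for extracting the WHAMI (8-bit) and MCES (5-bit) fields from a
+** ptMisc image using the bit positions above.
+*/
+#if 0
+#define PT16_GET_WHAMI(x) (((x) >> PT16_V_WHAMI) & 0xFF)
+#define PT16_GET_MCES(x)  (((x) >> PT16_V_MCES) & 0x1F)
+#endif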
+
+#endif /* DC21164FROMGASSOURCES_INCLUDED */
diff --git a/system/alpha/palcode/ev5_alpha_defs.h b/system/alpha/palcode/ev5_alpha_defs.h
new file mode 100644
index 000000000..33dc3c57a
--- /dev/null
+++ b/system/alpha/palcode/ev5_alpha_defs.h
@@ -0,0 +1,323 @@
+#ifndef EV5_ALPHA_DEFS_INCLUDED
+#define EV5_ALPHA_DEFS_INCLUDED 1
+
+// from ev5_alpha_defs.mar from Lance's fetch directory
+// Lower-caseified and $ signs removed ... pb Nov/95
+
+// .MACRO _ALPHADEFS
+// ALPHADEF_VER == 6 ; Flag the version number of this file.
+// .ENDM
+// .MACRO _PSDEF,_GBL
+// _DEFINI PS,_GBL
+//;+
+//; PS Layout - PS
+//; Loc Size name function
+//; ------ ------ ______ -----------------------------------
+//; <31:29> 3 SA stack alignment
+//; <31:13> 24 RES Reserved MBZ
+//; <12:8> 5 IPL Priority level
+//; <7> 1 VMM Virtual Mach Monitor
+//; <6:5> 2 RES Reserved MBZ
+//; <4:3> 2 CM Current Mode
+//; <2> 1 IP Interrupt Pending
+//; <1:0> 2 SW Software bits
+//;-
+
+#define ps_v_sw 0
+#define ps_m_sw (3<<ps_v_sw)
+
+#define ps_v_ip 2
+#define ps_m_ip (1<<ps_v_ip)
+
+#define ps_v_cm 3
+#define ps_m_cm (3<<ps_v_cm)
+
+#define ps_v_vmm 7
+#define ps_m_vmm (1<<ps_v_vmm)
+
+#define ps_v_ipl 8
+#define ps_m_ipl (0x1f<<ps_v_ipl)
+
+#define ps_v_sp (0x38)
+#define ps_m_sp (0x3f<<ps_v_sp)
+
+
+#define ps_c_kern (0x00)
+#define ps_c_exec (0x08)
+#define ps_c_supr (0x10)
+#define ps_c_user (0x18)
+#define ps_c_ipl0 (0x0000)
+#define ps_c_ipl1 (0x0100)
+#define ps_c_ipl2 (0x0200)
+#define ps_c_ipl3 (0x0300)
+#define ps_c_ipl4 (0x0400)
+#define ps_c_ipl5 (0x0500)
+#define ps_c_ipl6 (0x0600)
+#define ps_c_ipl7 (0x0700)
+#define ps_c_ipl8 (0x0800)
+#define ps_c_ipl9 (0x0900)
+#define ps_c_ipl10 (0x0A00)
+#define ps_c_ipl11 (0x0B00)
+#define ps_c_ipl12 (0x0C00)
+#define ps_c_ipl13 (0x0D00)
+#define ps_c_ipl14 (0x0E00)
+#define ps_c_ipl15 (0x0F00)
+#define ps_c_ipl16 (0x1000)
+#define ps_c_ipl17 (0x1100)
+#define ps_c_ipl18 (0x1200)
+#define ps_c_ipl19 (0x1300)
+#define ps_c_ipl20 (0x1400)
+#define ps_c_ipl21 (0x1500)
+#define ps_c_ipl22 (0x1600)
+#define ps_c_ipl23 (0x1700)
+#define ps_c_ipl24 (0x1800)
+#define ps_c_ipl25 (0x1900)
+#define ps_c_ipl26 (0x1A00)
+#define ps_c_ipl27 (0x1B00)
+#define ps_c_ipl28 (0x1C00)
+#define ps_c_ipl29 (0x1D00)
+#define ps_c_ipl30 (0x1E00)
+#define ps_c_ipl31 (0x1F00)
+
+// _DEFEND PS,_GBL,DEF
+// .ENDM
+//;+
+//; PTE layout - symbol prefix PTE_
+//;
+//; Loc Size name function
+//; ------ ------ ------ -----------------------------------
+//; <63:32> 32 PFN Page Frame Number
+//; <31:16> 16 SOFT Bits reserved for software use
+//; <15> 1 UWE User write enable
+//; <14> 1 SWE Super write enable
+//; <13> 1 EWE Exec write enable
+//; <12> 1 KWE Kernel write enable
+//; <11> 1 URE User read enable
+//; <10> 1 SRE Super read enable
+//; <9> 1 ERE Exec read enable
+//; <8> 1 KRE Kernel read enable
+//; <7:6> 2 RES Reserved SBZ
+//; <5> 1 HPF Huge Page Flag
+//; <4> 1 ASM Wild card address space number match
+//; <3> 1 FOE Fault On execute
+//; <2> 1 FOW Fault On Write
+//; <1> 1 FOR Fault On Read
+//; <0> 1 V valid bit
+//;-
+// .MACRO _PTEDEF,_GBL
+// _DEFINI PTE,_GBL
+
+#define pte_v_pfn 32
+#define pte_m_soft (0xFFFF0000)
+#define pte_v_soft 16
+#define pte_m_uwe (0x8000)
+#define pte_v_uwe 15
+#define pte_m_swe (0x4000)
+#define pte_v_swe 14
+#define pte_m_ewe (0x2000)
+#define pte_v_ewe 13
+#define pte_m_kwe (0x1000)
+#define pte_v_kwe 12
+#define pte_m_ure (0x0800)
+#define pte_v_ure 11
+#define pte_m_sre (0x0400)
+#define pte_v_sre 10
+#define pte_m_ere (0x0200)
+#define pte_v_ere 9
+#define pte_m_kre (0x0100)
+#define pte_v_kre 8
+#define pte_m_hpf (0x0020)
+#define pte_v_hpf 5
+#define pte_m_asm (0x0010)
+#define pte_v_asm 4
+#define pte_m_foe (0x0008)
+#define pte_v_foe 3
+#define pte_m_fow (0x0004)
+#define pte_v_fow 2
+#define pte_m_for (0x0002)
+#define pte_v_for 1
+#define pte_m_v (0x0001)
+#define pte_v_v 0
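+
+/*
+** Illustrative only, not part of the original definitions: a hypothetical
+** macro building a valid, ASM-qualified, kernel read/write PTE for a given
+** page frame number from the fields above.
+*/
+#if 0
+#define EXAMPLE_KSEG_PTE(pfn) \
+    (((unsigned long)(pfn) << pte_v_pfn) | \
+     pte_m_kwe | pte_m_kre | pte_m_asm | pte_m_v)
+#endif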
+
+// _DEFEND PTE,_GBL,DEF
+// .ENDM
+//;+
+//; VA layout - symbol prefix VA_
+//;
+//; Loc Size name function
+//; ------ ------ ------- -----------------------------------
+//; <42:33> 10 SEG1 First seg table offset for mapping
+//; <32:23> 10 SEG2 Second seg table offset for mapping
+//; <22:13> 10 SEG3 Third seg table offset for mapping
+//; <12:0> 13 OFFSET Byte within page
+//;-
+// .MACRO _VADEF,_GBL
+// _DEFINI VA,_GBL
+
+#define va_m_offset (0x000000001FFF)
+#define va_v_offset 0
+#define va_m_seg3 (0x0000007FE000)
+#define va_v_seg3 13
+#define va_m_seg2 (0x0001FF800000)
+#define va_v_seg2 23
+#define va_m_seg1 (0x7FE00000000)
+#define va_v_seg1 33
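+
+/*
+** Illustrative only, not part of the original definitions: hypothetical
+** macros pulling the three segment-table indices and the byte offset out
+** of a virtual address with the masks and shifts above.
+*/
+#if 0
+#define VA_SEG1(va)   (((va) & va_m_seg1) >> va_v_seg1)
+#define VA_SEG2(va)   (((va) & va_m_seg2) >> va_v_seg2)
+#define VA_SEG3(va)   (((va) & va_m_seg3) >> va_v_seg3)
+#define VA_OFFSET(va) ((va) & va_m_offset)
+#endif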
+
+// _DEFEND VA,_GBL,DEF
+// .ENDM
+//;+
+//; PRIVILEGED CONTEXT BLOCK (PCB)
+//;-
+// .MACRO _PCBDEF,_GBL
+// _DEFINI PCB,_GBL
+#define pcb_q_ksp 0
+#define pcb_q_esp 8
+#define pcb_q_ssp 16
+#define pcb_q_usp 24
+#define pcb_q_ptbr 32
+#define pcb_q_asn 40
+#define pcb_q_ast 48
+#define pcb_q_fen 56
+#define pcb_q_cc 64
+#define pcb_q_unq 72
+#define pcb_q_sct 80
+
+#define pcb_v_asten 0
+#define pcb_m_asten (0x0f<<pcb_v_asten)
+#define pcb_v_astsr 4
+#define pcb_m_astsr (0x0f<<pcb_v_astsr)
+#define pcb_v_dat 63
+#define pcb_v_pme 62
+
+// _DEFEND PCB,_GBL,DEF
+// .ENDM
+//;+
+//; SYSTEM CONTROL BLOCK (SCB)
+//;-
+// .MACRO _SCBDEF,_GBL
+// _DEFINI SCB,_GBL
+
+#define scb_v_fen (0x0010)
+#define scb_v_acv (0x0080)
+#define scb_v_tnv (0x0090)
+#define scb_v_for (0x00A0)
+#define scb_v_fow (0x00B0)
+#define scb_v_foe (0x00C0)
+#define scb_v_arith (0x0200)
+#define scb_v_kast (0x0240)
+#define scb_v_east (0x0250)
+#define scb_v_sast (0x0260)
+#define scb_v_uast (0x0270)
+#define scb_v_unalign (0x0280)
+#define scb_v_bpt (0x0400)
+#define scb_v_bugchk (0x0410)
+#define scb_v_opcdec (0x0420)
+#define scb_v_illpal (0x0430)
+#define scb_v_trap (0x0440)
+#define scb_v_chmk (0x0480)
+#define scb_v_chme (0x0490)
+#define scb_v_chms (0x04A0)
+#define scb_v_chmu (0x04B0)
+#define scb_v_sw0 (0x0500)
+#define scb_v_sw1 (0x0510)
+#define scb_v_sw2 (0x0520)
+#define scb_v_sw3 (0x0530)
+#define scb_v_sw4 (0x0540)
+#define scb_v_sw5 (0x0550)
+#define scb_v_sw6 (0x0560)
+#define scb_v_sw7 (0x0570)
+#define scb_v_sw8 (0x0580)
+#define scb_v_sw9 (0x0590)
+#define scb_v_sw10 (0x05A0)
+#define scb_v_sw11 (0x05B0)
+#define scb_v_sw12 (0x05C0)
+#define scb_v_sw13 (0x05D0)
+#define scb_v_sw14 (0x05E0)
+#define scb_v_sw15 (0x05F0)
+#define scb_v_clock (0x0600)
+#define scb_v_inter (0x0610)
+#define scb_v_sys_corr_err (0x0620)
+#define scb_v_proc_corr_err (0x0630)
+#define scb_v_pwrfail (0x0640)
+#define scb_v_perfmon (0x0650)
+#define scb_v_sysmchk (0x0660)
+#define scb_v_procmchk (0x0670)
+#define scb_v_passive_rel (0x06F0)
+
+// _DEFEND SCB,_GBL,DEF
+// .ENDM
+//;+
+//; Stack frame (FRM)
+//;-
+// .MACRO _FRMDEF,_GBL
+// _DEFINI FRM,_GBL
+
+#define frm_v_r2 (0x0000)
+#define frm_v_r3 (0x0008)
+#define frm_v_r4 (0x0010)
+#define frm_v_r5 (0x0018)
+#define frm_v_r6 (0x0020)
+#define frm_v_r7 (0x0028)
+#define frm_v_pc (0x0030)
+#define frm_v_ps (0x0038)
+
+// _DEFEND FRM,_GBL,DEF
+// .ENDM
+//;+
+//; Exception summary register (EXS)
+//;-
+// .MACRO _EXSDEF,_GBL
+// _DEFINI EXS,_GBL
+// exs_v_swc <0> ; Software completion
+// exs_v_inv <1> ; Invalid operation
+// exs_v_dze <2> ; Div by zero
+// exs_v_fov <3> ; Floating point overflow
+// exs_v_unf <4> ; Floating point underflow
+// exs_v_ine <5> ; Floating point inexact
+// exs_v_iov <6> ; Floating convert to integer overflow
+#define exs_v_swc 0
+#define exs_v_inv 1
+#define exs_v_dze 2
+#define exs_v_fov 3
+#define exs_v_unf 4
+#define exs_v_ine 5
+#define exs_v_iov 6
+
+#define exs_m_swc (1<<exs_v_swc)
+#define exs_m_inv (1<<exs_v_inv)
+#define exs_m_dze (1<<exs_v_dze)
+#define exs_m_fov (1<<exs_v_fov)
+#define exs_m_unf (1<<exs_v_unf)
+#define exs_m_ine (1<<exs_v_ine)
+#define exs_m_iov (1<<exs_v_iov)
+
+// _defend exs,_gbl,def
+// .endm
+//;+
+//; machine check error summary register (mces)
+//;-
+// .macro _mcesdef,_gbl
+// _defini mces,_gbl
+// mces_v_mchk <0> ; machine check in progress
+// mces_v_sce <1> ; system correctable error
+// mces_v_pce <2> ; processor correctable error
+// mces_v_dpc <3> ; disable reporting of processor correctable errors
+// mces_v_dsc <4> ; disable reporting of system correctable errors
+#define mces_v_mchk 0
+#define mces_v_sce 1
+#define mces_v_pce 2
+#define mces_v_dpc 3
+#define mces_v_dsc 4
+
+#define mces_m_mchk (1<<mces_v_mchk)
+#define mces_m_sce (1<<mces_v_sce)
+#define mces_m_pce (1<<mces_v_pce)
+#define mces_m_dpc (1<<mces_v_dpc)
+#define mces_m_dsc (1<<mces_v_dsc)
+#define mces_m_all ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce) | (1<<mces_v_dpc) | (1<<mces_v_dsc))
+// _defend mces,_gbl,def
+// .endm
+
+
+
+#endif
diff --git a/system/alpha/palcode/ev5_defs.h b/system/alpha/palcode/ev5_defs.h
new file mode 100644
index 000000000..110d338e0
--- /dev/null
+++ b/system/alpha/palcode/ev5_defs.h
@@ -0,0 +1,575 @@
+#ifndef EV5_DEFS_INCLUDED
+#define EV5_DEFS_INCLUDED 1
+
+// adapted from the version emailed to lance..pb Nov/95
+
+
+// ********************************************************************************************************************************
+// Created 25-JUL-1995 14:21:23 by VAX SDL V3.2-12 Source: 21-JUL-1995 11:03:08 EV5$:[EV5.DVT.SUP]EV5_DEFS.SDL;24
+// ********************************************************************************************************************************
+
+// .MACRO $EV5DEF,..EQU=<=>,..COL=<:>
+// EV5$K_REVISION'..equ'34
+// In the definitions below, registers are annotated with one of the following
+// symbols:
+//
+// RW - The register may be read and written
+// RO - The register may only be read
+// WO - The register may only be written
+//
+// For RO and WO registers, all bits and fields within the register are also
+// read-only or write-only. For RW registers, each bit or field within
+// the register is annotated with one of the following:
+//
+// RW - The bit/field may be read and written
+// RO - The bit/field may be read; writes are ignored
+// WO - The bit/field may be written; reads return an UNPREDICTABLE result.
+// WZ - The bit/field may be written; reads return a 0
+// WC - The bit/field may be read; writes cause state to clear
+// RC - The bit/field may be read, which also causes state to clear; writes are ignored
+// Architecturally-defined (SRM) registers for EVMS
+#define pt0 320
+#define pt1 321
+#define pt2 322
+#define pt3 323
+#define pt4 324
+#define pt5 325
+#define pt6 326
+#define pt7 327
+#define pt8 328
+#define pt9 329
+#define pt10 330
+#define pt11 331
+#define pt12 332
+#define pt13 333
+#define pt14 334
+#define pt15 335
+#define pt16 336
+#define pt17 337
+#define pt18 338
+#define pt19 339
+#define pt20 340
+#define pt21 341
+#define pt22 342
+#define pt23 343
+#define cbox_ipr_offset 16777200
+#define sc_ctl 168
+#define sc_stat 232
+#define sc_addr 392
+#define sc_addr_nm 392
+#define sc_addr_fhm 392
+#define bc_ctl 296
+#define bc_config 456
+#define ei_stat 360
+#define ei_addr 328
+#define fill_syn 104
+#define bc_tag_addr 264
+#define ld_lock 488
+#define aster 266
+#define astrr 265
+#define exc_addr 267
+#define exc_sum 268
+#define exc_mask 269
+#define hwint_clr 277
+#define ic_flush_ctl 281
+#define icperr_stat 282
+#define ic_perr_stat 282
+#define ic_row_map 283
+#define icsr 280
+#define ifault_va_form 274
+#define intid 273
+#define ipl 272
+#define isr 256
+#define itb_is 263
+#define itb_asn 259
+#define itb_ia 261
+#define itb_iap 262
+#define itb_pte 258
+#define itb_pte_temp 260
+#define itb_tag 257
+#define ivptbr 275
+#define pal_base 270
+#define pmctr 284
+// this is not the register ps .. pb
+// #define ps 271
+#define sirr 264
+#define sl_txmit 278
+#define sl_rcv 279
+#define alt_mode 524
+#define cc 525
+#define cc_ctl 526
+#define dc_flush 528
+#define dcperr_stat 530
+#define dc_test_ctl 531
+#define dc_test_tag 532
+#define dc_test_tag_temp 533
+#define dtb_asn 512
+#define dtb_cm 513
+#define dtb_ia 522
+#define dtb_iap 521
+#define dtb_is 523
+#define dtb_pte 515
+#define dtb_pte_temp 516
+#define dtb_tag 514
+#define mcsr 527
+#define dc_mode 534
+#define maf_mode 535
+#define mm_stat 517
+#define mvptbr 520
+#define va 518
+#define va_form 519
+#define ev5_srm__ps 0
+#define ev5_srm__pc 0
+#define ev5_srm__asten 0
+#define ev5_srm__astsr 0
+#define ev5_srm__ipir 0
+#define ev5_srm__ipl 0
+#define ev5_srm__mces 0
+#define ev5_srm__pcbb 0
+#define ev5_srm__prbr 0
+#define ev5_srm__ptbr 0
+#define ev5_srm__scbb 0
+#define ev5_srm__sirr 0
+#define ev5_srm__sisr 0
+#define ev5_srm__tbchk 0
+#define ev5_srm__tb1a 0
+#define ev5_srm__tb1ap 0
+#define ev5_srm__tb1ad 0
+#define ev5_srm__tb1ai 0
+#define ev5_srm__tbis 0
+#define ev5_srm__ksp 0
+#define ev5_srm__esp 0
+#define ev5_srm__ssp 0
+#define ev5_srm__usp 0
+#define ev5_srm__vptb 0
+#define ev5_srm__whami 0
+#define ev5_srm__cc 0
+#define ev5_srm__unq 0
+// processor-specific iprs.
+#define ev5__sc_ctl 168
+#define ev5__sc_stat 232
+#define ev5__sc_addr 392
+#define ev5__bc_ctl 296
+#define ev5__bc_config 456
+#define bc_config_k_size_1mb 1
+#define bc_config_k_size_2mb 2
+#define bc_config_k_size_4mb 3
+#define bc_config_k_size_8mb 4
+#define bc_config_k_size_16mb 5
+#define bc_config_k_size_32mb 6
+#define bc_config_k_size_64mb 7
+#define ev5__ei_stat 360
+#define ev5__ei_addr 328
+#define ev5__fill_syn 104
+#define ev5__bc_tag_addr 264
+#define ev5__aster 266
+#define ev5__astrr 265
+#define ev5__exc_addr 267
+#define exc_addr_v_pa 2
+#define exc_addr_s_pa 62
+#define ev5__exc_sum 268
+#define ev5__exc_mask 269
+#define ev5__hwint_clr 277
+#define ev5__ic_flush_ctl 281
+#define ev5__icperr_stat 282
+#define ev5__ic_perr_stat 282
+#define ev5__ic_row_map 283
+#define ev5__icsr 280
+#define ev5__ifault_va_form 274
+#define ev5__ifault_va_form_nt 274
+#define ifault_va_form_nt_v_vptb 30
+#define ifault_va_form_nt_s_vptb 34
+#define ev5__intid 273
+#define ev5__ipl 272
+#define ev5__itb_is 263
+#define ev5__itb_asn 259
+#define ev5__itb_ia 261
+#define ev5__itb_iap 262
+#define ev5__itb_pte 258
+#define ev5__itb_pte_temp 260
+#define ev5__itb_tag 257
+#define ev5__ivptbr 275
+#define ivptbr_v_vptb 30
+#define ivptbr_s_vptb 34
+#define ev5__pal_base 270
+#define ev5__pmctr 284
+#define ev5__ps 271
+#define ev5__isr 256
+#define ev5__sirr 264
+#define ev5__sl_txmit 278
+#define ev5__sl_rcv 279
+#define ev5__alt_mode 524
+#define ev5__cc 525
+#define ev5__cc_ctl 526
+#define ev5__dc_flush 528
+#define ev5__dcperr_stat 530
+#define ev5__dc_test_ctl 531
+#define ev5__dc_test_tag 532
+#define ev5__dc_test_tag_temp 533
+#define ev5__dtb_asn 512
+#define ev5__dtb_cm 513
+#define ev5__dtb_ia 522
+#define ev5__dtb_iap 521
+#define ev5__dtb_is 523
+#define ev5__dtb_pte 515
+#define ev5__dtb_pte_temp 516
+#define ev5__dtb_tag 514
+#define ev5__mcsr 527
+#define ev5__dc_mode 534
+#define ev5__maf_mode 535
+#define ev5__mm_stat 517
+#define ev5__mvptbr 520
+#define ev5__va 518
+#define ev5__va_form 519
+#define ev5__va_form_nt 519
+#define va_form_nt_s_va 19
+#define va_form_nt_v_vptb 30
+#define va_form_nt_s_vptb 34
+#define ev5s_ev5_def 10
+#define ev5_def 0
+// cbox registers.
+#define sc_ctl_v_sc_fhit 0
+#define sc_ctl_v_sc_flush 1
+#define sc_ctl_s_sc_tag_stat 6
+#define sc_ctl_v_sc_tag_stat 2
+#define sc_ctl_s_sc_fb_dp 4
+#define sc_ctl_v_sc_fb_dp 8
+#define sc_ctl_v_sc_blk_size 12
+#define sc_ctl_s_sc_set_en 3
+#define sc_ctl_v_sc_set_en 13
+#define sc_ctl_s_sc_soft_repair 3
+#define sc_ctl_v_sc_soft_repair 16
+#define sc_stat_s_sc_tperr 3
+#define sc_stat_v_sc_tperr 0
+#define sc_stat_s_sc_dperr 8
+#define sc_stat_v_sc_dperr 3
+#define sc_stat_s_cbox_cmd 5
+#define sc_stat_v_cbox_cmd 11
+#define sc_stat_v_sc_scnd_err 16
+#define sc_addr_fhm_v_sc_tag_parity 4
+#define sc_addr_fhm_s_tag_stat_sb0 3
+#define sc_addr_fhm_v_tag_stat_sb0 5
+#define sc_addr_fhm_s_tag_stat_sb1 3
+#define sc_addr_fhm_v_tag_stat_sb1 8
+#define sc_addr_fhm_s_ow_mod0 2
+#define sc_addr_fhm_v_ow_mod0 11
+#define sc_addr_fhm_s_ow_mod1 2
+#define sc_addr_fhm_v_ow_mod1 13
+#define sc_addr_fhm_s_tag_lo 17
+#define sc_addr_fhm_v_tag_lo 15
+#define sc_addr_fhm_s_tag_hi 7
+#define sc_addr_fhm_v_tag_hi 32
+#define bc_ctl_v_bc_enabled 0
+#define bc_ctl_v_alloc_cyc 1
+#define bc_ctl_v_ei_opt_cmd 2
+#define bc_ctl_v_ei_opt_cmd_mb 3
+#define bc_ctl_v_corr_fill_dat 4
+#define bc_ctl_v_vtm_first 5
+#define bc_ctl_v_ei_ecc_or_parity 6
+#define bc_ctl_v_bc_fhit 7
+#define bc_ctl_s_bc_tag_stat 5
+#define bc_ctl_v_bc_tag_stat 8
+#define bc_ctl_s_bc_bad_dat 2
+#define bc_ctl_v_bc_bad_dat 13
+#define bc_ctl_v_ei_dis_err 15
+#define bc_ctl_v_tl_pipe_latch 16
+#define bc_ctl_s_bc_wave_pipe 2
+#define bc_ctl_v_bc_wave_pipe 17
+#define bc_ctl_s_pm_mux_sel 6
+#define bc_ctl_v_pm_mux_sel 19
+#define bc_ctl_v_dbg_mux_sel 25
+#define bc_ctl_v_dis_baf_byp 26
+#define bc_ctl_v_dis_sc_vic_buf 27
+#define bc_ctl_v_dis_sys_addr_par 28
+#define bc_ctl_v_read_dirty_cln_shr 29
+#define bc_ctl_v_write_read_bubble 30
+#define bc_ctl_v_bc_wave_pipe_2 31
+#define bc_ctl_v_auto_dack 32
+#define bc_ctl_v_dis_byte_word 33
+#define bc_ctl_v_stclk_delay 34
+#define bc_ctl_v_write_under_miss 35
+#define bc_config_s_bc_size 3
+#define bc_config_v_bc_size 0
+#define bc_config_s_bc_rd_spd 4
+#define bc_config_v_bc_rd_spd 4
+#define bc_config_s_bc_wr_spd 4
+#define bc_config_v_bc_wr_spd 8
+#define bc_config_s_bc_rd_wr_spc 3
+#define bc_config_v_bc_rd_wr_spc 12
+#define bc_config_s_fill_we_offset 3
+#define bc_config_v_fill_we_offset 16
+#define bc_config_s_bc_we_ctl 9
+#define bc_config_v_bc_we_ctl 20
+// cbox registers, continued
+#define ei_stat_s_sys_id 4
+#define ei_stat_v_sys_id 24
+#define ei_stat_v_bc_tperr 28
+#define ei_stat_v_bc_tc_perr 29
+#define ei_stat_v_ei_es 30
+#define ei_stat_v_cor_ecc_err 31
+#define ei_stat_v_unc_ecc_err 32
+#define ei_stat_v_ei_par_err 33
+#define ei_stat_v_fil_ird 34
+#define ei_stat_v_seo_hrd_err 35
+//
+#define bc_tag_addr_v_hit 12
+#define bc_tag_addr_v_tagctl_p 13
+#define bc_tag_addr_v_tagctl_d 14
+#define bc_tag_addr_v_tagctl_s 15
+#define bc_tag_addr_v_tagctl_v 16
+#define bc_tag_addr_v_tag_p 17
+#define bc_tag_addr_s_bc_tag 19
+#define bc_tag_addr_v_bc_tag 20
+// ibox and icache registers.
+#define aster_v_kar 0
+#define aster_v_ear 1
+#define aster_v_sar 2
+#define aster_v_uar 3
+#define astrr_v_kar 0
+#define astrr_v_ear 1
+#define astrr_v_sar 2
+#define astrr_v_uar 3
+#define exc_addr_v_pal 0
+#define exc_sum_v_swc 10
+#define exc_sum_v_inv 11
+#define exc_sum_v_dze 12
+#define exc_sum_v_fov 13
+#define exc_sum_v_unf 14
+#define exc_sum_v_ine 15
+#define exc_sum_v_iov 16
+#define hwint_clr_v_pc0c 27
+#define hwint_clr_v_pc1c 28
+#define hwint_clr_v_pc2c 29
+#define hwint_clr_v_crdc 32
+#define hwint_clr_v_slc 33
+// ibox and icache registers, continued
+#define icperr_stat_v_dpe 11
+#define icperr_stat_v_tpe 12
+#define icperr_stat_v_tmr 13
+#define ic_perr_stat_v_dpe 11
+#define ic_perr_stat_v_tpe 12
+#define ic_perr_stat_v_tmr 13
+#define icsr_v_pma 8
+#define icsr_v_pmp 9
+#define icsr_v_byt 17
+#define icsr_v_fmp 18
+#define icsr_v_im0 20
+#define icsr_v_im1 21
+#define icsr_v_im2 22
+#define icsr_v_im3 23
+#define icsr_v_tmm 24
+#define icsr_v_tmd 25
+#define icsr_v_fpe 26
+#define icsr_v_hwe 27
+#define icsr_s_spe 2
+#define icsr_v_spe 28
+#define icsr_v_sde 30
+#define icsr_v_crde 32
+#define icsr_v_sle 33
+#define icsr_v_fms 34
+#define icsr_v_fbt 35
+#define icsr_v_fbd 36
+#define icsr_v_dbs 37
+#define icsr_v_ista 38
+#define icsr_v_tst 39
+#define ifault_va_form_s_va 30
+#define ifault_va_form_v_va 3
+#define ifault_va_form_s_vptb 31
+#define ifault_va_form_v_vptb 33
+#define ifault_va_form_nt_s_va 19
+#define ifault_va_form_nt_v_va 3
+#define intid_s_intid 5
+#define intid_v_intid 0
+// ibox and icache registers, continued
+#define ipl_s_ipl 5
+#define ipl_v_ipl 0
+#define itb_is_s_va 30
+#define itb_is_v_va 13
+#define itb_asn_s_asn 7
+#define itb_asn_v_asn 4
+#define itb_pte_v_asm 4
+#define itb_pte_s_gh 2
+#define itb_pte_v_gh 5
+#define itb_pte_v_kre 8
+#define itb_pte_v_ere 9
+#define itb_pte_v_sre 10
+#define itb_pte_v_ure 11
+#define itb_pte_s_pfn 27
+#define itb_pte_v_pfn 32
+#define itb_pte_temp_v_asm 13
+#define itb_pte_temp_v_kre 18
+#define itb_pte_temp_v_ere 19
+#define itb_pte_temp_v_sre 20
+#define itb_pte_temp_v_ure 21
+#define itb_pte_temp_s_gh 3
+#define itb_pte_temp_v_gh 29
+#define itb_pte_temp_s_pfn 27
+#define itb_pte_temp_v_pfn 32
+// ibox and icache registers, continued
+#define itb_tag_s_va 30
+#define itb_tag_v_va 13
+#define pal_base_s_pal_base 26
+#define pal_base_v_pal_base 14
+#define pmctr_s_sel2 4
+#define pmctr_v_sel2 0
+#define pmctr_s_sel1 4
+#define pmctr_v_sel1 4
+#define pmctr_v_killk 8
+#define pmctr_v_killp 9
+#define pmctr_s_ctl2 2
+#define pmctr_v_ctl2 10
+#define pmctr_s_ctl1 2
+#define pmctr_v_ctl1 12
+#define pmctr_s_ctl0 2
+#define pmctr_v_ctl0 14
+#define pmctr_s_ctr2 14
+#define pmctr_v_ctr2 16
+#define pmctr_v_killu 30
+#define pmctr_v_sel0 31
+#define pmctr_s_ctr1 16
+#define pmctr_v_ctr1 32
+#define pmctr_s_ctr0 16
+#define pmctr_v_ctr0 48
+#define ps_v_cm0 3
+#define ps_v_cm1 4
+#define isr_s_astrr 4
+#define isr_v_astrr 0
+#define isr_s_sisr 15
+#define isr_v_sisr 4
+#define isr_v_atr 19
+#define isr_v_i20 20
+#define isr_v_i21 21
+#define isr_v_i22 22
+#define isr_v_i23 23
+#define isr_v_pc0 27
+#define isr_v_pc1 28
+#define isr_v_pc2 29
+#define isr_v_pfl 30
+#define isr_v_mck 31
+#define isr_v_crd 32
+#define isr_v_sli 33
+#define isr_v_hlt 34
+#define sirr_s_sirr 15
+#define sirr_v_sirr 4
+// ibox and icache registers, continued
+#define sl_txmit_v_tmt 7
+#define sl_rcv_v_rcv 6
+// mbox and dcache registers.
+#define alt_mode_v_am0 3
+#define alt_mode_v_am1 4
+#define cc_ctl_v_cc_ena 32
+#define dcperr_stat_v_seo 0
+#define dcperr_stat_v_lock 1
+#define dcperr_stat_v_dp0 2
+#define dcperr_stat_v_dp1 3
+#define dcperr_stat_v_tp0 4
+#define dcperr_stat_v_tp1 5
+// the following two registers are used exclusively for test and diagnostics.
+// they should not be referenced in normal operation.
+#define dc_test_ctl_v_bank0 0
+#define dc_test_ctl_v_bank1 1
+#define dc_test_ctl_v_fill_0 2
+#define dc_test_ctl_s_index 10
+#define dc_test_ctl_v_index 3
+#define dc_test_ctl_s_fill_1 19
+#define dc_test_ctl_v_fill_1 13
+#define dc_test_ctl_s_fill_2 32
+#define dc_test_ctl_v_fill_2 32
+// mbox and dcache registers, continued.
+#define dc_test_tag_v_tag_par 2
+#define dc_test_tag_v_ow0 11
+#define dc_test_tag_v_ow1 12
+#define dc_test_tag_s_tag 26
+#define dc_test_tag_v_tag 13
+#define dc_test_tag_temp_v_tag_par 2
+#define dc_test_tag_temp_v_d0p0 3
+#define dc_test_tag_temp_v_d0p1 4
+#define dc_test_tag_temp_v_d1p0 5
+#define dc_test_tag_temp_v_d1p1 6
+#define dc_test_tag_temp_v_ow0 11
+#define dc_test_tag_temp_v_ow1 12
+#define dc_test_tag_temp_s_tag 26
+#define dc_test_tag_temp_v_tag 13
+#define dtb_asn_s_asn 7
+#define dtb_asn_v_asn 57
+#define dtb_cm_v_cm0 3
+#define dtb_cm_v_cm1 4
+#define dtbis_s_va0 30
+#define dtbis_v_va0 13
+#define dtb_pte_v_for 1
+#define dtb_pte_v_fow 2
+#define dtb_pte_v_asm 4
+#define dtb_pte_s_gh 2
+#define dtb_pte_v_gh 5
+#define dtb_pte_v_kre 8
+#define dtb_pte_v_ere 9
+#define dtb_pte_v_sre 10
+#define dtb_pte_v_ure 11
+#define dtb_pte_v_kwe 12
+#define dtb_pte_v_ewe 13
+#define dtb_pte_v_swe 14
+#define dtb_pte_v_uwe 15
+#define dtb_pte_s_pfn 27
+#define dtb_pte_v_pfn 32
+// mbox and dcache registers, continued.
+#define dtb_pte_temp_v_for 0
+#define dtb_pte_temp_v_fow 1
+#define dtb_pte_temp_v_kre 2
+#define dtb_pte_temp_v_ere 3
+#define dtb_pte_temp_v_sre 4
+#define dtb_pte_temp_v_ure 5
+#define dtb_pte_temp_v_kwe 6
+#define dtb_pte_temp_v_ewe 7
+#define dtb_pte_temp_v_swe 8
+#define dtb_pte_temp_v_uwe 9
+#define dtb_pte_temp_v_asm 10
+#define dtb_pte_temp_s_fill_0 2
+#define dtb_pte_temp_v_fill_0 11
+#define dtb_pte_temp_s_pfn 27
+#define dtb_pte_temp_v_pfn 13
+#define dtb_tag_s_va 30
+#define dtb_tag_v_va 13
+// most mcsr bits are used for testability and diagnostics only.
+// for normal operation, they will be supported in the following configuration:
+// split_dcache = 1, maf_nomerge = 0, wb_flush_always = 0, wb_nomerge = 0,
+// dc_ena<1:0> = 1, dc_fhit = 0, dc_bad_parity = 0
+#define mcsr_v_big_endian 0
+#define mcsr_v_sp0 1
+#define mcsr_v_sp1 2
+#define mcsr_v_mbox_sel 3
+#define mcsr_v_e_big_endian 4
+#define mcsr_v_dbg_packet_sel 5
+#define dc_mode_v_dc_ena 0
+#define dc_mode_v_dc_fhit 1
+#define dc_mode_v_dc_bad_parity 2
+#define dc_mode_v_dc_perr_dis 3
+#define dc_mode_v_dc_doa 4
+#define maf_mode_v_maf_nomerge 0
+#define maf_mode_v_wb_flush_always 1
+#define maf_mode_v_wb_nomerge 2
+#define maf_mode_v_io_nomerge 3
+#define maf_mode_v_wb_cnt_disable 4
+#define maf_mode_v_maf_arb_disable 5
+#define maf_mode_v_dread_pending 6
+#define maf_mode_v_wb_pending 7
+// mbox and dcache registers, continued.
+#define mm_stat_v_wr 0
+#define mm_stat_v_acv 1
+#define mm_stat_v_for 2
+#define mm_stat_v_fow 3
+#define mm_stat_v_dtb_miss 4
+#define mm_stat_v_bad_va 5
+#define mm_stat_s_ra 5
+#define mm_stat_v_ra 6
+#define mm_stat_s_opcode 6
+#define mm_stat_v_opcode 11
+#define mvptbr_s_vptb 31
+#define mvptbr_v_vptb 33
+#define va_form_s_va 30
+#define va_form_v_va 3
+#define va_form_s_vptb 31
+#define va_form_v_vptb 33
+#define va_form_nt_s_va 19
+#define va_form_nt_v_va 3
+//.endm
+
+#endif
diff --git a/system/alpha/palcode/ev5_impure.h b/system/alpha/palcode/ev5_impure.h
new file mode 100644
index 000000000..84d700c32
--- /dev/null
+++ b/system/alpha/palcode/ev5_impure.h
@@ -0,0 +1,392 @@
+#ifndef EV5_IMPURE_INCLUDED
+#define EV5_IMPURE_INCLUDED
+
+/*
+// This uses the Hudson file format from "impure.h" but with the fields from
+// the distributed palcode "ev5_impure.sdl" .. pboyle Nov/95
+
+//orig file: impure.sdl
+//orig
+//orig Abstract: PAL impure scratch area and logout area data structure definitions for
+//orig Alpha firmware.
+//orig
+//orig
+//orig module $pal_impure;
+//orig
+//orig Edit Date Who Description
+//orig ---- --------- --- ---------------------
+//orig 1 7-Jul-93 JEM Initial Entry
+//orig 2 18-nov-93 JEM Add shadow bc_ctl and pmctr_ctl to impure area
+//orig Delete mvptbr
+//orig Calculate pal$logout from end of impure area
+//orig 3 6-dec-93 JEM Add pmctr_ctl bitfield definitions
+//orig 4 3-feb-94 JEM Remove f31,r31 from impure area; Remove bc_ctl, pmctr_ctl;
+//orig add ic_perr_stat, pmctr, dc_perr_stat, sc_stat, sc_addr, sc_ctl,
+//orig bc_tag_addr, ei_stat, ei_addr, fill_syn, ld_lock
+//orig 5 19-feb-94 JEM add gpr constants, and add f31,r31 back in to be consistent with ev4
+//orig add cns$ipr_offset
+//orig 6 18-apr-94 JEM Add shadow bc_ctl and pmctr_ctl to impure area again.
+//orig 7 18-jul-94 JEM Add bc_config shadow. Add mchk$sys_base constant to mchk logout frame
+//orig
+//orig
+//orig constant REVISION equals 7 prefix IMPURE$; // Revision number of this file
+//orig
+
+** Macros for saving/restoring data to/from the PAL impure scratch
+** area.
+**
+** The console save state area is larger than the addressability
+** of the HW_LD/ST instructions (10-bit signed byte displacement),
+** so some adjustments to the base offsets, as well as the offsets
+** within each base region, are necessary.
+**
+** The console save state area is divided into two segments; the
+** CPU-specific segment and the platform-specific segment. The
+** state that is saved in the CPU-specific segment includes GPRs,
+** FPRs, IPRs, halt code, MCHK flag, etc. All other state is saved
+** in the platform-specific segment.
+**
+** The impure pointer will need to be adjusted by a different offset
+** value for each region within a given segment. The SAVE and RESTORE
+** macros will auto-magically adjust the offsets accordingly.
+**
+*/
+
+#define SAVE_GPR(reg,offset,base) \
+ stq_p reg, ((offset-0x200)&0x3FF)(base)
+
+#define RESTORE_GPR(reg,offset,base) \
+ ldq_p reg, ((offset-0x200)&0x3FF)(base)
+
+
+#define SAVE_FPR(reg,offset,base) \
+ stt reg, ((offset-0x200)&0x3FF)(base)
+
+#define RESTORE_FPR(reg,offset,base) \
+ ldt reg, ((offset-0x200)&0x3FF)(base)
+
+#define SAVE_IPR(reg,offset,base) \
+ mfpr v0, reg; \
+ stq_p v0, ((offset-CNS_Q_IPR)&0x3FF)(base)
+
+#define RESTORE_IPR(reg,offset,base) \
+ ldq_p v0, ((offset-CNS_Q_IPR)&0x3FF)(base); \
+ mtpr v0, reg
+
+#define SAVE_SHADOW(reg,offset,base) \
+ stq_p reg, ((offset-CNS_Q_IPR)&0x3FF)(base)
+
+#define RESTORE_SHADOW(reg,offset,base)\
+ ldq_p reg, ((offset-CNS_Q_IPR)&0x3FF)(base)
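+
+/*
+** Worked example (derived from the definitions in this file, not from the
+** original source): with CNS_Q_GPR defined below as 0x110, a hypothetical
+** invocation such as
+**
+**   SAVE_GPR(r1, CNS_Q_GPR+8, base)
+**
+** expands to
+**
+**   stq_p r1, (((CNS_Q_GPR+8)-0x200)&0x3FF)(base)
+**
+** The -0x200 bias and the 10-bit mask keep the displacement within the
+** signed 10-bit field of HW_ST, assuming the base register has itself been
+** adjusted by the corresponding region offset as described above.
+*/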
+
+/* orig Structure of the processor-specific impure area */
+
+/* orig aggregate impure struct prefix "" tag "";
+ * orig cns$flag quadword;
+ * orig cns$hlt quadword;
+*/
+
+/* Define base for debug monitor compatibility */
+#define CNS_Q_BASE 0x000
+#define CNS_Q_FLAG 0x100
+#define CNS_Q_HALT 0x108
+
+
+/* orig constant (
+ * orig cns$r0,cns$r1,cns$r2,cns$r3,cns$r4,cns$r5,cns$r6,cns$r7,
+ * orig cns$r8,cns$r9,cns$r10,cns$r11,cns$r12,cns$r13,cns$r14,cns$r15,
+ * orig cns$r16,cns$r17,cns$r18,cns$r19,cns$r20,cns$r21,cns$r22,cns$r23,
+ * orig cns$r24,cns$r25,cns$r26,cns$r27,cns$r28,cns$r29,cns$r30,cns$r31
+ * orig ) equals . increment 8 prefix "" tag "";
+ * orig cns$gpr quadword dimension 32;
+*/
+/* Offset to base of saved GPR area - 32 quadwords */
+#define CNS_Q_GPR 0x110
+#define cns_gpr CNS_Q_GPR
+
+/* orig constant (
+ * orig cns$f0,cns$f1,cns$f2,cns$f3,cns$f4,cns$f5,cns$f6,cns$f7,
+ * orig cns$f8,cns$f9,cns$f10,cns$f11,cns$f12,cns$f13,cns$f14,cns$f15,
+ * orig cns$f16,cns$f17,cns$f18,cns$f19,cns$f20,cns$f21,cns$f22,cns$f23,
+ * orig cns$f24,cns$f25,cns$f26,cns$f27,cns$f28,cns$f29,cns$f30,cns$f31
+ * orig ) equals . increment 8 prefix "" tag "";
+ * orig cns$fpr quadword dimension 32;
+*/
+/* Offset to base of saved FPR area - 32 quadwords */
+#define CNS_Q_FPR 0x210
+
+/* orig #t=.;
+ * orig cns$mchkflag quadword;
+*/
+#define CNS_Q_MCHK 0x310
+
+/* orig constant cns$pt_offset equals .;
+ * orig constant (
+ * orig cns$pt0,cns$pt1,cns$pt2,cns$pt3,cns$pt4,cns$pt5,cns$pt6,
+ * orig cns$pt7,cns$pt8,cns$pt9,cns$pt10,cns$pt11,cns$pt12,cns$pt13,
+ * orig cns$pt14,cns$pt15,cns$pt16,cns$pt17,cns$pt18,cns$pt19,cns$pt20,
+ * orig cns$pt21,cns$pt22,cns$pt23
+ * orig ) equals . increment 8 prefix "" tag "";
+ * orig cns$pt quadword dimension 24;
+*/
+/* Offset to base of saved PALtemp area - 24 quadwords */
+#define CNS_Q_PT 0x318
+
+/* orig cns$shadow8 quadword;
+ * orig cns$shadow9 quadword;
+ * orig cns$shadow10 quadword;
+ * orig cns$shadow11 quadword;
+ * orig cns$shadow12 quadword;
+ * orig cns$shadow13 quadword;
+ * orig cns$shadow14 quadword;
+ * orig cns$shadow25 quadword;
+*/
+/* Offset to base of saved PALshadow area - 8 quadwords */
+#define CNS_Q_SHADOW 0x3D8
+
+/* Offset to base of saved IPR area */
+#define CNS_Q_IPR 0x418
+
+/* orig constant cns$ipr_offset equals .; */
+/* orig cns$exc_addr quadword; */
+#define CNS_Q_EXC_ADDR 0x418
+/* orig cns$pal_base quadword; */
+#define CNS_Q_PAL_BASE 0x420
+/* orig cns$mm_stat quadword; */
+#define CNS_Q_MM_STAT 0x428
+/* orig cns$va quadword; */
+#define CNS_Q_VA 0x430
+/* orig cns$icsr quadword; */
+#define CNS_Q_ICSR 0x438
+/* orig cns$ipl quadword; */
+#define CNS_Q_IPL 0x440
+/* orig cns$ps quadword; // Ibox current mode */
+#define CNS_Q_IPS 0x448
+/* orig cns$itb_asn quadword; */
+#define CNS_Q_ITB_ASN 0x450
+/* orig cns$aster quadword; */
+#define CNS_Q_ASTER 0x458
+/* orig cns$astrr quadword; */
+#define CNS_Q_ASTRR 0x460
+/* orig cns$isr quadword; */
+#define CNS_Q_ISR 0x468
+/* orig cns$ivptbr quadword; */
+#define CNS_Q_IVPTBR 0x470
+/* orig cns$mcsr quadword; */
+#define CNS_Q_MCSR 0x478
+/* orig cns$dc_mode quadword; */
+#define CNS_Q_DC_MODE 0x480
+/* orig cns$maf_mode quadword; */
+#define CNS_Q_MAF_MODE 0x488
+/* orig cns$sirr quadword; */
+#define CNS_Q_SIRR 0x490
+/* orig cns$fpcsr quadword; */
+#define CNS_Q_FPCSR 0x498
+/* orig cns$icperr_stat quadword; */
+#define CNS_Q_ICPERR_STAT 0x4A0
+/* orig cns$pmctr quadword; */
+#define CNS_Q_PM_CTR 0x4A8
+/* orig cns$exc_sum quadword; */
+#define CNS_Q_EXC_SUM 0x4B0
+/* orig cns$exc_mask quadword; */
+#define CNS_Q_EXC_MASK 0x4B8
+/* orig cns$intid quadword; */
+#define CNS_Q_INT_ID 0x4C0
+/* orig cns$dcperr_stat quadword; */
+#define CNS_Q_DCPERR_STAT 0x4C8
+/* orig cns$sc_stat quadword; */
+#define CNS_Q_SC_STAT 0x4D0
+/* orig cns$sc_addr quadword; */
+#define CNS_Q_SC_ADDR 0x4D8
+/* orig cns$sc_ctl quadword; */
+#define CNS_Q_SC_CTL 0x4E0
+/* orig cns$bc_tag_addr quadword; */
+#define CNS_Q_BC_TAG_ADDR 0x4E8
+/* orig cns$ei_stat quadword; */
+#define CNS_Q_EI_STAT 0x4F0
+/* orig cns$ei_addr quadword; */
+#define CNS_Q_EI_ADDR 0x4F8
+/* orig cns$fill_syn quadword; */
+#define CNS_Q_FILL_SYN 0x500
+/* orig cns$ld_lock quadword; */
+#define CNS_Q_LD_LOCK 0x508
+/* orig cns$bc_ctl quadword; // shadow of on chip bc_ctl */
+#define CNS_Q_BC_CTL 0x510
+/* orig cns$pmctr_ctl quadword; // saved frequency select info for performance monitor counter */
+#define CNS_Q_PM_CTL 0x518
+/* orig cns$bc_config quadword; // shadow of on chip bc_config */
+#define CNS_Q_BC_CFG 0x520
+
+/* orig constant cns$size equals .;
+ * orig
+ * orig constant pal$impure_common_size equals (%x0200 +7) & %xfff8;
+ * orig constant pal$impure_specific_size equals (.+7) & %xfff8;
+ * orig constant cns$mchksize equals (.+7-#t) & %xfff8;
+ * orig constant pal$logout_area equals pal$impure_specific_size ;
+ * orig end impure;
+*/
+
+/* This next set of stuff came from the old code ..pb */
+#define CNS_Q_SROM_REV 0x528
+#define CNS_Q_PROC_ID 0x530
+#define CNS_Q_MEM_SIZE 0x538
+#define CNS_Q_CYCLE_CNT 0x540
+#define CNS_Q_SIGNATURE 0x548
+#define CNS_Q_PROC_MASK 0x550
+#define CNS_Q_SYSCTX 0x558
+
+
+
+#define MACHINE_CHECK_CRD_BASE 0
+#define MACHINE_CHECK_SIZE ((CNS_Q_SYSCTX + 7 - CNS_Q_MCHK) & 0xfff8)
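+
+/*
+** For reference (derived from the definitions above): with
+** CNS_Q_SYSCTX = 0x558 and CNS_Q_MCHK = 0x310, MACHINE_CHECK_SIZE works
+** out to (0x558 + 7 - 0x310) & 0xfff8 = 0x248 bytes, i.e. the save area
+** from the machine-check flag through the system context, rounded up to a
+** quadword multiple.
+*/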
+
+
+
+/* orig
+ * orig aggregate EV5PMCTRCTL_BITS structure fill prefix PMCTR_CTL$;
+ * orig SPROCESS bitfield length 1 ;
+ * orig FILL_0 bitfield length 3 fill tag $$;
+ * orig FRQ2 bitfield length 2 ;
+ * orig FRQ1 bitfield length 2 ;
+ * orig FRQ0 bitfield length 2 ;
+ * orig CTL2 bitfield length 2 ;
+ * orig CTL1 bitfield length 2 ;
+ * orig CTL0 bitfield length 2 ;
+ * orig FILL_1 bitfield length 16 fill tag $$;
+ * orig FILL_2 bitfield length 32 fill tag $$;
+ * orig end EV5PMCTRCTL_BITS;
+ * orig
+ * orig end_module $pal_impure;
+ * orig
+ * orig module $pal_logout;
+ * orig
+ * orig //
+ * orig // Start definition of Corrected Error Frame
+ * orig //
+ */
+
+/*
+ * orig aggregate crd_logout struct prefix "" tag "";
+ */
+
+#ifdef SIMOS
+#define pal_logout_area 0x600
+#define mchk_crd_base 0
+#endif
+
+/* orig mchk$crd_flag quadword; */
+#define mchk_crd_flag 0
+/* orig mchk$crd_offsets quadword; */
+#define mchk_crd_offsets 8
+/* orig
+ * orig // Pal-specific information */
+#define mchk_crd_mchk_code 0x10
+/* orig mchk$crd_mchk_code quadword;
+ * orig
+ * orig // CPU-specific information
+ * orig constant mchk$crd_cpu_base equals . ;
+ * orig mchk$crd_ei_addr quadword; */
+#define mchk_crd_ei_addr 0x18
+/* orig mchk$crd_fill_syn quadword; */
+#define mchk_crd_fill_syn 0x20
+/* orig mchk$crd_ei_stat quadword; */
+#define mchk_crd_ei_stat 0x28
+/* orig mchk$crd_isr quadword; */
+#define mchk_crd_isr 0x30
+
+/*
+ * Hacked up constants for the turbolaser build. Hope
+ * this is more or less correct.
+ */
+
+#define mchk_crd_whami 0x38
+#define mchk_crd_tldev 0x40
+#define mchk_crd_tlber 0x48
+#define mchk_crd_tlesr0 0x50
+#define mchk_crd_tlesr1 0x58
+#define mchk_crd_tlesr2 0x60
+#define mchk_crd_tlesr3 0x68
+#define mchk_crd_rsvd 0x70
+
+
+#ifdef SIMOS
+/*
+ * mchk area seems different for tlaser
+ */
+
+#define mchk_crd_size 0x80
+#define mchk_mchk_base (mchk_crd_size)
+
+#define mchk_tlber 0x0
+#define mchk_tlepaerr 0x8
+#define mchk_tlepderr 0x10
+#define mchk_tlepmerr 0x18
+
+
+#endif
+
+
+/* orig
+ * orig // System-specific information
+ * orig constant mchk$crd_sys_base equals . ;
+ * orig constant mchk$crd_size equals (.+7) & %xfff8;
+ * orig
+ * orig end crd_logout;
+ * orig //
+ * orig // Start definition of Machine check logout Frame
+ * orig //
+ * orig aggregate logout struct prefix "" tag "";
+ * orig mchk$flag quadword; */
+/* orig mchk$offsets quadword; */
+/* orig
+ * orig // Pal-specific information
+ * orig mchk$mchk_code quadword; */
+/*
+
+ * orig mchk$pt quadword dimension 24;
+ * orig
+ * orig // CPU-specific information
+ * orig constant mchk$cpu_base equals . ;
+ * orig mchk$exc_addr quadword;
+ * orig mchk$exc_sum quadword;
+ * orig mchk$exc_mask quadword;
+ * orig mchk$pal_base quadword;
+ * orig mchk$isr quadword;
+ * orig mchk$icsr quadword;
+ * orig mchk$ic_perr_stat quadword;
+ * orig mchk$dc_perr_stat quadword;
+ * orig mchk$va quadword;
+ * orig mchk$mm_stat quadword;
+ * orig mchk$sc_addr quadword;
+ * orig mchk$sc_stat quadword;
+ * orig mchk$bc_tag_addr quadword;
+ * orig mchk$ei_addr quadword;
+ * orig mchk$fill_syn quadword;
+ * orig mchk$ei_stat quadword;
+ * orig mchk$ld_lock quadword;
+ * orig
+ * orig // System-specific information
+ * orig
+ * orig constant mchk$sys_base equals . ;
+ * orig mchk$sys_ipr1 quadword ; // Holder for system-specific stuff
+ * orig
+ * orig constant mchk$size equals (.+7) & %xfff8;
+ * orig
+ * orig
+ * orig constant mchk$crd_base equals 0 ;
+ * orig constant mchk$mchk_base equals mchk$crd_size ;
+ * orig
+ * orig
+ * orig end logout;
+ * orig
+ * orig end_module $pal_logout;
+*/
+
+
+
+
+/* this is lingering in the old ladbx code but looks like it was from ev4 days.
+ * This was 0x160 in the old days..pb
+*/
+#define LAF_K_SIZE MACHINE_CHECK_SIZE
+#endif
diff --git a/system/alpha/palcode/ev5_osfalpha_defs.h b/system/alpha/palcode/ev5_osfalpha_defs.h
new file mode 100644
index 000000000..08a110075
--- /dev/null
+++ b/system/alpha/palcode/ev5_osfalpha_defs.h
@@ -0,0 +1,160 @@
+#ifndef EV5_OSFALPHA_DEFS_INCLUDED
+#define EV5_OSFALPHA_DEFS_INCLUDED 1
+
+
+// from ev5_osfalpha_defs.mar from Lance's fetch directory
+// lowercaseified and $ changed to _ and reformatting for gas...pb Nov/95
+
+//orig .MACRO $OSF_ALPHADEFS
+//orig OSF_ALPHADEF_VER == 5 ; Flag the version number of this file.
+//orig .ENDM
+//orig .MACRO $OSF_PSDEF,$GBL
+//orig $DEFINI OSFPS,$GBL
+//orig;+
+//orig; PS Layout - PS
+//orig; Loc Size name function
+//orig; ------ ------ ----- -----------------------------------
+//orig; <0:2> 3 IPL Prio level
+//orig; <3> 1 CM Current Mode
+//orig;-
+
+#define osfps_v_mode 3
+#define osfps_m_mode (1<<osfps_v_mode)
+#define osfps_v_ipl 0
+#define osfps_m_ipl (7<<osfps_v_ipl)
+
+#define osfipl_c_mchk 7
+#define osfipl_c_rt 6
+#define osfipl_c_clk 5
+#define osfipl_c_dev1 4
+#define osfipl_c_dev0 3
+#define osfipl_c_sw1 2
+#define osfipl_c_sw0 1
+#define osfipl_c_zero 0
+
+#define osfint_c_mchk 2
+#define osfint_c_clk 1
+#define osfint_c_dev 3
+#define osfint_c_ip 0
+#define osfint_c_perf 4
+#define osfint_c_passrel 5
+
+//orig _DEFEND OSFPS,_GBL,DEF
+//orig .ENDM
+
+//orig;+
+//orig; PTE layout - symbol prefix osfpte_
+//orig;
+//orig; Loc Size name function
+//orig; ------ ------ ------ -----------------------------------
+//orig; <63:32> 32 PFN Page Frame Number
+//orig; <31:16> 16 SOFT Bits reserved for software use
+//orig; <15:14> 2
+//orig; <13> 1 UWE User write enable
+//orig; <12> 1 KWE Kernel write enable
+//orig; <11:10> 2
+//orig; <9> 1 URE User read enable
+//orig; <8> 1 KRE Kernel read enable
+//orig; <7:6> 2 RES Reserved SBZ
+//orig; <5> 1 HPF Huge Page Flag
+//orig; <4> 1 ASM Wild card address space number match
+//orig; <3> 1 FOE Fault On execute
+//orig; <2> 1 FOW Fault On Write
+//orig; <1> 1 FOR Fault On Read
+//orig; <0> 1 V valid bit
+//orig;-
+
+//orig .MACRO _OSF_PTEDEF,_GBL
+//orig _DEFINI OSFPTE,_GBL
+
+#define osfpte_v_pfn 32
+#define osfpte_m_soft (0xFFFF0000)
+#define osfpte_v_soft 16
+#define osfpte_m_uwe (0x2000)
+#define osfpte_v_uwe 13
+#define osfpte_m_kwe (0x1000)
+#define osfpte_v_kwe 12
+#define osfpte_m_ure (0x0200)
+#define osfpte_v_ure 9
+#define osfpte_m_kre (0x0100)
+#define osfpte_v_kre 8
+#define osfpte_m_hpf (0x0020)
+#define osfpte_v_hpf 5
+#define osfpte_m_asm (0x0010)
+#define osfpte_v_asm 4
+#define osfpte_m_foe (0x0008)
+#define osfpte_v_foe 3
+#define osfpte_m_fow (0x0004)
+#define osfpte_v_fow 2
+#define osfpte_m_for (0x0002)
+#define osfpte_v_for 1
+#define osfpte_m_v (0x0001)
+#define osfpte_v_v 0
+
+#define osfpte_m_ksegbits (osfpte_m_kre | osfpte_m_kwe | osfpte_m_v | osfpte_m_asm)
+#define osfpte_m_prot (osfpte_m_ure | osfpte_m_uwe | osfpte_m_kre | osfpte_m_kwe)
+
+//orig _DEFEND OSFPTE,_GBL,DEF
+//orig .ENDM
+
+//orig;+
+//orig; VA layout - symbol prefix VA_
+//orig;
+//orig; Loc Size name function
+//orig; ------ ------ ------- -----------------------------------
+//orig; <42:33> 10 SEG1 First seg table offset for mapping
+//orig; <32:23> 10 SEG2 Second seg table offset for mapping
+//orig; <22:13> 10 SEG3 Third seg table offset for mapping
+//orig; <12:0> 13 OFFSET Byte within page
+//orig;-
+//orig .MACRO _OSF_VADEF,_GBL
+//orig _DEFINI OSFVA,_GBL
+
+#define osfva_m_offset (0x000000001FFF)
+#define osfva_v_offset 0
+#define osfva_m_seg3 (0x0000007FE000)
+#define osfva_v_seg3 13
+#define osfva_m_seg2 (0x0001FF800000)
+#define osfva_v_seg2 23
+#define osfva_m_seg1 (0x7FE00000000)
+#define osfva_v_seg1 33
+
+//orig _DEFEND OSFVA,_GBL,DEF
+//orig .ENDM
+//orig;+
+//orig; PRIVILEGED CONTEXT BLOCK (PCB)
+//orig;-
+//orig .MACRO _OSF_PCBDEF,_GBL
+//orig _DEFINI OSFPCB,_GBL
+
+#define osfpcb_q_ksp (0x0000)
+#define osfpcb_q_usp (0x0008)
+#define osfpcb_q_Usp (0x0008)
+#define osfpcb_q_mmptr (0x0010)
+#define osfpcb_q_Mmptr (0x0010)
+#define osfpcb_l_cc (0x0018)
+#define osfpcb_l_asn (0x001C)
+#define osfpcb_q_unique (0x0020)
+#define osfpcb_q_fen (0x0028)
+#define osfpcb_v_pme 62
+
+//orig _DEFEND OSFPCB,_GBL,DEF
+//orig .ENDM
+//orig;+
+//orig; Stack Frame
+//orig;-
+//orig .MACRO _OSF_SFDEF,_GBL
+//orig _DEFINI OSFSF,_GBL
+
+#define osfsf_ps (0x00)
+#define osfsf_pc (0x08)
+#define osfsf_gp (0x10)
+#define osfsf_a0 (0x18)
+#define osfsf_a1 (0x20)
+#define osfsf_a2 (0x28)
+#define osfsf_c_size (0x30)
+
+//orig _DEFEND OSFSF,_GBL,DEF
+//orig .ENDM
+
+#endif
diff --git a/system/alpha/palcode/ev5_paldef.h b/system/alpha/palcode/ev5_paldef.h
new file mode 100644
index 000000000..5f9241b12
--- /dev/null
+++ b/system/alpha/palcode/ev5_paldef.h
@@ -0,0 +1,185 @@
+#ifndef EV5_PALDEF_INCLUDED
+#define EV5_PALDEF_INCLUDED 1
+
+// from ev5_paldef.mar from Lance's fetch directory...pb Nov/95
+// some entries have been superseded by the more recent evt_defs.h
+
+// These are lower-caseified and have the $ signs (unnecessarily we now discover) removed.
+
+// Note that at the bottom of this file is the version of ev5_defs.mar
+// which is more recent than the top part of the file and contains overlapping information...pb Nov/95
+
+// .MACRO PALDEFS
+// PALDEF_VER == 1 ; Flag the version number of this file.
+// .ENDM
+//
+//
+// .MACRO _HALT_CODES,_GBL
+// _DEFINI HALT_CODES,_GBL
+
+#define hlt_c_reset 0
+#define hlt_c_hw_halt 1
+#define hlt_c_ksp_inval 2
+#define hlt_c_scbb_inval 3
+#define hlt_c_ptbr_inval 4
+#define hlt_c_sw_halt 5
+#define hlt_c_dbl_mchk 6
+#define hlt_c_mchk_from_pal 7
+#define hlt_c_start 32
+#define hlt_c_callback 33
+#define hlt_c_mpstart 34
+#define hlt_c_lfu_start 35
+
+// halt codes above 255 reserved for platform specific errors
+// _DEFEND HALT_CODES,_GBL,DEF
+// .ENDM
+// .MACRO _MCHK_CODES,_GBL
+// _DEFINI MCHK_CODES,_GBL
+
+#define mchk_c_tperr (64<<1)
+#define mchk_c_tcperr (65<<1)
+#define mchk_c_herr (66<<1)
+#define mchk_c_ecc_c (67<<1)
+#define mchk_c_ecc_nc (68<<1)
+#define mchk_c_unknown (69<<1)
+#define mchk_c_cacksoft (70<<1)
+#define mchk_c_bugcheck (71<<1)
+#define mchk_c_os_bugcheck (72<<1)
+#define mchk_c_dcperr (73<<1)
+#define mchk_c_icperr (74<<1)
+#define mchk_c_retryable_ird (75<<1)
+#define mchk_c_proc_hrd_error (76<<1)
+#define mchk_c_scperr (77<<1)
+#define mchk_c_bcperr (78<<1)
+//; mchk codes above 255 reserved for platform specific errors
+
+
+#define mchk_c_read_nxm (256<<1)
+#define mchk_c_sys_hrd_error (257<<1)
+#define mchk_c_sys_ecc (258<<1)
+
+// _DEFEND MCHK_CODES,_GBL,DEF
+// .ENDM
+
+// .MACRO _EV5_MM,_GBL
+// _DEFINI _EV5_MM,_GBL
+
+#define page_seg_size_bits 10
+#define page_offset_size_bits 13
+#define page_size_bytes 8192
+#define va_size_bits 43
+#define pa_size_bits 45
+
+// _DEFEND _EV5_MM,_GBL,DEF
+// .ENDM
+
+// .MACRO _PALTEMP,_GBL
+// _DEFINI _PALTEMP,_GBL
+
+// replaced by ev5_defs.h #define pt0 (0x140)
+// replaced by ev5_defs.h #define pt1 (0x141)
+// replaced by ev5_defs.h #define pt2 (0x142)
+#define pt_entuna (0x142)
+// replaced by ev5_defs.h #define pt3 (0x143)
+#define pt_impure (0x143)
+// replaced by ev5_defs.h #define pt4 (0x144)
+// replaced by ev5_defs.h #define pt5 (0x145)
+// replaced by ev5_defs.h #define pt6 (0x146)
+// replaced by ev5_defs.h #define pt7 (0x147)
+#define pt_entif (0x147)
+// replaced by ev5_defs.h #define pt8 (0x148)
+#define pt_intmask (0x148)
+// replaced by ev5_defs.h #define pt9 (0x149)
+#define pt_entsys (0x149)
+#define pt_ps (0x149)
+// replaced by ev5_defs.h #define pt10 (0x14a)
+// replaced by ev5_defs.h #define pt11 (0x14b)
+#define pt_trap (0x14b)
+#define pt_entint (0x14b)
+// replaced by ev5_defs.h #define pt12 (0x14c)
+#define pt_entarith (0x14c)
+// replaced by ev5_defs.h #define pt13 (0x14d)
+#define pt_sys0 (0x14d)
+// replaced by ev5_defs.h #define pt14 (0x14e)
+#define pt_sys1 (0x14e)
+// replaced by ev5_defs.h #define pt15 (0x14f)
+#define pt_sys2 (0x14f)
+// replaced by ev5_defs.h #define pt16 (0x150)
+#define pt_whami (0x150)
+#define pt_mces (0x150)
+#define pt_misc (0x150)
+// replaced by ev5_defs.h #define pt17 (0x151)
+#define pt_scc (0x151)
+#define pt_sysval (0x151)
+// replaced by ev5_defs.h #define pt18 (0x152)
+#define pt_prbr (0x152)
+#define pt_usp (0x152)
+// replaced by ev5_defs.h #define pt19 (0x153)
+#define pt_ksp (0x153)
+// replaced by ev5_defs.h #define pt20 (0x154)
+#define pt_ptbr (0x154)
+// replaced by ev5_defs.h #define pt21 (0x155)
+#define pt_vptbr (0x155)
+#define pt_entmm (0x155)
+// replaced by ev5_defs.h #define pt22 (0x156)
+#define pt_scbb (0x156)
+#define pt_kgp (0x156)
+// replaced by ev5_defs.h #define pt23 (0x157)
+#define pt_pcbb (0x157)
+
+
+#define pt_misc_v_switch 48
+#define pt_misc_v_cm 56
+
+// _DEFEND _PALTEMP,_GBL,DEF
+// .ENDM
+
+// .MACRO _OSF_MMCSR_DEF,_GBL
+// _DEFINI OSFMMCSR,_GBL
+
+#define mmcsr_c_tnv 0
+#define mmcsr_c_acv 1
+#define mmcsr_c_for 2
+#define mmcsr_c_foe 3
+#define mmcsr_c_fow 4
+
+// _DEFEND OSFMMCSR,_GBL,DEF
+// .ENDM
+
+// .MACRO _MM_STAT_DEF,_GBL
+// _DEFINI MMSTATDEF,_GBL
+
+#define mm_stat_m_opcode (0x3F)
+#define mm_stat_m_ra (0x1F)
+#define evx_opc_sync (0x18)
+#define EVX_OPC_SYNC (0x18)
+#define evx_opc_hw_ld (0x1B)
+
+// _DEFEND MMSTATDEF,_GBL,DEF
+// .ENDM
+
+// .MACRO _OSF_A0_DEF,_GBL
+// _DEFINI OSFA0DEF,_GBL
+
+#define osf_a0_bpt (0x0)
+#define osf_a0_bugchk (0x1)
+#define osf_a0_gentrap (0x2)
+#define osf_a0_fen (0x3)
+#define osf_a0_opdec (0x4)
+
+// _DEFEND OSFA0DEF,_GBL,DEF
+// .ENDM
+
+// .MACRO _EV5_IPLDEF,_GBL
+// _DEFINI EV5_IPLDEF,_GBL
+
+#define ipl_machine_check 31
+#define ipl_powerfail 30
+#define ipl_perf_count 29
+#define ipl_clock 22
+#define ipl_interprocessor 22
+
+// _DEFEND EV5_IPLDEF,_GBL,DEF
+// .ENDM
+
+#endif
diff --git a/system/alpha/palcode/fromHudsonMacros.h b/system/alpha/palcode/fromHudsonMacros.h
new file mode 100644
index 000000000..4b22d87ac
--- /dev/null
+++ b/system/alpha/palcode/fromHudsonMacros.h
@@ -0,0 +1,145 @@
+#ifndef HUDSON_MACROS_LOADED
+#define HUDSON_MACROS_LOADED 1
+
+/*
+ * VID: [T1.2] PT: [Fri Apr 21 16:47:16 1995] SF: [macros.h]
+ * TI: [/sae_users/cruz/bin/vice -iplatform.s -l// -p# -DEB164 -h -m -aeb164 ]
+ */
+/*
+*****************************************************************************
+** *
+** Copyright © 1993, 1994 *
+** by Digital Equipment Corporation, Maynard, Massachusetts. *
+** *
+** All Rights Reserved *
+** *
+** Permission is hereby granted to use, copy, modify and distribute *
+** this software and its documentation, in both source code and *
+** object code form, and without fee, for the purpose of distribution *
+** of this software or modifications of this software within products *
+** incorporating an integrated circuit implementing Digital's AXP *
+** architecture, regardless of the source of such integrated circuit, *
+** provided that the above copyright notice and this permission notice *
+** appear in all copies, and that the name of Digital Equipment *
+** Corporation not be used in advertising or publicity pertaining to *
+** distribution of the document or software without specific, written *
+** prior permission. *
+** *
+** Digital Equipment Corporation disclaims all warranties and/or *
+** guarantees with regard to this software, including all implied *
+** warranties of fitness for a particular purpose and merchantability, *
+** and makes no representations regarding the use of, or the results *
+** of the use of, the software and documentation in terms of correctness, *
+** accuracy, reliability, currentness or otherwise; and you rely on *
+** the software, documentation and results solely at your own risk. *
+** *
+** AXP is a trademark of Digital Equipment Corporation. *
+** *
+*****************************************************************************
+**
+** FACILITY:
+**
+** DECchip 21164 PALcode
+**
+** MODULE:
+**
+** macros.h
+**
+** MODULE DESCRIPTION:
+**
+** DECchip 21164 PALcode macro definitions
+**
+** AUTHOR: ER
+**
+** CREATION DATE: 29-Nov-1993
+**
+** $Id: fromHudsonMacros.h,v 1.1.1.1 1997/10/30 23:27:19 verghese Exp $
+**
+** MODIFICATION HISTORY:
+**
+** $Log: fromHudsonMacros.h,v $
+** Revision 1.1.1.1 1997/10/30 23:27:19 verghese
+** current 10/29/97
+**
+** Revision 1.1 1995/11/18 01:46:23 boyle
+** Initial revision
+**
+** Revision 1.5 1994/07/08 17:03:12 samberg
+** Changes to support platform specific additions
+**
+** Revision 1.4 1994/05/20 19:24:19 ericr
+** Moved STALL macro from osfpal.s to here
+** Added LDLI macro
+**
+** Revision 1.3 1994/05/20 18:08:14 ericr
+** Changed line comments to C++ style comment character
+**
+** Revision 1.2 1994/02/28 18:45:51 ericr
+** Fixed EGORE related bugs
+**
+** Revision 1.1 1993/12/16 21:55:05 eric
+** Initial revision
+**
+**
+**--
+*/
+
+#define STALL \
+ mfpr r31, pt0
+
+#define NOP \
+ bis $31, $31, $31
+
+/*
+** Align code on an 8K byte page boundary.
+*/
+
+#define ALIGN_PAGE \
+ .align 13
+
+/*
+** Align code on a 32 byte block boundary.
+*/
+
+#define ALIGN_BLOCK \
+ .align 5
+
+/*
+** Align code on a quadword boundary.
+*/
+
+#define ALIGN_BRANCH \
+ .align 3
+
+/*
+** Hardware vectors go in .text 0 sub-segment.
+*/
+
+#define HDW_VECTOR(offset) \
+ . = offset
+
+/*
+** Privileged CALL_PAL functions are in .text 1 sub-segment.
+*/
+
+#define CALL_PAL_PRIV(vector) \
+ . = (PAL_CALL_PAL_PRIV_ENTRY+(vector<<6))
+
+/*
+** Unprivileged CALL_PAL functions are in .text 1 sub-segment,
+** the privileged bit is removed from these vectors.
+*/
+
+#define CALL_PAL_UNPRIV(vector) \
+ . = (PAL_CALL_PAL_UNPRIV_ENTRY+((vector&0x3F)<<6))
+
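+/*
+** Illustrative note (editorial sketch): shifting the vector left by 6
+** places consecutive CALL_PAL entry points 64 bytes (16 instructions)
+** apart. The unprivileged macro masks with 0x3F to drop bit <7> of the
+** function code, so, for example, callsys (0x83) is placed at
+** PAL_CALL_PAL_UNPRIV_ENTRY + (0x03<<6).
+*/
+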
+/*
+** Implements a load "immediate" longword function
+*/
+#define LDLI(reg,val) \
+ ldah reg, ((val+0x8000) >> 16)(zero); \
+ lda reg, (val&0xffff)(reg)
+
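+/*
+** Illustrative note (editorial sketch): lda sign-extends its 16-bit
+** displacement, so when bit <15> of val is set the lda effectively
+** subtracts 0x10000; the (val+0x8000)>>16 term in the ldah
+** pre-compensates for that. For example, LDLI(r1, 0x1234ABCD) becomes
+** ldah r1, 0x1235(zero) giving 0x12350000, then lda r1, 0xABCD(r1)
+** adds the sign-extended -0x5433, yielding 0x1234ABCD.
+*/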
+
+
+#endif
diff --git a/system/alpha/palcode/fromHudsonOsf.h b/system/alpha/palcode/fromHudsonOsf.h
new file mode 100644
index 000000000..f9cb2f747
--- /dev/null
+++ b/system/alpha/palcode/fromHudsonOsf.h
@@ -0,0 +1,554 @@
+#ifndef FROMHUDSONOSF_INCLUDED
+#define FROMHUDSONOSF_INCLUDED 1
+/*
+ * VID: [T1.2] PT: [Fri Apr 21 16:47:14 1995] SF: [osf.h]
+ * TI: [/sae_users/cruz/bin/vice -iplatform.s -l// -p# -DEB164 -h -m -aeb164 ]
+ */
+#define __OSF_LOADED 1
+/*
+*****************************************************************************
+** *
+** Copyright © 1993, 1994 *
+** by Digital Equipment Corporation, Maynard, Massachusetts. *
+** *
+** All Rights Reserved *
+** *
+** Permission is hereby granted to use, copy, modify and distribute *
+** this software and its documentation, in both source code and *
+** object code form, and without fee, for the purpose of distribution *
+** of this software or modifications of this software within products *
+** incorporating an integrated circuit implementing Digital's AXP *
+** architecture, regardless of the source of such integrated circuit, *
+** provided that the above copyright notice and this permission notice *
+** appear in all copies, and that the name of Digital Equipment *
+** Corporation not be used in advertising or publicity pertaining to *
+** distribution of the document or software without specific, written *
+** prior permission. *
+** *
+** Digital Equipment Corporation disclaims all warranties and/or *
+** guarantees with regard to this software, including all implied *
+** warranties of fitness for a particular purpose and merchantability, *
+** and makes no representations regarding the use of, or the results *
+** of the use of, the software and documentation in terms of correctness, *
+** accuracy, reliability, currentness or otherwise; and you rely on *
+** the software, documentation and results solely at your own risk. *
+** *
+** AXP is a trademark of Digital Equipment Corporation. *
+** *
+*****************************************************************************
+**
+** FACILITY:
+**
+** DECchip 21164 PALcode
+**
+** MODULE:
+**
+** osf.h
+**
+** MODULE DESCRIPTION:
+**
+** OSF/1 specific definitions
+**
+** AUTHOR: ER
+**
+** CREATION DATE: 24-Nov-1993
+**
+** $Id: fromHudsonOsf.h,v 1.1.1.1 1997/10/30 23:27:19 verghese Exp $
+**
+** MODIFICATION HISTORY:
+**
+** $Log: fromHudsonOsf.h,v $
+** Revision 1.1.1.1 1997/10/30 23:27:19 verghese
+** current 10/29/97
+**
+** Revision 1.1 1995/11/18 01:46:31 boyle
+** Initial revision
+**
+** Revision 1.11 1995/04/21 02:06:30 fdh
+** Replaced C++ style comments with Standard C style comments.
+**
+** Revision 1.10 1994/09/26 14:17:47 samberg
+** Complete VICE work and EB164/SD164 breakout.
+**
+** Revision 1.9 1994/07/26 17:39:10 samberg
+** Changes for SD164.
+**
+** Revision 1.8 1994/07/08 17:03:48 samberg
+** Changes to support platform specific additions
+**
+** Revision 1.7 1994/05/20 19:23:51 ericr
+** Moved STACK_FRAME macro from osfpal.s to here
+**
+** Revision 1.6 1994/05/20 18:08:19 ericr
+** Changed line comments to C++ style comment character
+**
+** Revision 1.5 1994/01/11 18:43:33 ericr
+** Removed PAL version/revision and size constants
+**
+** Revision 1.4 1994/01/05 16:22:32 ericr
+** Added more SCB vector offsets and MCHK error code
+**
+** Revision 1.3 1994/01/03 19:35:40 ericr
+** Derive mask definitions from field constants
+**
+** Revision 1.2 1993/12/22 20:43:01 eric
+** Added mask definitions for MCES bits
+**
+** Revision 1.1 1993/12/16 21:55:05 eric
+** Initial revision
+**
+**
+**--
+*/
+
+/*
+** Seg0 and Seg1 Virtual Address (VA) Format
+**
+** Loc Size Name Function
+** ----- ---- ---- ---------------------------------
+** <42:33> 10 SEG1 First level page table offset
+** <32:23> 10 SEG2 Second level page table offset
+** <22:13> 10 SEG3 Third level page table offset
+** <12:00> 13 OFFSET Byte within page offset
+*/
+
+#define VA_V_SEG1 33
+#define VA_M_SEG1 (0x3FF<<VA_V_SEG1)
+#define VA_V_SEG2 23
+#define VA_M_SEG2 (0x3FF<<VA_V_SEG2)
+#define VA_V_SEG3 13
+#define VA_M_SEG3 (0x3FF<<VA_V_SEG3)
+#define VA_V_OFFSET 0
+#define VA_M_OFFSET 0x1FFF
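+
+/*
+** Illustrative sketch: for a seg0/seg1 virtual address va, the level-1,
+** level-2 and level-3 page-table indices are (va & VA_M_SEG1)>>VA_V_SEG1,
+** (va & VA_M_SEG2)>>VA_V_SEG2 and (va & VA_M_SEG3)>>VA_V_SEG3, and the
+** byte offset within the 8K page is va & VA_M_OFFSET.
+*/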
+
+/*
+** Virtual Address Options: 8K byte page size
+*/
+
+#define VA_S_SIZE 43
+#define VA_S_OFF 13
+#define va_s_off 13
+#define VA_S_SEG 10
+#define VA_S_PAGE_SIZE 8192
+
+/*
+** Page Table Entry (PTE) Format
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <63:32> 32 PFN Page Frame Number
+** <31:16> 16 SW Reserved for software
+** <15:14> 2 RSV0 Reserved for hardware SBZ
+** <13> 1 UWE User Write Enable
+** <12> 1 KWE Kernel Write Enable
+** <11:10> 2 RSV1 Reserved for hardware SBZ
+** <9> 1 URE User Read Enable
+** <8> 1 KRE Kernel Read Enable
+** <7> 1 RSV2 Reserved for hardware SBZ
+** <6:5> 2 GH Granularity Hint
+** <4> 1 ASM Address Space Match
+** <3> 1 FOE Fault On Execute
+** <2> 1 FOW Fault On Write
+** <1> 1 FOR Fault On Read
+** <0> 1 V Valid
+*/
+
+#define PTE_V_PFN 32
+#define PTE_M_PFN 0xFFFFFFFF00000000
+#define PTE_V_SW 16
+#define PTE_M_SW 0x00000000FFFF0000
+#define PTE_V_UWE 13
+#define PTE_M_UWE (1<<PTE_V_UWE)
+#define PTE_V_KWE 12
+#define PTE_M_KWE (1<<PTE_V_KWE)
+#define PTE_V_URE 9
+#define PTE_M_URE (1<<PTE_V_URE)
+#define PTE_V_KRE 8
+#define PTE_M_KRE (1<<PTE_V_KRE)
+#define PTE_V_GH 5
+#define PTE_M_GH (3<<PTE_V_GH)
+#define PTE_V_ASM 4
+#define PTE_M_ASM (1<<PTE_V_ASM)
+#define PTE_V_FOE 3
+#define PTE_M_FOE (1<<PTE_V_FOE)
+#define PTE_V_FOW 2
+#define PTE_M_FOW (1<<PTE_V_FOW)
+#define PTE_V_FOR 1
+#define PTE_M_FOR (1<<PTE_V_FOR)
+#define PTE_V_VALID 0
+#define PTE_M_VALID (1<<PTE_V_VALID)
+
+#define PTE_M_KSEG 0x1111
+#define PTE_M_PROT 0x3300
+#define pte_m_prot 0x3300
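+
+/*
+** Illustrative sketch: a valid kernel read/write mapping of page frame
+** number pfn could be composed as
+** (pfn<<PTE_V_PFN) | PTE_M_KWE | PTE_M_KRE | PTE_M_VALID,
+** with PTE_M_ASM or'd in for mappings shared across address spaces.
+** PTE_M_PROT (0x3300) collects the four read/write enable bits and
+** PTE_M_KSEG (0x1111) the V, ASM, KRE and KWE bits.
+*/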
+
+/*
+** System Entry Instruction Fault (entIF) Constants:
+*/
+
+#define IF_K_BPT 0x0
+#define IF_K_BUGCHK 0x1
+#define IF_K_GENTRAP 0x2
+#define IF_K_FEN 0x3
+#define IF_K_OPCDEC 0x4
+
+/*
+** System Entry Hardware Interrupt (entInt) Constants:
+*/
+
+#define INT_K_IP 0x0
+#define INT_K_CLK 0x1
+#define INT_K_MCHK 0x2
+#define INT_K_DEV 0x3
+#define INT_K_PERF 0x4
+
+/*
+** System Entry MM Fault (entMM) Constants:
+*/
+
+#define MM_K_TNV 0x0
+#define MM_K_ACV 0x1
+#define MM_K_FOR 0x2
+#define MM_K_FOE 0x3
+#define MM_K_FOW 0x4
+
+/*
+** Process Control Block (PCB) Offsets:
+*/
+
+#define PCB_Q_KSP 0x0000
+#define PCB_Q_USP 0x0008
+#define PCB_Q_PTBR 0x0010
+#define PCB_L_PCC 0x0018
+#define PCB_L_ASN 0x001C
+#define PCB_Q_UNIQUE 0x0020
+#define PCB_Q_FEN 0x0028
+#define PCB_Q_RSV0 0x0030
+#define PCB_Q_RSV1 0x0038
+
+/*
+** Processor Status Register (PS) Bit Summary
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <3> 1 CM Current Mode
+** <2:0> 3 IPL Interrupt Priority Level
+**/
+
+#define PS_V_CM 3
+#define PS_M_CM (1<<PS_V_CM)
+#define PS_V_IPL 0
+#define PS_M_IPL (7<<PS_V_IPL)
+
+#define PS_K_KERN (0<<PS_V_CM)
+#define PS_K_USER (1<<PS_V_CM)
+
+#define IPL_K_ZERO 0x0
+#define IPL_K_SW0 0x1
+#define IPL_K_SW1 0x2
+#define IPL_K_DEV0 0x3
+#define IPL_K_DEV1 0x4
+#define IPL_K_CLK 0x5
+#define IPL_K_RT 0x6
+#define IPL_K_PERF 0x6
+#define IPL_K_PFAIL 0x6
+#define IPL_K_MCHK 0x7
+
+#define IPL_K_LOW 0x0
+#define IPL_K_HIGH 0x7
+
+/*
+** SCB Offset Definitions:
+*/
+
+#define SCB_Q_FEN 0x0010
+#define SCB_Q_ACV 0x0080
+#define SCB_Q_TNV 0x0090
+#define SCB_Q_FOR 0x00A0
+#define SCB_Q_FOW 0x00B0
+#define SCB_Q_FOE 0x00C0
+#define SCB_Q_ARITH 0x0200
+#define SCB_Q_KAST 0x0240
+#define SCB_Q_EAST 0x0250
+#define SCB_Q_SAST 0x0260
+#define SCB_Q_UAST 0x0270
+#define SCB_Q_UNALIGN 0x0280
+#define SCB_Q_BPT 0x0400
+#define SCB_Q_BUGCHK 0x0410
+#define SCB_Q_OPCDEC 0x0420
+#define SCB_Q_ILLPAL 0x0430
+#define SCB_Q_TRAP 0x0440
+#define SCB_Q_CHMK 0x0480
+#define SCB_Q_CHME 0x0490
+#define SCB_Q_CHMS 0x04A0
+#define SCB_Q_CHMU 0x04B0
+#define SCB_Q_SW0 0x0500
+#define SCB_Q_SW1 0x0510
+#define SCB_Q_SW2 0x0520
+#define SCB_Q_SW3 0x0530
+#define SCB_Q_SW4 0x0540
+#define SCB_Q_SW5 0x0550
+#define SCB_Q_SW6 0x0560
+#define SCB_Q_SW7 0x0570
+#define SCB_Q_SW8 0x0580
+#define SCB_Q_SW9 0x0590
+#define SCB_Q_SW10 0x05A0
+#define SCB_Q_SW11 0x05B0
+#define SCB_Q_SW12 0x05C0
+#define SCB_Q_SW13 0x05D0
+#define SCB_Q_SW14 0x05E0
+#define SCB_Q_SW15 0x05F0
+#define SCB_Q_CLOCK 0x0600
+#define SCB_Q_INTER 0x0610
+#define SCB_Q_SYSERR 0x0620
+#define SCB_Q_PROCERR 0x0630
+#define SCB_Q_PWRFAIL 0x0640
+#define SCB_Q_PERFMON 0x0650
+#define SCB_Q_SYSMCHK 0x0660
+#define SCB_Q_PROCMCHK 0x0670
+#define SCB_Q_PASSREL 0x0680
+
+/*
+** Stack Frame (FRM) Offsets:
+**
+** There are two types of system entries for OSF/1 - those for the
+** callsys CALL_PAL function and those for exceptions and interrupts.
+** Both entry types use the same stack frame layout. The stack frame
+** contains space for the PC, the PS, the saved GP, and the saved
+** argument registers a0, a1, and a2. On entry, SP points to the
+** saved PS.
+*/
+
+#define FRM_Q_PS 0x0000
+#define FRM_Q_PC 0x0008
+#define FRM_Q_GP 0x0010
+#define FRM_Q_A0 0x0018
+#define FRM_Q_A1 0x0020
+#define FRM_Q_A2 0x0028
+
+#define FRM_K_SIZE 48
+
+#define STACK_FRAME(tmp1,tmp2) \
+ sll ps, 63-PS_V_CM, p7; \
+ bge p7, 0f; \
+ bis zero, zero, ps; \
+ mtpr sp, ptUsp; \
+ mfpr sp, ptKsp; \
+0: lda sp, 0-FRM_K_SIZE(sp); \
+ stq tmp1, FRM_Q_PS(sp); \
+ stq tmp2, FRM_Q_PC(sp); \
+ stq gp, FRM_Q_GP(sp); \
+ stq a0, FRM_Q_A0(sp); \
+ stq a1, FRM_Q_A1(sp); \
+ stq a2, FRM_Q_A2(sp)
+
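+/*
+** Illustrative note (editorial sketch): STACK_FRAME shifts the saved PS
+** left by 63-PS_V_CM so the current-mode bit lands in the sign position.
+** If that bit is clear (kernel mode) the bge skips the stack swap;
+** otherwise ps is cleared to kernel, the user SP is saved in ptUsp and
+** the kernel SP fetched from ptKsp. Either way SP is then dropped by
+** FRM_K_SIZE bytes and the old PS (tmp1), PC (tmp2), GP and a0-a2 are
+** stored into the frame at the offsets defined above.
+*/
+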
+/*
+** Halt Codes:
+*/
+
+#define HLT_K_RESET 0x0000
+#define HLT_K_HW_HALT 0x0001
+#define HLT_K_KSP_INVAL 0x0002
+#define HLT_K_SCBB_INVAL 0x0003
+#define HLT_K_PTBR_INVAL 0x0004
+#define HLT_K_SW_HALT 0x0005
+#define HLT_K_DBL_MCHK 0x0006
+#define HLT_K_MCHK_FROM_PAL 0x0007
+
+/*
+** Machine Check Codes:
+*/
+
+#define MCHK_K_TPERR 0x0080
+#define MCHK_K_TCPERR 0x0082
+#define MCHK_K_HERR 0x0084
+#define MCHK_K_ECC_C 0x0086
+#define MCHK_K_ECC_NC 0x0088
+#define MCHK_K_UNKNOWN 0x008A
+#define MCHK_K_CACKSOFT 0x008C
+#define MCHK_K_BUGCHECK 0x008E
+#define MCHK_K_OS_BUGCHECK 0x0090
+#define MCHK_K_DCPERR 0x0092
+#define MCHK_K_ICPERR 0x0094
+#define MCHK_K_RETRY_IRD 0x0096
+#define MCHK_K_PROC_HERR 0x0098
+
+/*
+** System Machine Check Codes:
+*/
+
+#define MCHK_K_READ_NXM 0x0200
+#define MCHK_K_SYS_HERR 0x0202
+
+/*
+** Machine Check Error Status Summary (MCES) Register Format
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <0> 1 MIP Machine check in progress
+** <1> 1 SCE System correctable error in progress
+** <2> 1 PCE Processor correctable error in progress
+** <3> 1 DPC Disable PCE error reporting
+** <4> 1 DSC Disable SCE error reporting
+*/
+
+#define MCES_V_MIP 0
+#define MCES_M_MIP (1<<MCES_V_MIP)
+#define MCES_V_SCE 1
+#define MCES_M_SCE (1<<MCES_V_SCE)
+#define MCES_V_PCE 2
+#define MCES_M_PCE (1<<MCES_V_PCE)
+#define MCES_V_DPC 3
+#define MCES_M_DPC (1<<MCES_V_DPC)
+#define MCES_V_DSC 4
+#define MCES_M_DSC (1<<MCES_V_DSC)
+
+#define MCES_M_ALL (MCES_M_MIP | MCES_M_SCE | MCES_M_PCE | MCES_M_DPC \
+ | MCES_M_DSC)
+
+/*
+** Who-Am-I (WHAMI) Register Format
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <7:0> 8 ID Who-Am-I identifier
+** <15:8> 1 SWAP Swap PALcode flag - character 'S'
+*/
+
+#define WHAMI_V_SWAP 8
+#define WHAMI_M_SWAP (1<<WHAMI_V_SWAP)
+#define WHAMI_V_ID 0
+#define WHAMI_M_ID 0xFF
+
+#define WHAMI_K_SWAP 0x53 /* Character 'S' */
+
+/*
+** Conventional Register Usage Definitions
+**
+** Assembler temporary `at' is `AT' so it doesn't conflict with the
+** `.set at' assembler directive.
+*/
+
+#define v0 $0 /* Function Return Value Register */
+#define t0 $1 /* Scratch (Temporary) Registers ... */
+#define t1 $2
+#define t2 $3
+#define t3 $4
+#define t4 $5
+#define t5 $6
+#define t6 $7
+#define t7 $8
+#define s0 $9 /* Saved (Non-Volatile) Registers ... */
+#define s1 $10
+#define s2 $11
+#define s3 $12
+#define s4 $13
+#define s5 $14
+#define fp $15 /* Frame Pointer Register, Or S6 */
+#define s6 $15
+#define a0 $16 /* Argument Registers ... */
+#define a1 $17
+#define a2 $18
+#define a3 $19
+#define a4 $20
+#define a5 $21
+#define t8 $22 /* Scratch (Temporary) Registers ... */
+#define t9 $23
+#define t10 $24
+#define t11 $25
+#define ra $26 /* Return Address Register */
+#define pv $27 /* Procedure Value Register, Or T12 */
+#define t12 $27
+#define AT $28 /* Assembler Temporary (Volatile) Register */
+#define gp $29 /* Global Pointer Register */
+#define sp $30 /* Stack Pointer Register */
+#define zero $31 /* Zero Register */
+
+/*
+** OSF/1 Unprivileged CALL_PAL Entry Offsets:
+**
+** Entry Name Offset (Hex)
+**
+** bpt 0080
+** bugchk 0081
+** callsys 0083
+** imb 0086
+** rdunique 009E
+** wrunique 009F
+** gentrap 00AA
+** dbgstop 00AD
+*/
+
+#define UNPRIV 0x80
+#define PAL_BPT_ENTRY 0x80
+#define PAL_BUGCHK_ENTRY 0x81
+#define PAL_CALLSYS_ENTRY 0x83
+#define PAL_IMB_ENTRY 0x86
+#define PAL_RDUNIQUE_ENTRY 0x9E
+#define PAL_WRUNIQUE_ENTRY 0x9F
+#define PAL_GENTRAP_ENTRY 0xAA
+
+#if defined(KDEBUG)
+#define PAL_DBGSTOP_ENTRY 0xAD
+/* #define NUM_UNPRIV_CALL_PALS 10 */
+#else
+/* #define NUM_UNPRIV_CALL_PALS 9 */
+#endif /* KDEBUG */
+
+/*
+** OSF/1 Privileged CALL_PAL Entry Offsets:
+**
+** Entry Name Offset (Hex)
+**
+** halt 0000
+** cflush 0001
+** draina 0002
+** cserve 0009
+** swppal 000A
+** rdmces 0010
+** wrmces 0011
+** wrfen 002B
+** wrvptptr 002D
+** swpctx 0030
+** wrval 0031
+** rdval 0032
+** tbi 0033
+** wrent 0034
+** swpipl 0035
+** rdps 0036
+** wrkgp 0037
+** wrusp 0038
+** rdusp 003A
+** whami 003C
+** retsys 003D
+** rti 003F
+*/
+
+#define PAL_HALT_ENTRY 0x0000
+#define PAL_CFLUSH_ENTRY 0x0001
+#define PAL_DRAINA_ENTRY 0x0002
+#define PAL_CSERVE_ENTRY 0x0009
+#define PAL_SWPPAL_ENTRY 0x000A
+#define PAL_WRIPIR_ENTRY 0x000D
+#define PAL_RDMCES_ENTRY 0x0010
+#define PAL_WRMCES_ENTRY 0x0011
+#define PAL_WRFEN_ENTRY 0x002B
+#define PAL_WRVPTPTR_ENTRY 0x002D
+#define PAL_SWPCTX_ENTRY 0x0030
+#define PAL_WRVAL_ENTRY 0x0031
+#define PAL_RDVAL_ENTRY 0x0032
+#define PAL_TBI_ENTRY 0x0033
+#define PAL_WRENT_ENTRY 0x0034
+#define PAL_SWPIPL_ENTRY 0x0035
+#define PAL_RDPS_ENTRY 0x0036
+#define PAL_WRKGP_ENTRY 0x0037
+#define PAL_WRUSP_ENTRY 0x0038
+#define PAL_RDUSP_ENTRY 0x003A
+#define PAL_WHAMI_ENTRY 0x003C
+#define PAL_RETSYS_ENTRY 0x003D
+#define PAL_RTI_ENTRY 0x003F
+
+#define NUM_PRIV_CALL_PALS 23
+
+#endif
+
diff --git a/system/alpha/palcode/macros.h b/system/alpha/palcode/macros.h
new file mode 100644
index 000000000..ab902c06f
--- /dev/null
+++ b/system/alpha/palcode/macros.h
@@ -0,0 +1,137 @@
+/*
+ * VID: [T1.2] PT: [Fri Apr 21 16:47:16 1995] SF: [macros.h]
+ * TI: [/sae_users/cruz/bin/vice -iplatform.s -l// -p# -DEB164 -h -m -aeb164 ]
+ */
+#define __MACROS_LOADED 1
+/*
+*****************************************************************************
+** *
+** Copyright © 1993, 1994 *
+** by Digital Equipment Corporation, Maynard, Massachusetts. *
+** *
+** All Rights Reserved *
+** *
+** Permission is hereby granted to use, copy, modify and distribute *
+** this software and its documentation, in both source code and *
+** object code form, and without fee, for the purpose of distribution *
+** of this software or modifications of this software within products *
+** incorporating an integrated circuit implementing Digital's AXP *
+** architecture, regardless of the source of such integrated circuit, *
+** provided that the above copyright notice and this permission notice *
+** appear in all copies, and that the name of Digital Equipment *
+** Corporation not be used in advertising or publicity pertaining to *
+** distribution of the document or software without specific, written *
+** prior permission. *
+** *
+** Digital Equipment Corporation disclaims all warranties and/or *
+** guarantees with regard to this software, including all implied *
+** warranties of fitness for a particular purpose and merchantability, *
+** and makes no representations regarding the use of, or the results *
+** of the use of, the software and documentation in terms of correctness, *
+** accuracy, reliability, currentness or otherwise; and you rely on *
+** the software, documentation and results solely at your own risk. *
+** *
+** AXP is a trademark of Digital Equipment Corporation. *
+** *
+*****************************************************************************
+**
+** FACILITY:
+**
+** DECchip 21164 PALcode
+**
+** MODULE:
+**
+** macros.h
+**
+** MODULE DESCRIPTION:
+**
+** DECchip 21164 PALcode macro definitions
+**
+** AUTHOR: ER
+**
+** CREATION DATE: 29-Nov-1993
+**
+** $Id: macros.h,v 1.1.1.1 1997/10/30 23:27:19 verghese Exp $
+**
+** MODIFICATION HISTORY:
+**
+** $Log: macros.h,v $
+** Revision 1.1.1.1 1997/10/30 23:27:19 verghese
+** current 10/29/97
+**
+** Revision 1.5 1994/07/08 17:03:12 samberg
+** Changes to support platform specific additions
+**
+** Revision 1.4 1994/05/20 19:24:19 ericr
+** Moved STALL macro from osfpal.s to here
+** Added LDLI macro
+**
+** Revision 1.3 1994/05/20 18:08:14 ericr
+** Changed line comments to C++ style comment character
+**
+** Revision 1.2 1994/02/28 18:45:51 ericr
+** Fixed EGORE related bugs
+**
+** Revision 1.1 1993/12/16 21:55:05 eric
+** Initial revision
+**
+**
+**--
+*/
+
+#define STALL \
+ mfpr r31, pt0
+
+#define NOP \
+ bis $31, $31, $31
+
+/*
+** Align code on an 8K byte page boundary.
+*/
+
+#define ALIGN_PAGE \
+ .align 13
+
+/*
+** Align code on a 32 byte block boundary.
+*/
+
+#define ALIGN_BLOCK \
+ .align 5
+
+/*
+** Align code on a quadword boundary.
+*/
+
+#define ALIGN_BRANCH \
+ .align 3
+
+/*
+** Hardware vectors go in .text 0 sub-segment.
+*/
+
+#define HDW_VECTOR(offset) \
+ . = offset
+
+/*
+** Privileged CALL_PAL functions are in .text 1 sub-segment.
+*/
+
+#define CALL_PAL_PRIV(vector) \
+ . = (PAL_CALL_PAL_PRIV_ENTRY+(vector<<6))
+
+/*
+** Unprivileged CALL_PAL functions are in .text 1 sub-segment,
+** the privileged bit is removed from these vectors.
+*/
+
+#define CALL_PAL_UNPRIV(vector) \
+ . = (PAL_CALL_PAL_UNPRIV_ENTRY+((vector&0x3F)<<6))
+
+/*
+** Implements a load "immediate" longword function
+*/
+#define LDLI(reg,val) \
+ ldah reg, ((val+0x8000) >> 16)(zero); \
+ lda reg, (val&0xffff)(reg)
+
diff --git a/system/alpha/palcode/osf.h b/system/alpha/palcode/osf.h
new file mode 100644
index 000000000..316d83cf1
--- /dev/null
+++ b/system/alpha/palcode/osf.h
@@ -0,0 +1,545 @@
+/*
+ * VID: [T1.2] PT: [Fri Apr 21 16:47:14 1995] SF: [osf.h]
+ * TI: [/sae_users/cruz/bin/vice -iplatform.s -l// -p# -DEB164 -h -m -aeb164 ]
+ */
+#define __OSF_LOADED 1
+/*
+*****************************************************************************
+** *
+** Copyright © 1993, 1994 *
+** by Digital Equipment Corporation, Maynard, Massachusetts. *
+** *
+** All Rights Reserved *
+** *
+** Permission is hereby granted to use, copy, modify and distribute *
+** this software and its documentation, in both source code and *
+** object code form, and without fee, for the purpose of distribution *
+** of this software or modifications of this software within products *
+** incorporating an integrated circuit implementing Digital's AXP *
+** architecture, regardless of the source of such integrated circuit, *
+** provided that the above copyright notice and this permission notice *
+** appear in all copies, and that the name of Digital Equipment *
+** Corporation not be used in advertising or publicity pertaining to *
+** distribution of the document or software without specific, written *
+** prior permission. *
+** *
+** Digital Equipment Corporation disclaims all warranties and/or *
+** guarantees with regard to this software, including all implied *
+** warranties of fitness for a particular purpose and merchantability, *
+** and makes no representations regarding the use of, or the results *
+** of the use of, the software and documentation in terms of correctness, *
+** accuracy, reliability, currentness or otherwise; and you rely on *
+** the software, documentation and results solely at your own risk. *
+** *
+** AXP is a trademark of Digital Equipment Corporation. *
+** *
+*****************************************************************************
+**
+** FACILITY:
+**
+** DECchip 21164 PALcode
+**
+** MODULE:
+**
+** osf.h
+**
+** MODULE DESCRIPTION:
+**
+** OSF/1 specific definitions
+**
+** AUTHOR: ER
+**
+** CREATION DATE: 24-Nov-1993
+**
+** $Id: osf.h,v 1.1.1.1 1997/10/30 23:27:19 verghese Exp $
+**
+** MODIFICATION HISTORY:
+**
+** $Log: osf.h,v $
+** Revision 1.1.1.1 1997/10/30 23:27:19 verghese
+** current 10/29/97
+**
+** Revision 1.11 1995/04/21 02:06:30 fdh
+** Replaced C++ style comments with Standard C style comments.
+**
+** Revision 1.10 1994/09/26 14:17:47 samberg
+** Complete VICE work and EB164/SD164 breakout.
+**
+** Revision 1.9 1994/07/26 17:39:10 samberg
+** Changes for SD164.
+**
+** Revision 1.8 1994/07/08 17:03:48 samberg
+** Changes to support platform specific additions
+**
+** Revision 1.7 1994/05/20 19:23:51 ericr
+** Moved STACK_FRAME macro from osfpal.s to here
+**
+** Revision 1.6 1994/05/20 18:08:19 ericr
+** Changed line comments to C++ style comment character
+**
+** Revision 1.5 1994/01/11 18:43:33 ericr
+** Removed PAL version/revision and size constants
+**
+** Revision 1.4 1994/01/05 16:22:32 ericr
+** Added more SCB vector offsets and MCHK error code
+**
+** Revision 1.3 1994/01/03 19:35:40 ericr
+** Derive mask definitions from field constants
+**
+** Revision 1.2 1993/12/22 20:43:01 eric
+** Added mask definitions for MCES bits
+**
+** Revision 1.1 1993/12/16 21:55:05 eric
+** Initial revision
+**
+**
+**--
+*/
+
+/*
+** Seg0 and Seg1 Virtual Address (VA) Format
+**
+** Loc Size Name Function
+** ----- ---- ---- ---------------------------------
+** <42:33> 10 SEG1 First level page table offset
+** <32:23> 10 SEG2 Second level page table offset
+** <22:13> 10 SEG3 Third level page table offset
+** <12:00> 13 OFFSET Byte within page offset
+*/
+
+#define VA_V_SEG1 33
+#define VA_M_SEG1 (0x3FF<<VA_V_SEG1)
+#define VA_V_SEG2 23
+#define VA_M_SEG2 (0x3FF<<VA_V_SEG2)
+#define VA_V_SEG3 13
+#define VA_M_SEG3 (0x3FF<<VA_V_SEG3)
+#define VA_V_OFFSET 0
+#define VA_M_OFFSET 0x1FFF
+
+/*
+** Virtual Address Options: 8K byte page size
+*/
+
+#define VA_S_SIZE 43
+#define VA_S_OFF 13
+#define VA_S_SEG 10
+#define VA_S_PAGE_SIZE 8192
+
+/*
+** Page Table Entry (PTE) Format
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <63:32> 32 PFN Page Frame Number
+** <31:16> 16 SW Reserved for software
+** <15:14> 2 RSV0 Reserved for hardware SBZ
+** <13> 1 UWE User Write Enable
+** <12> 1 KWE Kernel Write Enable
+** <11:10> 2 RSV1 Reserved for hardware SBZ
+** <9> 1 URE User Read Enable
+** <8> 1 KRE Kernel Read Enable
+** <7> 1 RSV2 Reserved for hardware SBZ
+** <6:5> 2 GH Granularity Hint
+** <4> 1 ASM Address Space Match
+** <3> 1 FOE Fault On Execute
+** <2> 1 FOW Fault On Write
+** <1> 1 FOR Fault On Read
+** <0> 1 V Valid
+*/
+
+#define PTE_V_PFN 32
+#define PTE_M_PFN 0xFFFFFFFF00000000
+#define PTE_V_SW 16
+#define PTE_M_SW 0x00000000FFFF0000
+#define PTE_V_UWE 13
+#define PTE_M_UWE (1<<PTE_V_UWE)
+#define PTE_V_KWE 12
+#define PTE_M_KWE (1<<PTE_V_KWE)
+#define PTE_V_URE 9
+#define PTE_M_URE (1<<PTE_V_URE)
+#define PTE_V_KRE 8
+#define PTE_M_KRE (1<<PTE_V_KRE)
+#define PTE_V_GH 5
+#define PTE_M_GH (3<<PTE_V_GH)
+#define PTE_V_ASM 4
+#define PTE_M_ASM (1<<PTE_V_ASM)
+#define PTE_V_FOE 3
+#define PTE_M_FOE (1<<PTE_V_FOE)
+#define PTE_V_FOW 2
+#define PTE_M_FOW (1<<PTE_V_FOW)
+#define PTE_V_FOR 1
+#define PTE_M_FOR (1<<PTE_V_FOR)
+#define PTE_V_VALID 0
+#define PTE_M_VALID (1<<PTE_V_VALID)
+
+#define PTE_M_KSEG 0x1111
+#define PTE_M_PROT 0x3300
+
+/*
+** System Entry Instruction Fault (entIF) Constants:
+*/
+
+#define IF_K_BPT 0x0
+#define IF_K_BUGCHK 0x1
+#define IF_K_GENTRAP 0x2
+#define IF_K_FEN 0x3
+#define IF_K_OPCDEC 0x4
+
+/*
+** System Entry Hardware Interrupt (entInt) Constants:
+*/
+
+#define INT_K_IP 0x0
+#define INT_K_CLK 0x1
+#define INT_K_MCHK 0x2
+#define INT_K_DEV 0x3
+#define INT_K_PERF 0x4
+
+/*
+** System Entry MM Fault (entMM) Constants:
+*/
+
+#define MM_K_TNV 0x0
+#define MM_K_ACV 0x1
+#define MM_K_FOR 0x2
+#define MM_K_FOE 0x3
+#define MM_K_FOW 0x4
+
+/*
+** Process Control Block (PCB) Offsets:
+*/
+
+#define PCB_Q_KSP 0x0000
+#define PCB_Q_USP 0x0008
+#define PCB_Q_PTBR 0x0010
+#define PCB_L_PCC 0x0018
+#define PCB_L_ASN 0x001C
+#define PCB_Q_UNIQUE 0x0020
+#define PCB_Q_FEN 0x0028
+#define PCB_Q_RSV0 0x0030
+#define PCB_Q_RSV1 0x0038
+
+/*
+** Processor Status Register (PS) Bit Summary
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <3> 1 CM Current Mode
+** <2:0> 3 IPL Interrupt Priority Level
+**/
+
+#define PS_V_CM 3
+#define PS_M_CM (1<<PS_V_CM)
+#define PS_V_IPL 0
+#define PS_M_IPL (7<<PS_V_IPL)
+
+#define PS_K_KERN (0<<PS_V_CM)
+#define PS_K_USER (1<<PS_V_CM)
+
+#define IPL_K_ZERO 0x0
+#define IPL_K_SW0 0x1
+#define IPL_K_SW1 0x2
+#define IPL_K_DEV0 0x3
+#define IPL_K_DEV1 0x4
+#define IPL_K_CLK 0x5
+#define IPL_K_RT 0x6
+#define IPL_K_PERF 0x6
+#define IPL_K_PFAIL 0x6
+#define IPL_K_MCHK 0x7
+
+#define IPL_K_LOW 0x0
+#define IPL_K_HIGH 0x7
+
+/*
+** SCB Offset Definitions:
+*/
+
+#define SCB_Q_FEN 0x0010
+#define SCB_Q_ACV 0x0080
+#define SCB_Q_TNV 0x0090
+#define SCB_Q_FOR 0x00A0
+#define SCB_Q_FOW 0x00B0
+#define SCB_Q_FOE 0x00C0
+#define SCB_Q_ARITH 0x0200
+#define SCB_Q_KAST 0x0240
+#define SCB_Q_EAST 0x0250
+#define SCB_Q_SAST 0x0260
+#define SCB_Q_UAST 0x0270
+#define SCB_Q_UNALIGN 0x0280
+#define SCB_Q_BPT 0x0400
+#define SCB_Q_BUGCHK 0x0410
+#define SCB_Q_OPCDEC 0x0420
+#define SCB_Q_ILLPAL 0x0430
+#define SCB_Q_TRAP 0x0440
+#define SCB_Q_CHMK 0x0480
+#define SCB_Q_CHME 0x0490
+#define SCB_Q_CHMS 0x04A0
+#define SCB_Q_CHMU 0x04B0
+#define SCB_Q_SW0 0x0500
+#define SCB_Q_SW1 0x0510
+#define SCB_Q_SW2 0x0520
+#define SCB_Q_SW3 0x0530
+#define SCB_Q_SW4 0x0540
+#define SCB_Q_SW5 0x0550
+#define SCB_Q_SW6 0x0560
+#define SCB_Q_SW7 0x0570
+#define SCB_Q_SW8 0x0580
+#define SCB_Q_SW9 0x0590
+#define SCB_Q_SW10 0x05A0
+#define SCB_Q_SW11 0x05B0
+#define SCB_Q_SW12 0x05C0
+#define SCB_Q_SW13 0x05D0
+#define SCB_Q_SW14 0x05E0
+#define SCB_Q_SW15 0x05F0
+#define SCB_Q_CLOCK 0x0600
+#define SCB_Q_INTER 0x0610
+#define SCB_Q_SYSERR 0x0620
+#define SCB_Q_PROCERR 0x0630
+#define SCB_Q_PWRFAIL 0x0640
+#define SCB_Q_PERFMON 0x0650
+#define SCB_Q_SYSMCHK 0x0660
+#define SCB_Q_PROCMCHK 0x0670
+#define SCB_Q_PASSREL 0x0680
+
+/*
+** Stack Frame (FRM) Offsets:
+**
+** There are two types of system entries for OSF/1 - those for the
+** callsys CALL_PAL function and those for exceptions and interrupts.
+** Both entry types use the same stack frame layout. The stack frame
+** contains space for the PC, the PS, the saved GP, and the saved
+** argument registers a0, a1, and a2. On entry, SP points to the
+** saved PS.
+*/
+
+#define FRM_Q_PS 0x0000
+#define FRM_Q_PC 0x0008
+#define FRM_Q_GP 0x0010
+#define FRM_Q_A0 0x0018
+#define FRM_Q_A1 0x0020
+#define FRM_Q_A2 0x0028
+
+#define FRM_K_SIZE 48
+
+#define STACK_FRAME(tmp1,tmp2) \
+ sll ps, 63-PS_V_CM, p7; \
+ bge p7, 0f; \
+ bis zero, zero, ps; \
+ mtpr sp, ptUsp; \
+ mfpr sp, ptKsp; \
+0: lda sp, 0-FRM_K_SIZE(sp); \
+ stq tmp1, FRM_Q_PS(sp); \
+ stq tmp2, FRM_Q_PC(sp); \
+ stq gp, FRM_Q_GP(sp); \
+ stq a0, FRM_Q_A0(sp); \
+ stq a1, FRM_Q_A1(sp); \
+ stq a2, FRM_Q_A2(sp)
+
+/*
+** Halt Codes:
+*/
+
+#define HLT_K_RESET 0x0000
+#define HLT_K_HW_HALT 0x0001
+#define HLT_K_KSP_INVAL 0x0002
+#define HLT_K_SCBB_INVAL 0x0003
+#define HLT_K_PTBR_INVAL 0x0004
+#define HLT_K_SW_HALT 0x0005
+#define HLT_K_DBL_MCHK 0x0006
+#define HLT_K_MCHK_FROM_PAL 0x0007
+
+/*
+** Machine Check Codes:
+*/
+
+#define MCHK_K_TPERR 0x0080
+#define MCHK_K_TCPERR 0x0082
+#define MCHK_K_HERR 0x0084
+#define MCHK_K_ECC_C 0x0086
+#define MCHK_K_ECC_NC 0x0088
+#define MCHK_K_UNKNOWN 0x008A
+#define MCHK_K_CACKSOFT 0x008C
+#define MCHK_K_BUGCHECK 0x008E
+#define MCHK_K_OS_BUGCHECK 0x0090
+#define MCHK_K_DCPERR 0x0092
+#define MCHK_K_ICPERR 0x0094
+#define MCHK_K_RETRY_IRD 0x0096
+#define MCHK_K_PROC_HERR 0x0098
+
+/*
+** System Machine Check Codes:
+*/
+
+#define MCHK_K_READ_NXM 0x0200
+#define MCHK_K_SYS_HERR 0x0202
+
+/*
+** Machine Check Error Status Summary (MCES) Register Format
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <0> 1 MIP Machine check in progress
+** <1> 1 SCE System correctable error in progress
+** <2> 1 PCE Processor correctable error in progress
+** <3> 1 DPC Disable PCE error reporting
+** <4> 1 DSC Disable SCE error reporting
+*/
+
+#define MCES_V_MIP 0
+#define MCES_M_MIP (1<<MCES_V_MIP)
+#define MCES_V_SCE 1
+#define MCES_M_SCE (1<<MCES_V_SCE)
+#define MCES_V_PCE 2
+#define MCES_M_PCE (1<<MCES_V_PCE)
+#define MCES_V_DPC 3
+#define MCES_M_DPC (1<<MCES_V_DPC)
+#define MCES_V_DSC 4
+#define MCES_M_DSC (1<<MCES_V_DSC)
+
+#define MCES_M_ALL (MCES_M_MIP | MCES_M_SCE | MCES_M_PCE | MCES_M_DPC \
+ | MCES_M_DSC)
+
+/*
+** Who-Am-I (WHAMI) Register Format
+**
+** Extent Size Name Function
+** ------ ---- ---- ---------------------------------
+** <7:0> 8 ID Who-Am-I identifier
+** <15:8> 1 SWAP Swap PALcode flag - character 'S'
+*/
+
+#define WHAMI_V_SWAP 8
+#define WHAMI_M_SWAP (1<<WHAMI_V_SWAP)
+#define WHAMI_V_ID 0
+#define WHAMI_M_ID 0xFF
+
+#define WHAMI_K_SWAP 0x53 /* Character 'S' */
+
+/*
+** Conventional Register Usage Definitions
+**
+** Assembler temporary `at' is `AT' so it doesn't conflict with the
+** `.set at' assembler directive.
+*/
+
+#define v0 $0 /* Function Return Value Register */
+#define t0 $1 /* Scratch (Temporary) Registers ... */
+#define t1 $2
+#define t2 $3
+#define t3 $4
+#define t4 $5
+#define t5 $6
+#define t6 $7
+#define t7 $8
+#define s0 $9 /* Saved (Non-Volatile) Registers ... */
+#define s1 $10
+#define s2 $11
+#define s3 $12
+#define s4 $13
+#define s5 $14
+#define fp $15 /* Frame Pointer Register, Or S6 */
+#define s6 $15
+#define a0 $16 /* Argument Registers ... */
+#define a1 $17
+#define a2 $18
+#define a3 $19
+#define a4 $20
+#define a5 $21
+#define t8 $22 /* Scratch (Temporary) Registers ... */
+#define t9 $23
+#define t10 $24
+#define t11 $25
+#define ra $26 /* Return Address Register */
+#define pv $27 /* Procedure Value Register, Or T12 */
+#define t12 $27
+#define AT $28 /* Assembler Temporary (Volatile) Register */
+#define gp $29 /* Global Pointer Register */
+#define sp $30 /* Stack Pointer Register */
+#define zero $31 /* Zero Register */
+
+/*
+** OSF/1 Unprivileged CALL_PAL Entry Offsets:
+**
+** Entry Name Offset (Hex)
+**
+** bpt 0080
+** bugchk 0081
+** callsys 0083
+** imb 0086
+** rdunique 009E
+** wrunique 009F
+** gentrap 00AA
+** dbgstop 00AD
+*/
+
+#define UNPRIV 0x80
+#define PAL_BPT_ENTRY 0x80
+#define PAL_BUGCHK_ENTRY 0x81
+#define PAL_CALLSYS_ENTRY 0x83
+#define PAL_IMB_ENTRY 0x86
+#define PAL_RDUNIQUE_ENTRY 0x9E
+#define PAL_WRUNIQUE_ENTRY 0x9F
+#define PAL_GENTRAP_ENTRY 0xAA
+
+#if defined(KDEBUG)
+#define PAL_DBGSTOP_ENTRY 0xAD
+/* #define NUM_UNPRIV_CALL_PALS 10 */
+#else
+/* #define NUM_UNPRIV_CALL_PALS 9 */
+#endif /* KDEBUG */
+
+/*
+** OSF/1 Privileged CALL_PAL Entry Offsets:
+**
+** Entry Name Offset (Hex)
+**
+** halt 0000
+** cflush 0001
+** draina 0002
+** cserve 0009
+** swppal 000A
+** rdmces 0010
+** wrmces 0011
+** wrfen 002B
+** wrvptptr 002D
+** swpctx 0030
+** wrval 0031
+** rdval 0032
+** tbi 0033
+** wrent 0034
+** swpipl 0035
+** rdps 0036
+** wrkgp 0037
+** wrusp 0038
+** rdusp 003A
+** whami 003C
+** retsys 003D
+** rti 003F
+*/
+
+#define PAL_HALT_ENTRY 0x0000
+#define PAL_CFLUSH_ENTRY 0x0001
+#define PAL_DRAINA_ENTRY 0x0002
+#define PAL_CSERVE_ENTRY 0x0009
+#define PAL_SWPPAL_ENTRY 0x000A
+#define PAL_WRIPIR_ENTRY 0x000D
+#define PAL_RDMCES_ENTRY 0x0010
+#define PAL_WRMCES_ENTRY 0x0011
+#define PAL_WRFEN_ENTRY 0x002B
+#define PAL_WRVPTPTR_ENTRY 0x002D
+#define PAL_SWPCTX_ENTRY 0x0030
+#define PAL_WRVAL_ENTRY 0x0031
+#define PAL_RDVAL_ENTRY 0x0032
+#define PAL_TBI_ENTRY 0x0033
+#define PAL_WRENT_ENTRY 0x0034
+#define PAL_SWPIPL_ENTRY 0x0035
+#define PAL_RDPS_ENTRY 0x0036
+#define PAL_WRKGP_ENTRY 0x0037
+#define PAL_WRUSP_ENTRY 0x0038
+#define PAL_RDUSP_ENTRY 0x003A
+#define PAL_WHAMI_ENTRY 0x003C
+#define PAL_RETSYS_ENTRY 0x003D
+#define PAL_RTI_ENTRY 0x003F
+
+#define NUM_PRIV_CALL_PALS 23
+
diff --git a/system/alpha/palcode/osfpal.nh b/system/alpha/palcode/osfpal.nh
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/system/alpha/palcode/osfpal.nh
diff --git a/system/alpha/palcode/osfpal.s b/system/alpha/palcode/osfpal.s
new file mode 100644
index 000000000..655b17cd7
--- /dev/null
+++ b/system/alpha/palcode/osfpal.s
@@ -0,0 +1,5123 @@
+// modified to use the Hudson style "impure.h" instead of ev5_impure.sdl
+// since we don't have a mechanism to expand the data structures.... pb Nov/95
+
+// build_fixed_image: not sure what this means
+// real_mm to be replaced during rewrite
+// remove_save_state and remove_restore_state can be removed to save space ??
+
+
+#include "ev5_defs.h"
+#include "ev5_impure.h"
+#include "ev5_alpha_defs.h"
+#include "ev5_paldef.h"
+#include "ev5_osfalpha_defs.h"
+#include "fromHudsonMacros.h"
+#include "fromHudsonOsf.h"
+#include "dc21164FromGasSources.h"
+
+#ifdef SIMOS
+#define DEBUGSTORE(c) nop
+#else
+#define DEBUGSTORE(c) \
+ lda r13, c(zero) ; \
+ bsr r25, debugstore
+#endif
+
+#define DEBUG_EXC_ADDR()\
+ bsr r25, put_exc_addr; \
+ DEBUGSTORE(13) ; \
+ DEBUGSTORE(10)
+
+#define egore 0
+#define acore 0
+#define beh_model 0
+#define ev5_p2 1
+#define ev5_p1 0
+#define ldvpte_bug_fix 1
+#define osf_chm_fix 0
+
+// Do we want to do this?? pb
+#define spe_fix 0
+// Do we want to do this?? pb
+#define build_fixed_image 0
+
+#define ev5_pass2
+#define enable_p4_fixups 0
+#define osf_svmin 1
+#define enable_physical_console 0
+#define fill_err_hack 0
+#define icflush_on_tbix 0
+#define max_cpuid 1
+#define perfmon_debug 0
+#define rawhide_system 0
+#define rax_mode 0
+
+
+// This is the fix for the user-mode super page references causing the machine to crash.
+#if (spe_fix == 1) && (build_fixed_image==1)
+#define hw_rei_spe br r31, hw_rei_update_spe
+#else
+#define hw_rei_spe hw_rei
+#endif
+
+
+// redefine a few of the distribution-code names to match the Hudson gas names.
+// opcodes
+#define ldqp ldq_p
+#define stqp stq_p
+#define ldlp ldl_p
+#define stlp stl_p
+
+#define r0 $0
+#define r1 $1
+#define r2 $2
+#define r3 $3
+#define r4 $4
+#define r5 $5
+#define r6 $6
+#define r7 $7
+#define r8 $8
+#define r9 $9
+#define r10 $10
+#define r11 $11
+#define r12 $12
+#define r13 $13
+#define r14 $14
+#define r15 $15
+#define r16 $16
+#define r17 $17
+#define r18 $18
+#define r19 $19
+#define r20 $20
+#define r21 $21
+#define r22 $22
+#define r23 $23
+#define r24 $24
+#define r25 $25
+#define r26 $26
+#define r27 $27
+#define r28 $28
+#define r29 $29
+#define r30 $30
+#define r31 $31
+
+// .title "EV5 OSF PAL"
+// .ident "V1.18"
+//
+//****************************************************************************
+//* *
+//* Copyright (c) 1992, 1993, 1994, 1995 *
+//* by DIGITAL Equipment Corporation, Maynard, Mass. *
+//* *
+//* This software is furnished under a license and may be used and copied *
+//* only in accordance with the terms of such license and with the *
+//* inclusion of the above copyright notice. This software or any other *
+//* copies thereof may not be provided or otherwise made available to any *
+//* other person. No title to and ownership of the software is hereby *
+//* transferred. *
+//* *
+//* The information in this software is subject to change without notice *
+//* and should not be construed as a commitment by DIGITAL Equipment *
+//* Corporation. *
+//* *
+//* DIGITAL assumes no responsibility for the use or reliability of its *
+//* software on equipment which is not supplied by DIGITAL. *
+//* *
+//****************************************************************************
+
+// .sbttl "Edit History"
+//+
+// Who Rev When What
+// ------------ --- ----------- --------------------------------
+// DB 0.0 03-Nov-1992 Start
+// DB 0.1 28-Dec-1992 add swpctx
+// DB 0.2 05-Jan-1993 Bug: PVC found mtpr dtb_CM -> virt ref bug
+// DB 0.3 11-Jan-1993 rearrange trap entry points
+// DB 0.4 01-Feb-1993 add tbi
+// DB 0.5 04-Feb-1993 real MM, kludge reset flow, kludge swppal
+// DB 0.6 09-Feb-1993 Bug: several stack pushers used r16 for pc (should be r14)
+// DB 0.7 10-Feb-1993 Bug: pushed wrong PC (+8) on CALL_PAL OPCDEC
+// Bug: typo on register number for store in wrunique
+// Bug: rti to kern uses r16 as scratch
+// Bug: callsys saving wrong value in pt_usp
+// DB 0.8 16-Feb-1993 PVC: fix possible pt write->read bug in wrkgp, wrusp
+// DB 0.9 18-Feb-1993 Bug: invalid_dpte_handler shifted pte twice
+// Bug: rti stl_c could corrupt the stack
+// Bug: unaligned returning wrong value in r17 (or should be and)
+// DB 0.10 19-Feb-1993 Add draina, rd/wrmces, cflush, cserve, interrupt
+// DB 0.11 23-Feb-1993 Turn caches on in reset flow
+// DB 0.12 10-Mar-1993 Bug: wrong value for icsr for FEN in kern mode flow
+// DB 0.13 15-Mar-1993 Bug: wrong value pushed for PC in invalid_dpte_handler if stack push tbmisses
+// DB 0.14 23-Mar-1993 Add impure pointer paltemp, reshuffle some other paltemps to match VMS
+// DB 0.15 15-Apr-1993 Combine paltemps for WHAMI and MCES
+// DB 0.16 12-May-1993 Update reset
+// New restriction: no mfpr exc_addr in cycle 1 of call_pal flows
+// Bug: in wrmces, not clearing DPC, DSC
+// Update swppal
+// Add pal bugchecks, pal_save_state, pal_restore_state
+// DB 0.17 24-May-1993 Add dfault_in_pal flow; fixup stack builder to have common state for pc/ps.
+// New restriction: No hw_rei_stall in 0,1,2 after mtpr itb_asn
+// DB 0.18 26-May-1993 PVC fixes
+// JM 0.19 01-jul-1993 Bug: OSFPAL_CALPAL_OPCDEC, TRAP_OPCDEC -- move mt exc_addr after stores
+// JM 0.20 07-jul-1993 Update cns_ and mchk_ names for impure.mar conversion to .sdl
+// Bug: exc_addr was being loaded before stores that could dtb_miss in the following
+// routines: TRAP_FEN,FEN_TO_OPCDEC,CALL_PAL_CALLSYS,RTI_TO_KERN
+// JM 0.21 26-jul-1993 Bug: move exc_addr load after ALL stores in the following routines:
+// TRAP_IACCVIO::,TRAP_OPCDEC::,TRAP_ARITH::,TRAP_FEN::
+// dfault_trap_cont:,fen_to_opcdec:,invalid_dpte_handler:
+// osfpal_calpal_opcdec:,CALL_PAL_callsys::,TRAP_UNALIGN::
+// Bugs from PVC: trap_unalign - mt pt0 ->mf pt0 within 2 cycles
+// JM 0.22 28-jul-1993 Add WRIPIR instruction
+// JM 0.23 05-aug-1993 Bump version number for release
+// JM 0.24 11-aug-1993 Bug: call_pal_swpipl - palshadow write -> hw_rei violation
+// JM 0.25 09-sep-1993 Disable certain "hidden" pvc checks in call_pals;
+// New restriction: No hw_rei_stall in 0,1,2,3,4 after mtpr itb_asn - affects HALT(raxmode),
+// and SWPCTX
+// JM 0.26 07-oct-1993 Re-implement pal_version
+// JM 0.27 12-oct-1993 One more time: change pal_version format to conform to SRM
+// JM 0.28 14-oct-1993 Change ic_flush routine to pal_ic_flush
+// JM 0.29 19-oct-1993 BUG(?): dfault_in_pal: use exc_addr to check for dtbmiss,itbmiss check instead
+// of mm_stat<opcode>. mm_stat contains original opcode, not hw_ld.
+// JM 0.30 28-oct-1993 BUG: PVC violation - mf exc_addr in first cycles of call_pal in rti,retsys
+// JM 0.31 15-nov-1993 BUG: WRFEN trashing r0
+// JM 0.32 21-nov-1993 BUG: dtb_ldq,itb_ldq (used in dfault_in_pal) not defined when real_mm=0
+// JM 0.33 24-nov-1993 save/restore_state -
+// BUG: use ivptbr to restore mvptbr
+// BUG: adjust hw_ld/st base/offsets to accomodate 10-bit offset limit
+// CHANGE: Load 2 pages into dtb to accomodate compressed logout area/multiprocessors
+// JM 0.34 20-dec-1993 BUG: set r11<mode> to kernel for ksnv halt case
+// BUG: generate ksnv halt when tb miss on kernel stack accesses
+// save exc_addr in r14 for invalid_dpte stack builder
+// JM 0.35 30-dec-1993 BUG: PVC violation in trap_arith - mt exc_sum in shadow of store with mf exc_mask in
+// the same shadow
+// JM 0.36 6-jan-1994 BUG: fen_to_opcdec - savePC should be PC+4, need to save old PS, update new PS
+// New palcode restiction: mt icsr<fpe,hwe> --> 3 bubbles to hw_rei --affects wrfen
+// JM 0.37 25-jan-1994 BUG: PVC violations in restore_state - mt dc_mode/maf_mode ->mbox instructions
+// Hide impure area manipulations in macros
+// BUG: PVC violation in save and restore state-- move mt icsr out of shadow of ld/st
+// Add some pvc_violate statements
+// JM 0.38 1-feb-1994 Changes to save_state: save pt1; don't save r31,f31; update comments to reflect reality;
+// Changes to restore_state: restore pt1, icsr; don't restore r31,f31; update comments
+// Add code to ensure fen bit set in icsr before ldt
+// conditionally compile rax_more_reset out.
+// move ldqp,stqp macro definitions to ev5_pal_macros.mar and add .mcall's for them here
+// move rax reset stuff to ev5_osf_system_pal.m64
+// JM 0.39 7-feb-1994 Move impure pointer to pal scratch space. Use former pt_impure for bc_ctl shadow
+// and performance monitoring bits
+// Change to save_state routine to save more iprs.
+// JM 0.40 19-feb-1994 Change algorithm in save/restore_state routines; add f31,r31 back in
+// JM 0.41 21-feb-1994 Add flags to compile out save/restore state (not needed in some systems)
+// remove_save_state,remove_restore_state;fix new pvc violation in save_state
+// JM 0.42 22-feb-1994 BUG: save_state overwriting r3
+// JM 0.43 24-feb-1994 BUG: save_state saving wrong icsr
+// JM 0.44 28-feb-1994 Remove ic_flush from wr_tbix instructions
+// JM 0.45 15-mar-1994 BUG: call_pal_tbi trashes a0 prior to range check (instruction order problem)
+// New pal restriction in pal_restore_state: icsr<fpe>->floating instr = 3 bubbles
+// Add exc_sum and exc_mask to pal_save_state (not restore)
+// JM 0.46 22-apr-1994 Move impure pointer back into paltemp; Move bc_ctl shadow and pmctr_ctl into impure
+// area.
+// Add performance counter support to swpctx and wrperfmon
+// JM 0.47 9-may-1994 Bump version # (for ev5_osf_system_pal.m64 sys_perfmon fix)
+// JM 0.48 13-jun-1994 BUG: trap_interrupt --> put new ev5 ipl at 30 for all osfipl6 interrupts
+// JM 0.49 8-jul-1994 BUG: In the unlikely (impossible?) event that the branch to pal_pal_bug_check is
+// taken in the interrupt flow, stack is pushed twice.
+// SWPPAL - update to support ECO 59 to allow 0 as a valid address
+// Add itb flush to save/restore state routines
+// Change hw_rei to hw_rei_stall in ic_flush routine. Shouldn't be necessary, but
+// conforms to itbia restriction.
+// Added enable_physical_console flag (for enter/exit console routines only)
+// JM 0.50 29-jul-1994 Add code to dfault & invalid_dpte_handler to ignore exceptions on a
+// load to r31/f31. changed dfault_fetch_err to dfault_fetch_ldr31_err and
+// nmiss_fetch_err to nmiss_fetch_ldr31_err.
+// JM 1.00 1-aug-1994 Add pass2 support (swpctx)
+// JM 1.01 2-aug-1994 swppal now passes bc_ctl/bc_config in r1/r2
+// JM 1.02 15-sep-1994 BUG: swpctx missing shift of pme bit to correct position in icsr (pass2)
+// Moved perfmon code here from system file.
+// BUG: pal_perfmon - enable function not saving correct enables when pme not set (pass1)
+// JM 1.03 3-oct-1994 Added (pass2 only) code to wrperfmon enable function to look at pme bit.
+// JM 1.04 14-oct-1994 BUG: trap_interrupt - ISR read (and saved) before INTID -- INTID can change
+// after ISR read, but we won't catch the ISR update. reverse order
+// JM 1.05 17-nov-1994 Add code to dismiss UNALIGN trap if LD r31/F31
+// JM 1.06 28-nov-1994 BUG: missing mm_stat shift for store case in trap_unalign (new bug due to "dismiss" code)
+// JM 1.07 1-dec-1994 EV5 PASS1,2,3 BUG WORKAROUND: Add flag LDVPTE_BUG_FIX. In DTBMISS_DOUBLE, branch to
+// DTBMISS_SINGLE if not in palmode.
+// JM 1.08 9-jan-1995 Bump version number for change to EV5_OSF_SYSTEM_PAL.M64 - ei_stat fix in mchk logout frame
+// JM 1.09 2-feb-1995 Add flag "spe_fix" and accompanying code to workaround pre-pass4 bug: Disable Ibox
+// superpage mode in User mode and re-enable in kernel mode.
+// EV5_OSF_SYSTEM_PAL.M64 and EV5_PALDEF.MAR (added pt_misc_v_cm) also changed to support this.
+// JM 1.10 24-feb-1995 Set ldvpte_bug_fix regardless of ev5 pass. set default to ev5_p2
+// ES 1.11 10-mar-1995 Add flag "osf_chm_fix" to enable dcache in user mode only to avoid
+// cpu bug.
+// JM 1.12 17-mar-1995 BUG FIX: Fix F0 corruption problem in pal_restore_state
+// ES 1.13 17-mar-1995 Refine osf_chm_fix
+// ES 1.14 20-mar-1995 Don't need as many stalls before hw_rei_stall in chm_fix
+// ES 1.15 21-mar-1995 Add a stall to avoid a pvc violation in pal_restore_state
+// Force pvc checking of exit_console
+// ES 1.16 26-apr-1995 In the wrperfmon disable function, correct meaning of R17<2:0> to ctl2,ctl2,ctl0
+// ES 1.17 01-may-1995 In hw_rei_update_spe code, in the osf_chm fix, use bic and bis (self-correcting)
+// instead of xor to maintain previous mode in pt_misc
+// ES 1.18 14-jul-1995 In wrperfmon enable on pass2, update pmctr even if current process does
+// not have pme set. The bits in icsr maintain the master enable state.
+// In sys_reset, add icsr<17>=1 for ev56 byte/word eco enable
+//
+#define vmaj 1
+#define vmin 18
+#define vms_pal 1
+#define osf_pal 2
+#define pal_type osf_pal
+#define osfpal_version_l ((pal_type<<16) | (vmaj<<8) | (vmin<<0))
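+// Illustrative note: with pal_type = 2, vmaj = 1 and vmin = 18 (0x12),
+// osfpal_version_l evaluates to 0x00020112; it is stored as a longword at
+// offset 8 from the reset vector (see the .long below Pal_Base).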
+//-
+
+// .sbttl "PALtemp register usage"
+
+//+
+// The EV5 Ibox holds 24 PALtemp registers. This maps the OSF PAL usage
+// for these PALtemps:
+//
+// pt0 local scratch
+// pt1 local scratch
+// pt2 entUna pt_entUna
+// pt3 CPU specific impure area pointer pt_impure
+// pt4 memory management temp
+// pt5 memory management temp
+// pt6 memory management temp
+// pt7 entIF pt_entIF
+// pt8 intmask pt_intmask
+// pt9 entSys pt_entSys
+// pt10
+// pt11 entInt pt_entInt
+// pt12 entArith pt_entArith
+// pt13 reserved for system specific PAL
+// pt14 reserved for system specific PAL
+// pt15 reserved for system specific PAL
+// pt16 MISC: scratch ! WHAMI<7:0> ! 0 0 0 MCES<4:0> pt_misc, pt_whami, pt_mces
+// pt17 sysval pt_sysval
+// pt18 usp pt_usp
+// pt19 ksp pt_ksp
+// pt20 PTBR pt_ptbr
+// pt21 entMM pt_entMM
+// pt22 kgp pt_kgp
+// pt23 PCBB pt_pcbb
+//
+//-
+
+// .sbttl "PALshadow register usage"
+//
+//+
+//
+// EV5 shadows R8-R14 and R25 when in PALmode and ICSR<shadow_enable> = 1.
+// This maps the OSF PAL usage of R8 - R14 and R25:
+//
+// r8 ITBmiss/DTBmiss scratch
+// r9 ITBmiss/DTBmiss scratch
+// r10 ITBmiss/DTBmiss scratch
+// r11 PS
+// r12 local scratch
+// r13 local scratch
+// r14 local scratch
+// r25 local scratch
+//
+//
+//-
+
+// .sbttl "ALPHA symbol definitions"
+// _OSF_PSDEF GLOBAL
+// _OSF_PTEDEF GLOBAL
+// _OSF_VADEF GLOBAL
+// _OSF_PCBDEF GLOBAL
+// _OSF_SFDEF GLOBAL
+// _OSF_MMCSR_DEF GLOBAL
+// _SCBDEF GLOBAL
+// _FRMDEF GLOBAL
+// _EXSDEF GLOBAL
+// _OSF_A0_DEF GLOBAL
+// _MCESDEF GLOBAL
+
+// .sbttl "EV5 symbol definitions"
+
+// _EV5DEF
+// _PALTEMP
+// _MM_STAT_DEF
+// _EV5_MM
+// _EV5_IPLDEF
+
+// _HALT_CODES GLOBAL
+// _MCHK_CODES GLOBAL
+
+// _PAL_IMPURE
+// _PAL_LOGOUT
+
+
+
+
+// .sbttl "PALcode configuration options"
+
+// There are a number of options that may be assembled into this version of
+// PALcode. They should be adjusted in a prefix assembly file (i.e. do not edit
+// the following). The options that can be adjusted cause the resultant PALcode
+// to reflect the desired target system.
+
+
+#define osfpal 1 // This is the PALcode for OSF.
+
+#ifndef rawhide_system
+
+#define rawhide_system 0
+#endif
+
+
+#ifndef real_mm
+// Page table translation vs 1-1 mapping
+#define real_mm 1
+#endif
+
+
+#ifndef rax_mode
+
+#define rax_mode 0
+#endif
+
+#ifndef egore
+// End of reset flow starts a program at 200000(hex).
+#define egore 1
+#endif
+
+#ifndef acore
+// End of reset flow starts a program at 40000(hex).
+#define acore 0
+#endif
+
+
+// assume acore+egore+rax_mode lt 2 // Assertion checker
+
+#ifndef beh_model
+// EV5 behavioral model specific code
+#define beh_model 1
+#endif
+
+#ifndef init_cbox
+// Reset flow init of Bcache and Scache
+#define init_cbox 1
+#endif
+
+#ifndef disable_crd
+// Decides whether the reset flow will disable
+// correctable read interrupts via ICSR
+#define disable_crd 0
+#endif
+
+#ifndef perfmon_debug
+#define perfmon_debug 0
+#endif
+
+#ifndef icflush_on_tbix
+#define icflush_on_tbix 0
+#endif
+
+#ifndef remove_restore_state
+#define remove_restore_state 0
+#endif
+
+#ifndef remove_save_state
+#define remove_save_state 0
+#endif
+
+#ifndef enable_physical_console
+#define enable_physical_console 0
+#endif
+
+#ifndef ev5_p1
+#define ev5_p1 0
+#endif
+
+#ifndef ev5_p2
+#define ev5_p2 1
+#endif
+
+// assume ev5_p1+ev5_p2 eq 1
+
+#ifndef ldvpte_bug_fix
+#define ldvpte_bug_fix 1 // If set, fix ldvpte bug in dtbmiss_double flow.
+#endif
+
+#ifndef spe_fix
+// If set, disable super-page mode in user mode and re-enable
+// in kernel. Workaround for cpu bug.
+#define spe_fix 0
+#endif
+#ifndef build_fixed_image
+#define build_fixed_image 0
+#endif
+
+
+#ifndef fill_err_hack
+// If set, disable fill_error mode in user mode and re-enable
+// in kernel. Workaround for cpu bug.
+#define fill_err_hack 0
+#endif
+
+
+// .macro hw_rei_spe
+// .iif eq spe_fix, hw_rei
+//#if spe_fix != 0
+//
+//
+//#define hw_rei_chm_count hw_rei_chm_count + 1
+// p4_fixup_label \hw_rei_chm_count
+// .iif eq build_fixed_image, br r31, hw_rei_update_spe
+// .iif ne build_fixed_image, hw_rei
+//#endif
+//
+// .endm
+
+// Add flag "osf_chm_fix" to enable dcache in user mode only
+// to avoid cpu bug.
+
+#ifndef osf_chm_fix
+// If set, enable D-Cache in user mode only.
+#define osf_chm_fix 0
+#endif
+
+#if osf_chm_fix != 0
+#define hw_rei_chm_count 0
+#endif
+
+#if osf_chm_fix != 0
+#define hw_rei_stall_chm_count 0
+#endif
+
+#ifndef enable_p4_fixups
+// If set, do EV5 Pass 4 fixups
+#define enable_p4_fixups 0
+#endif
+
+// Only allow fixups if fix enabled
+#if spe_fix == 0
+#define osf_chm_fix 0
+#endif
+
+#if spe_fix == 0
+#define enable_p4_fixups 0
+#endif
+
+
+ //Turn off fill_errors and MEM_NEM in user mode
+// .macro fill_error_hack ?L10_, ?L20_, ?L30_, ?L40_
+// //save r22,r23,r24
+// stqp r22, 0x150(r31) //add
+// stqp r23, 0x158(r31) //contents
+// stqp r24, 0x160(r31) //bit mask
+//
+// lda r22, 0x82(r31)
+// ldah r22, 0x8740(r22)
+// sll r22, 8, r22
+// ldlp r23, 0x80(r22) // r23 <- contents of CIA_MASK
+// bis r23,r31,r23
+//
+// lda r24, 0x8(r31) // r24 <- MEM_NEM bit
+// beq r10, L10_ // IF user mode (r10<0> == 0) pal mode
+// bic r23, r24, r23 // set fillerr_en bit
+// br r31, L20_ // ELSE
+//L10_: bis r23, r24, r23 // clear fillerr_en bit
+//L20_: // ENDIF
+//
+// stlp r23, 0x80(r22) // write back the CIA_MASK register
+// mb
+// ldlp r23, 0x80(r22)
+// bis r23,r31,r23
+// mb
+//
+// lda r22, 1(r31) // r22 <- 87.4000.0100 ptr to CIA_CTRL
+// ldah r22, 0x8740(r22)
+// sll r22, 8, r22
+// ldlp r23, 0(r22) // r23 <- contents of CIA_CTRL
+// bis r23,r31,r23
+//
+//
+// lda r24, 0x400(r31) // r9 <- fillerr_en bit
+// beq r10, L30_ // IF user mode (r10<0> == 0) pal mode
+// bic r23, r24, r23 // set fillerr_en bit
+// br r31, L40_ // ELSE
+//L30_: bis r23, r24, r23 // clear fillerr_en bit
+//L40_: // ENDIF
+//
+// stlp r23, 0(r22) // write back the CIA_CTRL register
+// mb
+// ldlp r23, 0(r22)
+// bis r23,r31,r23
+// mb
+//
+// //restore r22,r23,r24
+// ldqp r22, 0x150(r31)
+// ldqp r23, 0x158(r31)
+// ldqp r24, 0x160(r31)
+//
+// .endm
+
+// multiprocessor support can be enabled for a max of n processors by
+// setting the following to the number of processors on the system.
+// Note that this is really the max cpuid.
+
+#ifndef max_cpuid
+#define max_cpuid 8
+#endif
+
+#ifndef osf_svmin // platform specific palcode version number
+#define osf_svmin 0
+#endif
+
+
+#define osfpal_version_h ((max_cpuid<<16) | (osf_svmin<<0))
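+// Illustrative note: with the values set earlier in this file
+// (max_cpuid = 1, osf_svmin = 1), osfpal_version_h evaluates to
+// 0x00010001; the #ifndef defaults above only take effect when those
+// symbols have not already been defined.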
+
+// .mcall ldqp // override macro64 definition with macro from library
+// .mcall stqp // override macro64 definition with macro from library
+
+
+// .psect _pal,mix
+// huh pb pal_base:
+// huh pb #define current_block_base . - pal_base
+
+// .sbttl "RESET - Reset Trap Entry Point"
+//+
+// RESET - offset 0000
+// Entry:
+// Vectored into via hardware trap on reset, or branched to
+// on swppal.
+//
+// r0 = whami
+// r1 = pal_base
+// r2 = base of scratch area
+// r3 = halt code
+//
+//
+// Function:
+//
+//-
+
+ .text 0
+ . = 0x0000
+ .globl Pal_Base
+Pal_Base:
+ HDW_VECTOR(PAL_RESET_ENTRY)
+Trap_Reset:
+ nop
+#ifdef SIMOS
+ /*
+ * store into r1
+ */
+ br r1,sys_reset
+#else
+ /* following is a srcmax change */
+
+ DEBUGSTORE(0x41)
+ /* The original code jumped using r1 as a linkage register to pass the base
+ of PALcode to the platform specific code. We use r1 to pass a parameter
+ from the SROM, so we hardcode the address of Pal_Base in platform.s
+ */
+ br r31, sys_reset
+#endif
+
+ // Specify PAL version info as a constant
+ // at a known location (reset + 8).
+
+ .long osfpal_version_l // <pal_type@16> ! <vmaj@8> ! <vmin@0>
+ .long osfpal_version_h // <max_cpuid@16> ! <osf_svmin@0>
+ .long 0
+ .long 0
+pal_impure_start:
+ .quad 0
+pal_debug_ptr:
+ .quad 0 // reserved for debug pointer ; 20
+#if beh_model == 0
+
+
+#if enable_p4_fixups != 0
+
+
+ .quad 0
+ .long p4_fixup_hw_rei_fixup_table
+#endif
+
+#else
+
+ .quad 0 //
+ .quad 0 //0x0030
+ .quad 0
+ .quad 0 //0x0040
+ .quad 0
+ .quad 0 //0x0050
+ .quad 0
+ .quad 0 //0x0060
+ .quad 0
+pal_enter_cns_address:
+ .quad 0 //0x0070 -- address to jump to from enter_console
+	.long	((sys_exit_console-pal_base)+1)	//0x0078 -- offset to sys_exit_console (set palmode bit)
+#endif
+
+
+
+
+// .sbttl "IACCVIO- Istream Access Violation Trap Entry Point"
+
+//+
+// IACCVIO - offset 0080
+// Entry:
+// Vectored into via hardware trap on Istream access violation or sign check error on PC.
+//
+// Function:
+// Build stack frame
+// a0 <- Faulting VA
+// a1 <- MMCSR (1 for ACV)
+// a2 <- -1 (for ifetch fault)
+// vector via entMM
+//-
+
+ HDW_VECTOR(PAL_IACCVIO_ENTRY)
+Trap_Iaccvio:
+ DEBUGSTORE(0x42)
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ bis r11, r31, r12 // Save PS
+ bge r25, TRAP_IACCVIO_10_ // no stack swap needed if cm=kern
+
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r12 // Set new PS
+ mfpr r30, pt_ksp
+
+TRAP_IACCVIO_10_:
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+ mfpr r14, exc_addr // get pc
+
+ stq r16, osfsf_a0(sp) // save regs
+ bic r14, 3, r16 // pass pc/va as a0
+
+ stq r17, osfsf_a1(sp) // a1
+ or r31, mmcsr_c_acv, r17 // pass mm_csr as a1
+
+ stq r18, osfsf_a2(sp) // a2
+ mfpr r13, pt_entmm // get entry point
+
+ stq r11, osfsf_ps(sp) // save old ps
+ bis r12, r31, r11 // update ps
+
+ stq r16, osfsf_pc(sp) // save pc
+ stq r29, osfsf_gp(sp) // save gp
+
+ mtpr r13, exc_addr // load exc_addr with entMM
+ // 1 cycle to hw_rei
+ mfpr r29, pt_kgp // get the kgp
+
+ subq r31, 1, r18 // pass flag of istream, as a2
+ hw_rei_spe
+
+
+// .sbttl "INTERRUPT- Interrupt Trap Entry Point"
+
+//+
+// INTERRUPT - offset 0100
+// Entry:
+// Vectored into via trap on hardware interrupt
+//
+// Function:
+// check for halt interrupt
+// check for passive release (current ipl geq requestor)
+// if necessary, switch to kernel mode
+// push stack frame, update ps (including current mode and ipl copies), sp, and gp
+// pass the interrupt info to the system module
+//
+//-
+
+
+ HDW_VECTOR(PAL_INTERRUPT_ENTRY)
+Trap_Interrupt:
+ mfpr r13, ev5__intid // Fetch level of interruptor
+ mfpr r25, ev5__isr // Fetch interrupt summary register
+
+ srl r25, isr_v_hlt, r9 // Get HLT bit
+ mfpr r14, ev5__ipl
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kern
+ blbs r9, sys_halt_interrupt // halt_interrupt if HLT bit set
+
+ cmple r13, r14, r8 // R8 = 1 if intid .less than or eql. ipl
+	bne	r8, sys_passive_release	// Passive release if current rupt is lt or eq ipl
+
+ and r11, osfps_m_mode, r10 // get mode bit
+ beq r10, TRAP_INTERRUPT_10_ // Skip stack swap in kernel
+
+ mtpr r30, pt_usp // save user stack
+ mfpr r30, pt_ksp // get kern stack
+
+TRAP_INTERRUPT_10_:
+ lda sp, (0-osfsf_c_size)(sp)// allocate stack space
+ mfpr r14, exc_addr // get pc
+
+ stq r11, osfsf_ps(sp) // save ps
+ stq r14, osfsf_pc(sp) // save pc
+
+ stq r29, osfsf_gp(sp) // push gp
+ stq r16, osfsf_a0(sp) // a0
+
+// pvc_violate 354 // ps is cleared anyway, if store to stack faults.
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+ stq r17, osfsf_a1(sp) // a1
+
+ stq r18, osfsf_a2(sp) // a2
+ subq r13, 0x11, r12 // Start to translate from EV5IPL->OSFIPL
+
+ srl r12, 1, r8 // 1d, 1e: ipl 6. 1f: ipl 7.
+ subq r13, 0x1d, r9 // Check for 1d, 1e, 1f
+
+ cmovge r9, r8, r12 // if .ge. 1d, then take shifted value
+ bis r12, r31, r11 // set new ps
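+	// Net EV5IPL->OSFIPL mapping implied by the three instructions above:
+	//   intid <  0x1d:       new ipl = intid - 0x11
+	//   intid = 0x1d..0x1f:  new ipl = (intid - 0x11) >> 1, i.e. 6, 6, 7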
+
+ mfpr r12, pt_intmask
+ and r11, osfps_m_ipl, r14 // Isolate just new ipl (not really needed, since all non-ipl bits zeroed already)
+
+#ifdef SIMOS
+ /*
+ * Lance had space problems. We don't.
+ */
+ extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
+ mfpr r29, pt_kgp // update gp
+ mtpr r14, ev5__ipl // load the new IPL into Ibox
+#else
+// Moved the following three lines to sys_interrupt to make room for debug
+// extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
+// mfpr r29, pt_kgp // update gp
+
+// mtpr r14, ev5__ipl // load the new IPL into Ibox
+#endif
+ br r31, sys_interrupt // Go handle interrupt
+
+
+
+// .sbttl "ITBMISS- Istream TBmiss Trap Entry Point"
+
+//+
+// ITBMISS - offset 0180
+// Entry:
+// Vectored into via hardware trap on Istream translation buffer miss.
+//
+// Function:
+// Do a virtual fetch of the PTE, and fill the ITB if the PTE is valid.
+// Can trap into DTBMISS_DOUBLE.
+// This routine can use the PALshadow registers r8, r9, and r10
+//
+//-
+
+ HDW_VECTOR(PAL_ITB_MISS_ENTRY)
+Trap_Itbmiss:
+#if real_mm == 0
+
+
+ // Simple 1-1 va->pa mapping
+
+ nop // Pad to align to E1
+ mfpr r8, exc_addr
+
+ srl r8, page_offset_size_bits, r9
+ sll r9, 32, r9
+
+ lda r9, 0x3301(r9) // Make PTE, V set, all KRE, URE, KWE, UWE
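+	// 0x3301 sets PTE bits <13,12,9,8,0>, i.e. UWE|KWE|URE|KRE|V in the OSF PTE layout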
+ mtpr r9, itb_pte // E1
+
+ hw_rei_stall // Nital says I don't have to obey shadow wait rule here.
+#else
+
+ // Real MM mapping
+ nop
+ mfpr r8, ev5__ifault_va_form // Get virtual address of PTE.
+
+ nop
+ mfpr r10, exc_addr // Get PC of faulting instruction in case of DTBmiss.
+
+pal_itb_ldq:
+ ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
+ mtpr r10, exc_addr // Restore exc_address if there was a trap.
+
+ mfpr r31, ev5__va // Unlock VA in case there was a double miss
+ nop
+
+ and r8, osfpte_m_foe, r25 // Look for FOE set.
+ blbc r8, invalid_ipte_handler // PTE not valid.
+
+ nop
+ bne r25, foe_ipte_handler // FOE is set
+
+ nop
+ mtpr r8, ev5__itb_pte // Ibox remembers the VA, load the PTE into the ITB.
+
+ hw_rei_stall //
+
+#endif
+
+
+
+
+// .sbttl "DTBMISS_SINGLE - Dstream Single TBmiss Trap Entry Point"
+
+//+
+// DTBMISS_SINGLE - offset 0200
+// Entry:
+// Vectored into via hardware trap on Dstream single translation buffer miss.
+//
+// Function:
+// Do a virtual fetch of the PTE, and fill the DTB if the PTE is valid.
+// Can trap into DTBMISS_DOUBLE.
+// This routine can use the PALshadow registers r8, r9, and r10
+//-
+
+ HDW_VECTOR(PAL_DTB_MISS_ENTRY)
+Trap_Dtbmiss_Single:
+#if real_mm == 0
+ // Simple 1-1 va->pa mapping
+ mfpr r8, va // E0
+ srl r8, page_offset_size_bits, r9
+
+ sll r9, 32, r9
+ lda r9, 0x3301(r9) // Make PTE, V set, all KRE, URE, KWE, UWE
+
+ mtpr r9, dtb_pte // E0
+ nop // Pad to align to E0
+
+
+
+ mtpr r8, dtb_tag // E0
+ nop
+
+ nop // Pad tag write
+ nop
+
+ nop // Pad tag write
+ nop
+
+ hw_rei
+#else
+ mfpr r8, ev5__va_form // Get virtual address of PTE - 1 cycle delay. E0.
+ mfpr r10, exc_addr // Get PC of faulting instruction in case of error. E1.
+
+// DEBUGSTORE(0x45)
+// DEBUG_EXC_ADDR()
+ // Real MM mapping
+ mfpr r9, ev5__mm_stat // Get read/write bit. E0.
+ mtpr r10, pt6 // Stash exc_addr away
+
+pal_dtb_ldq:
+ ld_vpte r8, 0(r8) // Get PTE, traps to DTBMISS_DOUBLE in case of TBmiss
+ nop // Pad MF VA
+
+ mfpr r10, ev5__va // Get original faulting VA for TB load. E0.
+ nop
+
+ mtpr r8, ev5__dtb_pte // Write DTB PTE part. E0.
+ blbc r8, invalid_dpte_handler // Handle invalid PTE
+
+ mtpr r10, ev5__dtb_tag // Write DTB TAG part, completes DTB load. No virt ref for 3 cycles.
+ mfpr r10, pt6
+
+ // Following 2 instructions take 2 cycles
+ mtpr r10, exc_addr // Return linkage in case we trapped. E1.
+ mfpr r31, pt0 // Pad the write to dtb_tag
+
+ hw_rei // Done, return
+#endif
+
+
+
+
+// .sbttl "DTBMISS_DOUBLE - Dstream Double TBmiss Trap Entry Point"
+
+//+
+// DTBMISS_DOUBLE - offset 0280
+// Entry:
+// Vectored into via hardware trap on Double TBmiss from single miss flows.
+//
+// r8 - faulting VA
+// r9 - original MMstat
+// r10 - original exc_addr (both itb,dtb miss)
+// pt6 - original exc_addr (dtb miss flow only)
+// VA IPR - locked with original faulting VA
+//
+// Function:
+// Get PTE, if valid load TB and return.
+// If not valid then take TNV/ACV exception.
+//
+// pt4 and pt5 are reserved for this flow.
+//
+//
+//-
+
+ HDW_VECTOR(PAL_DOUBLE_MISS_ENTRY)
+Trap_Dtbmiss_double:
+#if ldvpte_bug_fix != 0
+ mtpr r8, pt4 // save r8 to do exc_addr check
+ mfpr r8, exc_addr
+ blbc r8, Trap_Dtbmiss_Single //if not in palmode, should be in the single routine, dummy!
+ mfpr r8, pt4 // restore r8
+#endif
+ nop
+ mtpr r22, pt5 // Get some scratch space. E1.
+ // Due to virtual scheme, we can skip the first lookup and go
+ // right to fetch of level 2 PTE
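+	// With the usual 8KB-page constants (page_offset_size_bits=13, page_seg_size_bits=10),
+	// the first sll/srl pair below yields VA<32:23>*8 and the second yields VA<22:13>*8,
+	// i.e. the level-2 and level-3 table indices scaled to quadword offsets.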
+ sll r8, (64-((2*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
+ mtpr r21, pt4 // Get some scratch space. E1.
+
+ srl r22, 61-page_seg_size_bits, r22 // Get Va<seg1>*8
+ mfpr r21, pt_ptbr // Get physical address of the page table.
+
+ nop
+ addq r21, r22, r21 // Index into page table for level 2 PTE.
+
+ sll r8, (64-((1*page_seg_size_bits)+page_offset_size_bits)), r22 // Clean off upper bits of VA
+ ldqp r21, 0(r21) // Get level 2 PTE (addr<2:0> ignored)
+
+	srl	r22, 61-page_seg_size_bits, r22	// Get Va<seg2>*8
+ blbc r21, double_pte_inv // Check for Invalid PTE.
+
+ srl r21, 32, r21 // extract PFN from PTE
+ sll r21, page_offset_size_bits, r21 // get PFN * 2^13 for add to <seg3>*8
+
+ addq r21, r22, r21 // Index into page table for level 3 PTE.
+ nop
+
+ ldqp r21, 0(r21) // Get level 3 PTE (addr<2:0> ignored)
+ blbc r21, double_pte_inv // Check for invalid PTE.
+
+ mtpr r21, ev5__dtb_pte // Write the PTE. E0.
+ mfpr r22, pt5 // Restore scratch register
+
+ mtpr r8, ev5__dtb_tag // Write the TAG. E0. No virtual references in subsequent 3 cycles.
+ mfpr r21, pt4 // Restore scratch register
+
+ nop // Pad write to tag.
+ nop
+
+ nop // Pad write to tag.
+ nop
+
+ hw_rei
+
+
+
+// .sbttl "UNALIGN -- Dstream unalign trap"
+//+
+// UNALIGN - offset 0300
+// Entry:
+// Vectored into via hardware trap on unaligned Dstream reference.
+//
+// Function:
+// Build stack frame
+// a0 <- Faulting VA
+// a1 <- Opcode
+// a2 <- src/dst register number
+// vector via entUna
+//-
+
+ HDW_VECTOR(PAL_UNALIGN_ENTRY)
+Trap_Unalign:
+/* DEBUGSTORE(0x47)*/
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ mfpr r8, ev5__mm_stat // Get mmstat --ok to use r8, no tbmiss
+ mfpr r14, exc_addr // get pc
+
+ srl r8, mm_stat_v_ra, r13 // Shift Ra field to ls bits
+ blbs r14, pal_pal_bug_check // Bugcheck if unaligned in PAL
+
+ blbs r8, UNALIGN_NO_DISMISS // lsb only set on store or fetch_m
+ // not set, must be a load
+ and r13, 0x1F, r8 // isolate ra
+
+ cmpeq r8, 0x1F, r8 // check for r31/F31
+ bne r8, dfault_fetch_ldr31_err // if its a load to r31 or f31 -- dismiss the fault
+
+UNALIGN_NO_DISMISS:
+ bis r11, r31, r12 // Save PS
+ bge r25, UNALIGN_NO_DISMISS_10_ // no stack swap needed if cm=kern
+
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r12 // Set new PS
+ mfpr r30, pt_ksp
+
+UNALIGN_NO_DISMISS_10_:
+ mfpr r25, ev5__va // Unlock VA
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+
+ mtpr r25, pt0 // Stash VA
+ stq r18, osfsf_a2(sp) // a2
+
+ stq r11, osfsf_ps(sp) // save old ps
+ srl r13, mm_stat_v_opcode-mm_stat_v_ra, r25// Isolate opcode
+
+ stq r29, osfsf_gp(sp) // save gp
+ addq r14, 4, r14 // inc PC past the ld/st
+
+ stq r17, osfsf_a1(sp) // a1
+	and	r25, mm_stat_m_opcode, r17	// Clean opcode for a1
+
+ stq r16, osfsf_a0(sp) // save regs
+ mfpr r16, pt0 // a0 <- va/unlock
+
+ stq r14, osfsf_pc(sp) // save pc
+ mfpr r25, pt_entuna // get entry point
+
+
+ bis r12, r31, r11 // update ps
+ br r31, unalign_trap_cont
+
+
+
+
+// .sbttl "DFAULT - Dstream Fault Trap Entry Point"
+
+//+
+// DFAULT - offset 0380
+// Entry:
+// Vectored into via hardware trap on dstream fault or sign check error on DVA.
+//
+// Function:
+// Ignore faults on FETCH/FETCH_M
+// Check for DFAULT in PAL
+// Build stack frame
+// a0 <- Faulting VA
+// a1 <- MMCSR (1 for ACV, 2 for FOR, 4 for FOW)
+// a2 <- R/W
+// vector via entMM
+//
+//-
+ HDW_VECTOR(PAL_D_FAULT_ENTRY)
+Trap_Dfault:
+// DEBUGSTORE(0x48)
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ mfpr r13, ev5__mm_stat // Get mmstat
+ mfpr r8, exc_addr // get pc, preserve r14
+
+ srl r13, mm_stat_v_opcode, r9 // Shift opcode field to ls bits
+ blbs r8, dfault_in_pal
+
+ bis r8, r31, r14 // move exc_addr to correct place
+ bis r11, r31, r12 // Save PS
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ and r9, mm_stat_m_opcode, r9 // Clean all but opcode
+
+ cmpeq r9, evx_opc_sync, r9 // Is the opcode fetch/fetchm?
+ bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
+
+ //dismiss exception if load to r31/f31
+ blbs r13, dfault_no_dismiss // mm_stat<0> set on store or fetchm
+
+ // not a store or fetch, must be a load
+ srl r13, mm_stat_v_ra, r9 // Shift rnum to low bits
+
+ and r9, 0x1F, r9 // isolate rnum
+ nop
+
+ cmpeq r9, 0x1F, r9 // Is the rnum r31 or f31?
+ bne r9, dfault_fetch_ldr31_err // Yes, dismiss the fault
+
+dfault_no_dismiss:
+ and r13, 0xf, r13 // Clean extra bits in mm_stat
+ bge r25, dfault_trap_cont // no stack swap needed if cm=kern
+
+
+ mtpr r30, pt_usp // save user stack
+ bis r31, r31, r12 // Set new PS
+
+ mfpr r30, pt_ksp
+ br r31, dfault_trap_cont
+
+
+
+
+
+// .sbttl "MCHK - Machine Check Trap Entry Point"
+
+//+
+// MCHK - offset 0400
+// Entry:
+// Vectored into via hardware trap on machine check.
+//
+// Function:
+//
+//-
+
+ HDW_VECTOR(PAL_MCHK_ENTRY)
+Trap_Mchk:
+ DEBUGSTORE(0x49)
+ mtpr r31, ic_flush_ctl // Flush the Icache
+ br r31, sys_machine_check
+
+
+
+
+// .sbttl "OPCDEC - Illegal Opcode Trap Entry Point"
+
+//+
+// OPCDEC - offset 0480
+// Entry:
+// Vectored into via hardware trap on illegal opcode.
+//
+// Build stack frame
+// a0 <- code
+// a1 <- unpred
+// a2 <- unpred
+// vector via entIF
+//
+//-
+
+ HDW_VECTOR(PAL_OPCDEC_ENTRY)
+Trap_Opcdec:
+ DEBUGSTORE(0x4a)
+//simos DEBUG_EXC_ADDR()
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ mfpr r14, exc_addr // get pc
+ blbs r14, pal_pal_bug_check // check opcdec in palmode
+
+ bis r11, r31, r12 // Save PS
+ bge r25, TRAP_OPCDEC_10_ // no stack swap needed if cm=kern
+
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r12 // Set new PS
+ mfpr r30, pt_ksp
+
+TRAP_OPCDEC_10_:
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+ addq r14, 4, r14 // inc pc
+
+ stq r16, osfsf_a0(sp) // save regs
+ bis r31, osf_a0_opdec, r16 // set a0
+
+ stq r11, osfsf_ps(sp) // save old ps
+ mfpr r13, pt_entif // get entry point
+
+ stq r18, osfsf_a2(sp) // a2
+ stq r17, osfsf_a1(sp) // a1
+
+ stq r29, osfsf_gp(sp) // save gp
+ stq r14, osfsf_pc(sp) // save pc
+
+ bis r12, r31, r11 // update ps
+ mtpr r13, exc_addr // load exc_addr with entIF
+ // 1 cycle to hw_rei, E1
+
+ mfpr r29, pt_kgp // get the kgp, E1
+
+ hw_rei_spe // done, E1
+
+
+
+
+
+
+// .sbttl "ARITH - Arithmetic Exception Trap Entry Point"
+
+//+
+// ARITH - offset 0500
+// Entry:
+//	Vectored into via hardware trap on arithmetic exception.
+//
+// Function:
+// Build stack frame
+// a0 <- exc_sum
+// a1 <- exc_mask
+// a2 <- unpred
+// vector via entArith
+//
+//-
+ HDW_VECTOR(PAL_ARITH_ENTRY)
+Trap_Arith:
+ DEBUGSTORE(0x4b)
+ and r11, osfps_m_mode, r12 // get mode bit
+ mfpr r31, ev5__va // unlock mbox
+
+ bis r11, r31, r25 // save ps
+ mfpr r14, exc_addr // get pc
+
+ nop
+ blbs r14, pal_pal_bug_check // arith trap from PAL
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ beq r12, TRAP_ARITH_10_ // if zero we are in kern now
+
+ bis r31, r31, r25 // set the new ps
+ mtpr r30, pt_usp // save user stack
+
+ nop
+ mfpr r30, pt_ksp // get kern stack
+
+TRAP_ARITH_10_: lda sp, 0-osfsf_c_size(sp) // allocate stack space
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ nop // Pad current mode write and stq
+ mfpr r13, ev5__exc_sum // get the exc_sum
+
+ mfpr r12, pt_entarith
+ stq r14, osfsf_pc(sp) // save pc
+
+ stq r17, osfsf_a1(sp)
+ mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
+
+ stq r11, osfsf_ps(sp) // save ps
+ bis r25, r31, r11 // set new ps
+
+ stq r16, osfsf_a0(sp) // save regs
+ srl r13, exc_sum_v_swc, r16// shift data to correct position
+
+ stq r18, osfsf_a2(sp)
+// pvc_violate 354 // ok, but make sure reads of exc_mask/sum are not in same trap shadow
+ mtpr r31, ev5__exc_sum // Unlock exc_sum and exc_mask
+
+ stq r29, osfsf_gp(sp)
+ mtpr r12, exc_addr // Set new PC - 1 bubble to hw_rei - E1
+
+ mfpr r29, pt_kgp // get the kern gp - E1
+ hw_rei_spe // done - E1
+
+
+
+
+
+
+// .sbttl "FEN - Illegal Floating Point Operation Trap Entry Point"
+
+//+
+// FEN - offset 0580
+// Entry:
+// Vectored into via hardware trap on illegal FP op.
+//
+// Function:
+// Build stack frame
+// a0 <- code
+// a1 <- unpred
+// a2 <- unpred
+// vector via entIF
+//
+//-
+
+ HDW_VECTOR(PAL_FEN_ENTRY)
+Trap_Fen:
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ mfpr r14, exc_addr // get pc
+ blbs r14, pal_pal_bug_check // check opcdec in palmode
+
+ mfpr r13, ev5__icsr
+ nop
+
+ bis r11, r31, r12 // Save PS
+ bge r25, TRAP_FEN_10_ // no stack swap needed if cm=kern
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r12 // Set new PS
+ mfpr r30, pt_ksp
+
+TRAP_FEN_10_:
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+ srl r13, icsr_v_fpe, r25 // Shift FP enable to bit 0
+
+
+ stq r16, osfsf_a0(sp) // save regs
+ mfpr r13, pt_entif // get entry point
+
+ stq r18, osfsf_a2(sp) // a2
+ stq r11, osfsf_ps(sp) // save old ps
+
+ stq r29, osfsf_gp(sp) // save gp
+ bis r12, r31, r11 // set new ps
+
+ stq r17, osfsf_a1(sp) // a1
+ blbs r25,fen_to_opcdec // If FP is enabled, this is really OPCDEC.
+
+ bis r31, osf_a0_fen, r16 // set a0
+ stq r14, osfsf_pc(sp) // save pc
+
+ mtpr r13, exc_addr // load exc_addr with entIF
+ // 1 cycle to hw_rei -E1
+
+ mfpr r29, pt_kgp // get the kgp -E1
+
+ hw_rei_spe // done -E1
+
+// FEN trap was taken, but the fault is really opcdec.
+ ALIGN_BRANCH
+fen_to_opcdec:
+ addq r14, 4, r14 // save PC+4
+ bis r31, osf_a0_opdec, r16 // set a0
+
+ stq r14, osfsf_pc(sp) // save pc
+ mtpr r13, exc_addr // load exc_addr with entIF
+ // 1 cycle to hw_rei
+
+ mfpr r29, pt_kgp // get the kgp
+ hw_rei_spe // done
+
+
+
+// .sbttl "Misc handlers"
+ // Start area for misc code.
+//+
+//dfault_trap_cont
+// A dfault trap has been taken. The sp has been updated if necessary.
+//	Push a stack frame and vector via entMM.
+//
+// Current state:
+// r12 - new PS
+// r13 - MMstat
+// VA - locked
+//
+//-
+ ALIGN_BLOCK
+dfault_trap_cont:
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+ mfpr r25, ev5__va // Fetch VA/unlock
+
+ stq r18, osfsf_a2(sp) // a2
+ and r13, 1, r18 // Clean r/w bit for a2
+
+ stq r16, osfsf_a0(sp) // save regs
+ bis r25, r31, r16 // a0 <- va
+
+ stq r17, osfsf_a1(sp) // a1
+ srl r13, 1, r17 // shift fault bits to right position
+
+ stq r11, osfsf_ps(sp) // save old ps
+ bis r12, r31, r11 // update ps
+
+ stq r14, osfsf_pc(sp) // save pc
+ mfpr r25, pt_entmm // get entry point
+
+ stq r29, osfsf_gp(sp) // save gp
+ cmovlbs r17, 1, r17 // a2. acv overrides fox.
+
+ mtpr r25, exc_addr // load exc_addr with entMM
+ // 1 cycle to hw_rei
+ mfpr r29, pt_kgp // get the kgp
+
+ hw_rei_spe // done
+
+//+
+//unalign_trap_cont
+// An unalign trap has been taken. Just need to finish up a few things.
+//
+// Current state:
+// r25 - entUna
+// r13 - shifted MMstat
+//
+//-
+ ALIGN_BLOCK
+unalign_trap_cont:
+ mtpr r25, exc_addr // load exc_addr with entUna
+ // 1 cycle to hw_rei
+
+
+ mfpr r29, pt_kgp // get the kgp
+ and r13, mm_stat_m_ra, r18 // Clean Ra for a2
+
+ hw_rei_spe // done
+
+
+
+//+
+// dfault_in_pal
+// Dfault trap was taken, exc_addr points to a PAL PC.
+// r9 - mmstat<opcode> right justified
+// r8 - exception address
+//
+// These are the cases:
+// opcode was STQ -- from a stack builder, KSP not valid halt
+// r14 - original exc_addr
+// r11 - original PS
+// opcode was STL_C -- rti or retsys clear lock_flag by stack write,
+// KSP not valid halt
+// r11 - original PS
+// r14 - original exc_addr
+// opcode was LDQ -- retsys or rti stack read, KSP not valid halt
+// r11 - original PS
+// r14 - original exc_addr
+// opcode was HW_LD -- itbmiss or dtbmiss, bugcheck due to fault on page tables
+// r10 - original exc_addr
+// r11 - original PS
+//
+//
+//-
+ ALIGN_BLOCK
+dfault_in_pal:
+ DEBUGSTORE(0x50)
+ bic r8, 3, r8 // Clean PC
+ mfpr r9, pal_base
+
+ mfpr r31, va // unlock VA
+#if real_mm != 0
+ // if not real_mm, should never get here from miss flows
+
+ subq r9, r8, r8 // pal_base - offset
+
+ lda r9, pal_itb_ldq-pal_base(r8)
+ nop
+
+ beq r9, dfault_do_bugcheck
+ lda r9, pal_dtb_ldq-pal_base(r8)
+
+ beq r9, dfault_do_bugcheck
+#endif
+
+//
+// KSP invalid halt case --
+ksp_inval_halt:
+ DEBUGSTORE(76)
+ bic r11, osfps_m_mode, r11 // set ps to kernel mode
+ mtpr r0, pt0
+
+ mtpr r31, dtb_cm // Make sure that the CM IPRs are all kernel mode
+ mtpr r31, ips
+
+ mtpr r14, exc_addr // Set PC to instruction that caused trouble
+//orig pvc_jsr updpcb, bsr=1
+ bsr r0, pal_update_pcb // update the pcb
+
+	lda	r0, hlt_c_ksp_inval(r31)	// set halt code to ksp invalid halt
+ br r31, sys_enter_console // enter the console
+
+ ALIGN_BRANCH
+dfault_do_bugcheck:
+ bis r10, r31, r14 // bugcheck expects exc_addr in r14
+ br r31, pal_pal_bug_check
+
+
+ ALIGN_BLOCK
+//+
+// dfault_fetch_ldr31_err - ignore faults on fetch(m) and loads to r31/f31
+// On entry -
+// r14 - exc_addr
+// VA is locked
+//
+//-
+dfault_fetch_ldr31_err:
+ mtpr r11, ev5__dtb_cm
+ mtpr r11, ev5__ps // Make sure ps hasn't changed
+
+ mfpr r31, va // unlock the mbox
+ addq r14, 4, r14 // inc the pc to skip the fetch
+
+ mtpr r14, exc_addr // give ibox new PC
+ mfpr r31, pt0 // pad exc_addr write
+
+ hw_rei
+
+
+
+ ALIGN_BLOCK
+//+
+// sys_from_kern
+// callsys from kernel mode - OS bugcheck machine check
+//
+//-
+sys_from_kern:
+ mfpr r14, exc_addr // PC points to call_pal
+ subq r14, 4, r14
+
+ lda r25, mchk_c_os_bugcheck(r31) // fetch mchk code
+ br r31, pal_pal_mchk
+
+
+// .sbttl "Continuation of long call_pal flows"
+ ALIGN_BLOCK
+//+
+// wrent_tbl
+//	Table to write the exception entry points (entInt, entArith, entMM, entIF, entUna, entSys) into paltemps.
+// 4 instructions/entry
+// r16 has new value
+//
+//-
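+// Each entry is 4 instructions = 16 bytes; the wrent dispatcher (not shown here)
+// presumably indexes this table by the entry-point selector, i.e. wrent_tbl + selector*16.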
+wrent_tbl:
+//orig pvc_jsr wrent, dest=1
+ nop
+ mtpr r16, pt_entint
+
+ mfpr r31, pt0 // Pad for mt->mf paltemp rule
+ hw_rei
+
+
+//orig pvc_jsr wrent, dest=1
+ nop
+ mtpr r16, pt_entarith
+
+ mfpr r31, pt0 // Pad for mt->mf paltemp rule
+ hw_rei
+
+
+//orig pvc_jsr wrent, dest=1
+ nop
+ mtpr r16, pt_entmm
+
+ mfpr r31, pt0 // Pad for mt->mf paltemp rule
+ hw_rei
+
+
+//orig pvc_jsr wrent, dest=1
+ nop
+ mtpr r16, pt_entif
+
+ mfpr r31, pt0 // Pad for mt->mf paltemp rule
+ hw_rei
+
+
+//orig pvc_jsr wrent, dest=1
+ nop
+ mtpr r16, pt_entuna
+
+ mfpr r31, pt0 // Pad for mt->mf paltemp rule
+ hw_rei
+
+
+//orig pvc_jsr wrent, dest=1
+ nop
+ mtpr r16, pt_entsys
+
+ mfpr r31, pt0 // Pad for mt->mf paltemp rule
+ hw_rei
+
+ ALIGN_BLOCK
+//+
+// tbi_tbl
+// Table to do tbi instructions
+// 4 instructions per entry
+//-
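+// Each entry is 4 instructions = 16 bytes; the tbi dispatcher (not shown here) presumably
+// indexes this table as tbi_tbl + (type+2)*16, with type = -2 tbia, -1 tbiap, 0 unused,
+// 1 tbisi, 2 tbisd, 3 tbis, matching the entries laid out below.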
+tbi_tbl:
+ // -2 tbia
+//orig pvc_jsr tbi, dest=1
+ mtpr r31, ev5__dtb_ia // Flush DTB
+ mtpr r31, ev5__itb_ia // Flush ITB
+
+#if icflush_on_tbix != 0
+
+
+ br r31, pal_ic_flush // Flush Icache
+#else
+
+ hw_rei_stall
+#endif
+
+ nop // Pad table
+
+ // -1 tbiap
+//orig pvc_jsr tbi, dest=1
+ mtpr r31, ev5__dtb_iap // Flush DTB
+ mtpr r31, ev5__itb_iap // Flush ITB
+
+#if icflush_on_tbix != 0
+
+
+ br r31, pal_ic_flush // Flush Icache
+#else
+
+ hw_rei_stall
+#endif
+
+ nop // Pad table
+
+
+ // 0 unused
+//orig pvc_jsr tbi, dest=1
+ hw_rei // Pad table
+ nop
+ nop
+ nop
+
+
+ // 1 tbisi
+//orig pvc_jsr tbi, dest=1
+#if icflush_on_tbix != 0
+
+
+
+ nop
+ br r31, pal_ic_flush_and_tbisi // Flush Icache
+ nop
+ nop // Pad table
+#else
+
+ nop
+ nop
+ mtpr r17, ev5__itb_is // Flush ITB
+ hw_rei_stall
+#endif
+
+
+
+ // 2 tbisd
+//orig pvc_jsr tbi, dest=1
+ mtpr r17, ev5__dtb_is // Flush DTB.
+ nop
+
+ nop
+ hw_rei_stall
+
+
+ // 3 tbis
+//orig pvc_jsr tbi, dest=1
+ mtpr r17, ev5__dtb_is // Flush DTB
+#if icflush_on_tbix != 0
+
+
+ br r31, pal_ic_flush_and_tbisi // Flush Icache and ITB
+#else
+ br r31, tbi_finish
+ ALIGN_BRANCH
+tbi_finish:
+ mtpr r17, ev5__itb_is // Flush ITB
+ hw_rei_stall
+#endif
+
+
+
+ ALIGN_BLOCK
+//+
+// bpt_bchk_common:
+// Finish up the bpt/bchk instructions
+//-
+bpt_bchk_common:
+ stq r18, osfsf_a2(sp) // a2
+ mfpr r13, pt_entif // get entry point
+
+ stq r12, osfsf_ps(sp) // save old ps
+ stq r14, osfsf_pc(sp) // save pc
+
+ stq r29, osfsf_gp(sp) // save gp
+ mtpr r13, exc_addr // load exc_addr with entIF
+ // 1 cycle to hw_rei
+
+ mfpr r29, pt_kgp // get the kgp
+
+
+ hw_rei_spe // done
+
+
+ ALIGN_BLOCK
+//+
+// rti_to_user
+// Finish up the rti instruction
+//-
+rti_to_user:
+ mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
+ mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
+
+ mtpr r31, ev5__ipl // set the ipl. No hw_rei for 2 cycles
+	mtpr	r25, pt_ksp		// save off in case RTI to user
+
+ mfpr r30, pt_usp
+ hw_rei_spe // and back
+
+
+ ALIGN_BLOCK
+//+
+// rti_to_kern
+// Finish up the rti instruction
+//-
+rti_to_kern:
+ and r12, osfps_m_ipl, r11 // clean ps
+ mfpr r12, pt_intmask // get int mask
+
+ extbl r12, r11, r12 // get mask for this ipl
+	mtpr	r25, pt_ksp		// save off in case RTI to user
+
+ mtpr r12, ev5__ipl // set the new ipl.
+ or r25, r31, sp // sp
+
+// pvc_violate 217 // possible hidden mt->mf ipl not a problem in callpals
+ hw_rei
+
+ ALIGN_BLOCK
+//+
+// swpctx_cont
+// Finish up the swpctx instruction
+//-
+
+swpctx_cont:
+#if ev5_p1 != 0
+
+
+ bic r25, r24, r25 // clean icsr<FPE>
+ get_impure r8 // get impure pointer
+
+ sll r12, icsr_v_fpe, r12 // shift new fen to pos
+ fix_impure_ipr r8 // adjust impure pointer
+
+ restore_reg1 pmctr_ctl, r8, r8, ipr=1 // "ldqp" - get pmctr_ctl bits
+ srl r23, 32, r24 // move asn to low asn pos
+
+ ldqp r14, osfpcb_q_mmptr(r16)// get new mmptr
+ srl r22, osfpcb_v_pme, r22 // get pme down to bit 0
+
+ or r25, r12, r25 // icsr with new fen
+ sll r24, itb_asn_v_asn, r12
+
+#else
+
+ bic r25, r24, r25 // clean icsr<FPE,PMP>
+ sll r12, icsr_v_fpe, r12 // shift new fen to pos
+
+ ldqp r14, osfpcb_q_mmptr(r16)// get new mmptr
+ srl r22, osfpcb_v_pme, r22 // get pme down to bit 0
+
+ or r25, r12, r25 // icsr with new fen
+ srl r23, 32, r24 // move asn to low asn pos
+
+ and r22, 1, r22
+ sll r24, itb_asn_v_asn, r12
+
+ sll r22, icsr_v_pmp, r22
+ nop
+
+ or r25, r22, r25 // icsr with new pme
+#endif
+
+ sll r24, dtb_asn_v_asn, r24
+
+ subl r23, r13, r13 // gen new cc offset
+ mtpr r12, itb_asn // no hw_rei_stall in 0,1,2,3,4
+
+ mtpr r24, dtb_asn // Load up new ASN
+ mtpr r25, icsr // write the icsr
+
+ sll r14, page_offset_size_bits, r14 // Move PTBR into internal position.
+ ldqp r25, osfpcb_q_usp(r16) // get new usp
+
+	insll	r13, 4, r13		// << 32: position the new offset in CC<63:32>
+// pvc_violate 379 // ldqp can't trap except replay. only problem if mf same ipr in same shadow
+ mtpr r14, pt_ptbr // load the new ptbr
+
+ mtpr r13, cc // set new offset
+ ldqp r30, osfpcb_q_ksp(r16) // get new ksp
+
+// pvc_violate 379 // ldqp can't trap except replay. only problem if mf same ipr in same shadow
+ mtpr r25, pt_usp // save usp
+
+#if ev5_p1 != 0
+
+
+ blbc r8, no_pm_change // if monitoring all processes -- no need to change pm
+
+ // otherwise, monitoring select processes - update pm
+ lda r25, 0x3F(r31)
+	cmovlbc	r22, r31, r8		// if pme clear, disable counters; otherwise use saved encodings
+
+ sll r25, pmctr_v_ctl2, r25 // create ctl field bit mask
+ mfpr r22, ev5__pmctr
+
+ and r8, r25, r8 // mask new ctl value
+ bic r22, r25, r22 // clear ctl field in pmctr
+
+ or r8, r22, r8
+ mtpr r8, ev5__pmctr
+
+no_pm_change:
+#endif
+
+
+#if osf_chm_fix != 0
+
+
+ p4_fixup_hw_rei_stall // removes this section for Pass 4 by placing a hw_rei_stall here
+
+#if build_fixed_image != 0
+
+
+ hw_rei_stall
+#else
+
+	mfpr	r9, pt_pcbb		// get PCB base (FEN is loaded from the PCB below)
+#endif
+
+ ldqp r9, osfpcb_q_fen(r9)
+ blbc r9, no_pm_change_10_ // skip if FEN disabled
+
+ mb // ensure no outstanding fills
+ lda r12, 1<<dc_mode_v_dc_ena(r31)
+ mtpr r12, dc_mode // turn dcache on so we can flush it
+ nop // force correct slotting
+ mfpr r31, pt0 // no mbox instructions in 1,2,3,4
+ mfpr r31, pt0 // no mbox instructions in 1,2,3,4
+ mfpr r31, pt0 // no mbox instructions in 1,2,3,4
+ mfpr r31, pt0 // no mbox instructions in 1,2,3,4
+
+ lda r8, 0(r31) // flood the dcache with junk data
+no_pm_change_5_: ldqp r31, 0(r8)
+ lda r8, 0x20(r8) // touch each cache block
+ srl r8, 13, r9
+ blbc r9, no_pm_change_5_
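+	// The loop above touches blocks 0x0..0x1fe0 in 0x20-byte steps (256 blocks, 8KB of
+	// junk data) and exits once address bit <13> becomes set.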
+
+ mb // ensure no outstanding fills
+ mtpr r31, dc_mode // turn the dcache back off
+ nop // force correct slotting
+ mfpr r31, pt0 // no hw_rei_stall in 0,1
+#endif
+
+
+no_pm_change_10_: hw_rei_stall // back we go
+
+ ALIGN_BLOCK
+//+
+// swppal_cont - finish up the swppal call_pal
+//-
+
+swppal_cont:
+ mfpr r2, pt_misc // get misc bits
+ sll r0, pt_misc_v_switch, r0 // get the "I've switched" bit
+ or r2, r0, r2 // set the bit
+ mtpr r31, ev5__alt_mode // ensure alt_mode set to 0 (kernel)
+ mtpr r2, pt_misc // update the chip
+
+ or r3, r31, r4
+ mfpr r3, pt_impure // pass pointer to the impure area in r3
+//orig fix_impure_ipr r3 // adjust impure pointer for ipr read
+//orig restore_reg1 bc_ctl, r1, r3, ipr=1 // pass cns_bc_ctl in r1
+//orig restore_reg1 bc_config, r2, r3, ipr=1 // pass cns_bc_config in r2
+//orig unfix_impure_ipr r3 // restore impure pointer
+ lda r3, CNS_Q_IPR(r3)
+	RESTORE_SHADOW(r1,CNS_Q_BC_CTL,r3);	// pass cns_bc_ctl in r1
+	RESTORE_SHADOW(r2,CNS_Q_BC_CFG,r3);	// pass cns_bc_config in r2
+ lda r3, -CNS_Q_IPR(r3)
+
+ or r31, r31, r0 // set status to success
+// pvc_violate 1007
+ jmp r31, (r4) // and call our friend, it's her problem now
+
+
+swppal_fail:
+ addq r0, 1, r0 // set unknown pal or not loaded
+ hw_rei // and return
+
+
+// .sbttl "Memory management"
+
+ ALIGN_BLOCK
+//+
+//foe_ipte_handler
+// IFOE detected on level 3 pte, sort out FOE vs ACV
+//
+// on entry:
+// with
+// R8 = pte
+// R10 = pc
+//
+// Function
+// Determine TNV vs ACV vs FOE. Build stack and dispatch
+// Will not be here if TNV.
+//-
+
+foe_ipte_handler:
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ bis r11, r31, r12 // Save PS for stack write
+ bge r25, foe_ipte_handler_10_ // no stack swap needed if cm=kern
+
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r11 // Set new PS
+ mfpr r30, pt_ksp
+
+ srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
+ nop
+
+foe_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+
+ or r10, r31, r14 // Save pc/va in case TBmiss or fault on stack
+ mfpr r13, pt_entmm // get entry point
+
+ stq r16, osfsf_a0(sp) // a0
+ or r14, r31, r16 // pass pc/va as a0
+
+ stq r17, osfsf_a1(sp) // a1
+ nop
+
+ stq r18, osfsf_a2(sp) // a2
+ lda r17, mmcsr_c_acv(r31) // assume ACV
+
+ stq r16, osfsf_pc(sp) // save pc
+ cmovlbs r25, mmcsr_c_foe, r17 // otherwise FOE
+
+ stq r12, osfsf_ps(sp) // save ps
+ subq r31, 1, r18 // pass flag of istream as a2
+
+ stq r29, osfsf_gp(sp)
+ mtpr r13, exc_addr // set vector address
+
+ mfpr r29, pt_kgp // load kgp
+ hw_rei_spe // out to exec
+
+ ALIGN_BLOCK
+//+
+//invalid_ipte_handler
+// TNV detected on level 3 pte, sort out TNV vs ACV
+//
+// on entry:
+// with
+// R8 = pte
+// R10 = pc
+//
+// Function
+// Determine TNV vs ACV. Build stack and dispatch.
+//-
+
+invalid_ipte_handler:
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ bis r11, r31, r12 // Save PS for stack write
+ bge r25, invalid_ipte_handler_10_ // no stack swap needed if cm=kern
+
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r11 // Set new PS
+ mfpr r30, pt_ksp
+
+ srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
+ nop
+
+invalid_ipte_handler_10_: srl r8, osfpte_v_kre, r25 // get kre to <0>
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+
+ or r10, r31, r14 // Save pc/va in case TBmiss on stack
+ mfpr r13, pt_entmm // get entry point
+
+ stq r16, osfsf_a0(sp) // a0
+ or r14, r31, r16 // pass pc/va as a0
+
+ stq r17, osfsf_a1(sp) // a1
+ nop
+
+ stq r18, osfsf_a2(sp) // a2
+ and r25, 1, r17 // Isolate kre
+
+ stq r16, osfsf_pc(sp) // save pc
+ xor r17, 1, r17 // map to acv/tnv as a1
+
+ stq r12, osfsf_ps(sp) // save ps
+ subq r31, 1, r18 // pass flag of istream as a2
+
+ stq r29, osfsf_gp(sp)
+ mtpr r13, exc_addr // set vector address
+
+ mfpr r29, pt_kgp // load kgp
+ hw_rei_spe // out to exec
+
+
+
+
+ ALIGN_BLOCK
+//+
+//invalid_dpte_handler
+// INVALID detected on level 3 pte, sort out TNV vs ACV
+//
+// on entry:
+// with
+// R10 = va
+// R8 = pte
+// R9 = mm_stat
+// PT6 = pc
+//
+// Function
+// Determine TNV vs ACV. Build stack and dispatch
+//-
+
+
+invalid_dpte_handler:
+ mfpr r12, pt6
+ blbs r12, tnv_in_pal // Special handler if original faulting reference was in PALmode
+
+ bis r12, r31, r14 // save PC in case of tbmiss or fault
+ srl r9, mm_stat_v_opcode, r25 // shift opc to <0>
+
+ mtpr r11, pt0 // Save PS for stack write
+ and r25, mm_stat_m_opcode, r25 // isolate opcode
+
+ cmpeq r25, evx_opc_sync, r25 // is it FETCH/FETCH_M?
+ blbs r25, nmiss_fetch_ldr31_err // yes
+
+ //dismiss exception if load to r31/f31
+ blbs r9, invalid_dpte_no_dismiss // mm_stat<0> set on store or fetchm
+
+ // not a store or fetch, must be a load
+ srl r9, mm_stat_v_ra, r25 // Shift rnum to low bits
+
+ and r25, 0x1F, r25 // isolate rnum
+ nop
+
+ cmpeq r25, 0x1F, r25 // Is the rnum r31 or f31?
+ bne r25, nmiss_fetch_ldr31_err // Yes, dismiss the fault
+
+invalid_dpte_no_dismiss:
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ bge r25, invalid_dpte_no_dismiss_10_ // no stack swap needed if cm=kern
+
+ srl r8, osfpte_v_ure-osfpte_v_kre, r8 // move pte user bits to kern
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r11 // Set new PS
+ mfpr r30, pt_ksp
+
+invalid_dpte_no_dismiss_10_: srl r8, osfpte_v_kre, r12 // get kre to <0>
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+
+ or r10, r31, r25 // Save va in case TBmiss on stack
+ and r9, 1, r13 // save r/w flag
+
+ stq r16, osfsf_a0(sp) // a0
+ or r25, r31, r16 // pass va as a0
+
+ stq r17, osfsf_a1(sp) // a1
+ or r31, mmcsr_c_acv, r17 // assume acv
+
+ srl r12, osfpte_v_kwe-osfpte_v_kre, r25 // get write enable to <0>
+ stq r29, osfsf_gp(sp)
+
+ stq r18, osfsf_a2(sp) // a2
+ cmovlbs r13, r25, r12 // if write access move acv based on write enable
+
+ or r13, r31, r18 // pass flag of dstream access and read vs write
+ mfpr r25, pt0 // get ps
+
+ stq r14, osfsf_pc(sp) // save pc
+ mfpr r13, pt_entmm // get entry point
+
+ stq r25, osfsf_ps(sp) // save ps
+ mtpr r13, exc_addr // set vector address
+
+ mfpr r29, pt_kgp // load kgp
+ cmovlbs r12, mmcsr_c_tnv, r17 // make p2 be tnv if access ok else acv
+
+ hw_rei_spe // out to exec
+
+//+
+//
+// We come here if we erred on a dtb_miss and the instruction is a
+// fetch, fetch_m, or a load to r31/f31.
+// The PC is incremented and we return to the program,
+// essentially ignoring the instruction and the error.
+//
+//-
+ ALIGN_BLOCK
+nmiss_fetch_ldr31_err:
+ mfpr r12, pt6
+ addq r12, 4, r12 // bump pc to pc+4
+
+ mtpr r12, exc_addr // and set entry point
+ mfpr r31, pt0 // pad exc_addr write
+
+ hw_rei //
+
+ ALIGN_BLOCK
+//+
+// double_pte_inv
+// We had a single tbmiss which turned into a double tbmiss which found
+// an invalid PTE. Return to single miss with a fake pte, and the invalid
+// single miss flow will report the error.
+//
+// on entry:
+// r21 PTE
+// r22 available
+// VA IPR locked with original fault VA
+// pt4 saved r21
+// pt5 saved r22
+// pt6 original exc_addr
+//
+// on return to tbmiss flow:
+// r8 fake PTE
+//
+//
+//-
+double_pte_inv:
+ srl r21, osfpte_v_kre, r21 // get the kre bit to <0>
+ mfpr r22, exc_addr // get the pc
+
+ lda r22, 4(r22) // inc the pc
+ lda r8, osfpte_m_prot(r31) // make a fake pte with xre and xwe set
+
+ cmovlbc r21, r31, r8 // set to all 0 for acv if pte<kre> is 0
+ mtpr r22, exc_addr // set for rei
+
+ mfpr r21, pt4 // restore regs
+ mfpr r22, pt5 // restore regs
+
+ hw_rei // back to tb miss
+
+ ALIGN_BLOCK
+//+
+//tnv_in_pal
+// The only places in PAL that load or store are the
+// stack builders, rti, or retsys. Any of these means we
+// need to take a ksp not valid halt.
+//
+//-
+tnv_in_pal:
+
+
+ br r31, ksp_inval_halt
+
+
+// .sbttl "Icache flush routines"
+
+ ALIGN_BLOCK
+//+
+// Common Icache flush routine.
+//
+//
+//-
+pal_ic_flush:
+ nop
+ mtpr r31, ev5__ic_flush_ctl // Icache flush - E1
+ nop
+ nop
+
+// Now, do 44 NOPs. 3RFB prefetches (24) + IC buffer,IB,slot,issue (20)
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop // 10
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop // 20
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop // 30
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop // 40
+
+ nop
+ nop
+
+one_cycle_and_hw_rei:
+ nop
+ nop
+
+ hw_rei_stall
+
+#if icflush_on_tbix != 0
+
+
+ ALIGN_BLOCK
+
+//+
+// Common Icache flush and ITB invalidate single routine.
+// ITBIS and hw_rei_stall must be in same octaword.
+// r17 - has address to invalidate
+//
+//-
+pal_ic_flush_and_tbisi:
+ nop
+ mtpr r31, ev5__ic_flush_ctl // Icache flush - E1
+ nop
+ nop
+
+// Now, do 44 NOPs. 3RFB prefetches (24) + IC buffer,IB,slot,issue (20)
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop // 10
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop // 20
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop // 30
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop // 40
+
+
+ nop
+ nop
+
+ nop
+ nop
+
+	// A quadword is 64 bits, so an octaword is 128 bits -> 16 bytes -> 4 instructions.
+	// The 44 nops plus the 4 instructions before them make 48 instructions (192 bytes).
+	// Since this routine started on a 32-byte (8 instruction) boundary,
+	// the following 2 instructions fall in the same octaword, as required.
+// ALIGN_BRANCH
+ mtpr r17, ev5__itb_is // Flush ITB
+ hw_rei_stall
+
+#endif
+
+ ALIGN_BLOCK
+//+
+//osfpal_calpal_opcdec
+// Here for all opcdec CALL_PALs
+//
+// Build stack frame
+// a0 <- code
+// a1 <- unpred
+// a2 <- unpred
+// vector via entIF
+//
+//-
+
+osfpal_calpal_opcdec:
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ mfpr r14, exc_addr // get pc
+ nop
+
+ bis r11, r31, r12 // Save PS for stack write
+ bge r25, osfpal_calpal_opcdec_10_ // no stack swap needed if cm=kern
+
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r11 // Set new PS
+ mfpr r30, pt_ksp
+
+osfpal_calpal_opcdec_10_:
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+ nop
+
+ stq r16, osfsf_a0(sp) // save regs
+ bis r31, osf_a0_opdec, r16 // set a0
+
+ stq r18, osfsf_a2(sp) // a2
+ mfpr r13, pt_entif // get entry point
+
+ stq r12, osfsf_ps(sp) // save old ps
+ stq r17, osfsf_a1(sp) // a1
+
+ stq r14, osfsf_pc(sp) // save pc
+ nop
+
+ stq r29, osfsf_gp(sp) // save gp
+ mtpr r13, exc_addr // load exc_addr with entIF
+ // 1 cycle to hw_rei
+
+ mfpr r29, pt_kgp // get the kgp
+
+
+ hw_rei_spe // done
+
+
+
+
+
+//+
+//pal_update_pcb
+// Update the PCB with the current SP, AST, and CC info
+//
+// r0 - return linkage
+//-
+ ALIGN_BLOCK
+
+pal_update_pcb:
+ mfpr r12, pt_pcbb // get pcbb
+ and r11, osfps_m_mode, r25 // get mode
+ beq r25, pal_update_pcb_10_ // in kern? no need to update user sp
+ mtpr r30, pt_usp // save user stack
+ stqp r30, osfpcb_q_usp(r12) // store usp
+ br r31, pal_update_pcb_20_ // join common
+pal_update_pcb_10_: stqp r30, osfpcb_q_ksp(r12) // store ksp
+pal_update_pcb_20_: rpcc r13 // get cyccounter
+ srl r13, 32, r14 // move offset
+ addl r13, r14, r14 // merge for new time
+ stlp r14, osfpcb_l_cc(r12) // save time
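+	// rpcc returns the hardware cycle counter in the low longword and a per-process
+	// offset in the high longword; the srl/addl above fold them into the process
+	// cycle count stored in the PCB.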
+
+//orig pvc_jsr updpcb, bsr=1, dest=1
+ ret r31, (r0)
+
+
+
+#if remove_save_state == 0
+
+// .sbttl "PAL_SAVE_STATE"
+//+
+//
+// Pal_save_state
+//
+// Function
+//	All chip state saved: all PTs, SRs, FRs, IPRs
+//
+//
+//	Regs on entry...
+//
+// R0 = halt code
+// pt0 = r0
+// R1 = pointer to impure
+// pt4 = r1
+// R3 = return addr
+// pt5 = r3
+//
+// register usage:
+// r0 = halt_code
+// r1 = addr of impure area
+// r3 = return_address
+// r4 = scratch
+//
+//-
+
+
+ ALIGN_BLOCK
+ .globl pal_save_state
+pal_save_state:
+//
+//
+// start of implementation independent save routine
+//
+// the impure area is larger than the addressability of hw_ld and hw_st
+// therefore, we need to play some games: The impure area
+// is informally divided into the "machine independent" part and the
+// "machine dependent" part. The state that will be saved in the
+// "machine independent" part are gpr's, fpr's, hlt, flag, mchkflag (use (un)fix_impure_gpr macros).
+// All others will be in the "machine dependent" part (use (un)fix_impure_ipr macros).
+// The impure pointer will need to be adjusted by a different offset for each. The store/restore_reg
+// macros will automagically adjust the offset correctly.
+//
+
+// The distributed code is commented out and followed by corresponding SRC code.
+// Beware: SAVE_IPR and RESTORE_IPR blow away r0(v0)
+
+//orig fix_impure_gpr r1 // adjust impure area pointer for stores to "gpr" part of impure area
+ lda r1, 0x200(r1) // Point to center of CPU segment
+//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area flag
+ SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the valid flag
+//orig store_reg1 hlt, r0, r1, ipr=1
+ SAVE_GPR(r0,CNS_Q_HALT,r1) // Save the halt code
+
+ mfpr r0, pt0 // get r0 back //orig
+//orig store_reg1 0, r0, r1 // save r0
+ SAVE_GPR(r0,CNS_Q_GPR+0x00,r1) // Save r0
+
+ mfpr r0, pt4 // get r1 back //orig
+//orig store_reg1 1, r0, r1 // save r1
+ SAVE_GPR(r0,CNS_Q_GPR+0x08,r1) // Save r1
+
+//orig store_reg 2 // save r2
+ SAVE_GPR(r2,CNS_Q_GPR+0x10,r1) // Save r2
+
+ mfpr r0, pt5 // get r3 back //orig
+//orig store_reg1 3, r0, r1 // save r3
+ SAVE_GPR(r0,CNS_Q_GPR+0x18,r1) // Save r3
+
+ // reason code has been saved
+ // r0 has been saved
+ // r1 has been saved
+ // r2 has been saved
+ // r3 has been saved
+ // pt0, pt4, pt5 have been lost
+
+ //
+ // Get out of shadow mode
+ //
+
+ mfpr r2, icsr // Get icsr //orig
+//orig ldah r0, <1@<icsr_v_sde-16>>(r31) // Get a one in SHADOW_ENABLE bit location
+ ldah r0, (1<<(icsr_v_sde-16))(r31)
+ bic r2, r0, r0 // ICSR with SDE clear //orig
+ mtpr r0, icsr // Turn off SDE //orig
+
+ mfpr r31, pt0 // SDE bubble cycle 1 //orig
+ mfpr r31, pt0 // SDE bubble cycle 2 //orig
+ mfpr r31, pt0 // SDE bubble cycle 3 //orig
+ nop //orig
+
+
+ // save integer regs R4-r31
+//orig #define t 4
+//orig .repeat 28
+//orig store_reg \t
+//orig #define t t + 1
+//orig .endr
+ SAVE_GPR(r4,CNS_Q_GPR+0x20,r1)
+ SAVE_GPR(r5,CNS_Q_GPR+0x28,r1)
+ SAVE_GPR(r6,CNS_Q_GPR+0x30,r1)
+ SAVE_GPR(r7,CNS_Q_GPR+0x38,r1)
+ SAVE_GPR(r8,CNS_Q_GPR+0x40,r1)
+ SAVE_GPR(r9,CNS_Q_GPR+0x48,r1)
+ SAVE_GPR(r10,CNS_Q_GPR+0x50,r1)
+ SAVE_GPR(r11,CNS_Q_GPR+0x58,r1)
+ SAVE_GPR(r12,CNS_Q_GPR+0x60,r1)
+ SAVE_GPR(r13,CNS_Q_GPR+0x68,r1)
+ SAVE_GPR(r14,CNS_Q_GPR+0x70,r1)
+ SAVE_GPR(r15,CNS_Q_GPR+0x78,r1)
+ SAVE_GPR(r16,CNS_Q_GPR+0x80,r1)
+ SAVE_GPR(r17,CNS_Q_GPR+0x88,r1)
+ SAVE_GPR(r18,CNS_Q_GPR+0x90,r1)
+ SAVE_GPR(r19,CNS_Q_GPR+0x98,r1)
+ SAVE_GPR(r20,CNS_Q_GPR+0xA0,r1)
+ SAVE_GPR(r21,CNS_Q_GPR+0xA8,r1)
+ SAVE_GPR(r22,CNS_Q_GPR+0xB0,r1)
+ SAVE_GPR(r23,CNS_Q_GPR+0xB8,r1)
+ SAVE_GPR(r24,CNS_Q_GPR+0xC0,r1)
+ SAVE_GPR(r25,CNS_Q_GPR+0xC8,r1)
+ SAVE_GPR(r26,CNS_Q_GPR+0xD0,r1)
+ SAVE_GPR(r27,CNS_Q_GPR+0xD8,r1)
+ SAVE_GPR(r28,CNS_Q_GPR+0xE0,r1)
+ SAVE_GPR(r29,CNS_Q_GPR+0xE8,r1)
+ SAVE_GPR(r30,CNS_Q_GPR+0xF0,r1)
+ SAVE_GPR(r31,CNS_Q_GPR+0xF8,r1)
+
+ // save all paltemp regs except pt0
+
+//orig unfix_impure_gpr r1 // adjust impure area pointer for gpr stores
+//orig fix_impure_ipr r1 // adjust impure area pointer for pt stores
+//orig #define t 1
+//orig .repeat 23
+//orig store_reg \t , pal=1
+//orig #define t t + 1
+//orig .endr
+
+ lda r1, -0x200(r1) // Restore the impure base address.
+ lda r1, CNS_Q_IPR(r1) // Point to the base of IPR area.
+ SAVE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
+ SAVE_IPR(pt1,CNS_Q_PT+0x08,r1)
+ SAVE_IPR(pt2,CNS_Q_PT+0x10,r1)
+ SAVE_IPR(pt3,CNS_Q_PT+0x18,r1)
+ SAVE_IPR(pt4,CNS_Q_PT+0x20,r1)
+ SAVE_IPR(pt5,CNS_Q_PT+0x28,r1)
+ SAVE_IPR(pt6,CNS_Q_PT+0x30,r1)
+ SAVE_IPR(pt7,CNS_Q_PT+0x38,r1)
+ SAVE_IPR(pt8,CNS_Q_PT+0x40,r1)
+ SAVE_IPR(pt9,CNS_Q_PT+0x48,r1)
+ SAVE_IPR(pt10,CNS_Q_PT+0x50,r1)
+ SAVE_IPR(pt11,CNS_Q_PT+0x58,r1)
+ SAVE_IPR(pt12,CNS_Q_PT+0x60,r1)
+ SAVE_IPR(pt13,CNS_Q_PT+0x68,r1)
+ SAVE_IPR(pt14,CNS_Q_PT+0x70,r1)
+ SAVE_IPR(pt15,CNS_Q_PT+0x78,r1)
+ SAVE_IPR(pt16,CNS_Q_PT+0x80,r1)
+ SAVE_IPR(pt17,CNS_Q_PT+0x88,r1)
+ SAVE_IPR(pt18,CNS_Q_PT+0x90,r1)
+ SAVE_IPR(pt19,CNS_Q_PT+0x98,r1)
+ SAVE_IPR(pt20,CNS_Q_PT+0xA0,r1)
+ SAVE_IPR(pt21,CNS_Q_PT+0xA8,r1)
+ SAVE_IPR(pt22,CNS_Q_PT+0xB0,r1)
+ SAVE_IPR(pt23,CNS_Q_PT+0xB8,r1)
+
+ // Restore shadow mode
+ mfpr r31, pt0 // pad write to icsr out of shadow of store (trap does not abort write) //orig
+ mfpr r31, pt0 //orig
+ mtpr r2, icsr // Restore original ICSR //orig
+
+ mfpr r31, pt0 // SDE bubble cycle 1 //orig
+ mfpr r31, pt0 // SDE bubble cycle 2 //orig
+ mfpr r31, pt0 // SDE bubble cycle 3 //orig
+ nop //orig
+
+ // save all integer shadow regs
+
+//orig #define t 8
+//orig .repeat 7
+//orig store_reg \t, shadow=1
+//orig #define t t + 1
+//orig .endr
+//orig store_reg 25, shadow=1
+
+ SAVE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
+ SAVE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
+ SAVE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
+ SAVE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
+ SAVE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
+ SAVE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
+ SAVE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
+ SAVE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
+
+//orig store_reg exc_addr, ipr=1 // save ipr
+//orig store_reg pal_base, ipr=1 // save ipr
+//orig store_reg mm_stat, ipr=1 // save ipr
+//orig store_reg va, ipr=1 // save ipr
+//orig store_reg icsr, ipr=1 // save ipr
+//orig store_reg ipl, ipr=1 // save ipr
+//orig store_reg ps, ipr=1 // save ipr
+//orig store_reg itb_asn, ipr=1 // save ipr
+//orig store_reg aster, ipr=1 // save ipr
+//orig store_reg astrr, ipr=1 // save ipr
+//orig store_reg sirr, ipr=1 // save ipr
+//orig store_reg isr, ipr=1 // save ipr
+//orig store_reg ivptbr, ipr=1 // save ipr
+//orig store_reg mcsr, ipr=1 // save ipr
+//orig store_reg dc_mode, ipr=1 // save ipr
+
+ SAVE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
+ SAVE_IPR(palBase,CNS_Q_PAL_BASE,r1)
+ SAVE_IPR(mmStat,CNS_Q_MM_STAT,r1)
+ SAVE_IPR(va,CNS_Q_VA,r1)
+ SAVE_IPR(icsr,CNS_Q_ICSR,r1)
+ SAVE_IPR(ipl,CNS_Q_IPL,r1)
+ SAVE_IPR(ips,CNS_Q_IPS,r1)
+ SAVE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
+ SAVE_IPR(aster,CNS_Q_ASTER,r1)
+ SAVE_IPR(astrr,CNS_Q_ASTRR,r1)
+ SAVE_IPR(sirr,CNS_Q_SIRR,r1)
+ SAVE_IPR(isr,CNS_Q_ISR,r1)
+ SAVE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
+ SAVE_IPR(mcsr,CNS_Q_MCSR,r1)
+ SAVE_IPR(dcMode,CNS_Q_DC_MODE,r1)
+
+//orig pvc_violate 379 // mf maf_mode after a store ok (pvc doesn't distinguish ld from st)
+//orig store_reg maf_mode, ipr=1 // save ipr -- no mbox instructions for
+//orig // PVC violation applies only to
+pvc$osf35$379: // loads. HW_ST ok here, so ignore
+ SAVE_IPR(mafMode,CNS_Q_MAF_MODE,r1) // MBOX INST->MF MAF_MODE IN 0,1,2
+
+
+ //the following iprs are informational only -- will not be restored
+
+//orig store_reg icperr_stat, ipr=1
+//orig store_reg pmctr, ipr=1
+//orig store_reg intid, ipr=1
+//orig store_reg exc_sum, ipr=1
+//orig store_reg exc_mask, ipr=1
+//orig ldah r14, 0xfff0(r31)
+//orig zap r14, 0xE0, r14 // Get Cbox IPR base
+//orig nop // pad mf dcperr_stat out of shadow of last store
+//orig nop
+//orig nop
+//orig store_reg dcperr_stat, ipr=1
+
+ SAVE_IPR(icPerr,CNS_Q_ICPERR_STAT,r1)
+ SAVE_IPR(PmCtr,CNS_Q_PM_CTR,r1)
+ SAVE_IPR(intId,CNS_Q_INT_ID,r1)
+ SAVE_IPR(excSum,CNS_Q_EXC_SUM,r1)
+ SAVE_IPR(excMask,CNS_Q_EXC_MASK,r1)
+ ldah r14, 0xFFF0(zero)
+ zap r14, 0xE0, r14 // Get base address of CBOX IPRs
+ NOP // Pad mfpr dcPerr out of shadow of
+ NOP // last store
+ NOP
+ SAVE_IPR(dcPerr,CNS_Q_DCPERR_STAT,r1)
+
+ // read cbox ipr state
+
+//orig mb
+//orig ldqp r2, ev5__sc_ctl(r14)
+//orig ldqp r13, ld_lock(r14)
+//orig ldqp r4, ev5__sc_addr(r14)
+//orig ldqp r5, ev5__ei_addr(r14)
+//orig ldqp r6, ev5__bc_tag_addr(r14)
+//orig ldqp r7, ev5__fill_syn(r14)
+//orig bis r5, r4, r31
+//orig bis r7, r6, r31 // make sure previous loads finish before reading stat registers which unlock them
+//orig ldqp r8, ev5__sc_stat(r14) // unlocks sc_stat,sc_addr
+//orig ldqp r9, ev5__ei_stat(r14) // may unlock ei_*, bc_tag_addr, fill_syn
+//orig ldqp r31, ev5__ei_stat(r14) // ensures it is really unlocked
+//orig mb
+
+#ifndef SIMOS
+ mb
+ ldq_p r2, scCtl(r14)
+ ldq_p r13, ldLock(r14)
+ ldq_p r4, scAddr(r14)
+ ldq_p r5, eiAddr(r14)
+ ldq_p r6, bcTagAddr(r14)
+ ldq_p r7, fillSyn(r14)
+ bis r5, r4, zero // Make sure all loads complete before
+ bis r7, r6, zero // reading registers that unlock them.
+ ldq_p r8, scStat(r14) // Unlocks scAddr.
+ ldq_p r9, eiStat(r14) // Unlocks eiAddr, bcTagAddr, fillSyn.
+ ldq_p zero, eiStat(r14) // Make sure it is really unlocked.
+ mb
+#endif
+//orig // save cbox ipr state
+//orig store_reg1 sc_ctl, r2, r1, ipr=1
+//orig store_reg1 ld_lock, r13, r1, ipr=1
+//orig store_reg1 sc_addr, r4, r1, ipr=1
+//orig store_reg1 ei_addr, r5, r1, ipr=1
+//orig store_reg1 bc_tag_addr, r6, r1, ipr=1
+//orig store_reg1 fill_syn, r7, r1, ipr=1
+//orig store_reg1 sc_stat, r8, r1, ipr=1
+//orig store_reg1 ei_stat, r9, r1, ipr=1
+//orig //bc_config? sl_rcv?
+
+ SAVE_SHADOW(r2,CNS_Q_SC_CTL,r1);
+ SAVE_SHADOW(r13,CNS_Q_LD_LOCK,r1);
+ SAVE_SHADOW(r4,CNS_Q_SC_ADDR,r1);
+ SAVE_SHADOW(r5,CNS_Q_EI_ADDR,r1);
+ SAVE_SHADOW(r6,CNS_Q_BC_TAG_ADDR,r1);
+ SAVE_SHADOW(r7,CNS_Q_FILL_SYN,r1);
+ SAVE_SHADOW(r8,CNS_Q_SC_STAT,r1);
+ SAVE_SHADOW(r9,CNS_Q_EI_STAT,r1);
+
+// restore impure base //orig
+//orig unfix_impure_ipr r1
+ lda r1, -CNS_Q_IPR(r1)
+
+// save all floating regs //orig
+ mfpr r0, icsr // get icsr //orig
+ or r31, 1, r2 // get a one //orig
+//orig sll r2, #icsr_v_fpe, r2 // shift for fpu spot //orig
+ sll r2, icsr_v_fpe, r2 // Shift it into ICSR<FPE> position
+ or r2, r0, r0 // set FEN on //orig
+ mtpr r0, icsr // write to icsr, enabling FEN //orig
+
+// map the save area virtually
+// orig mtpr r31, dtb_ia // clear the dtb
+// orig srl r1, page_offset_size_bits, r0 // Clean off low bits of VA
+// orig sll r0, 32, r0 // shift to PFN field
+// orig lda r2, 0xff(r31) // all read enable and write enable bits set
+// orig sll r2, 8, r2 // move to PTE location
+// orig addq r0, r2, r0 // combine with PFN
+// orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
+// orig mtpr r1, dtb_tag // write TB tag
+
+ mtpr r31, dtbIa // Clear all DTB entries
+ srl r1, va_s_off, r0 // Clean off byte-within-page offset
+ sll r0, pte_v_pfn, r0 // Shift to form PFN
+ lda r0, pte_m_prot(r0) // Set all read/write enable bits
+ mtpr r0, dtbPte // Load the PTE and set valid
+ mtpr r1, dtbTag // Write the PTE and tag into the DTB
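+	// Net effect: the PFN is simply (r1 >> va_s_off), so the impure area is
+	// identity-mapped (VA == PA) with all read/write enables set while state is saved.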
+
+
+//orig // map the next page too - in case the impure area crosses a page boundary
+//orig lda r4, 1@page_offset_size_bits(r1) // generate address for next page
+//orig srl r4, page_offset_size_bits, r0 // Clean off low bits of VA
+//orig sll r0, 32, r0 // shift to PFN field
+//orig lda r2, 0xff(r31) // all read enable and write enable bits set
+//orig sll r2, 8, r2 // move to PTE location
+//orig addq r0, r2, r0 // combine with PFN
+//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
+//orig mtpr r4, dtb_tag // write TB tag
+
+ lda r4, (1<<va_s_off)(r1) // Generate address for next page
+ srl r4, va_s_off, r0 // Clean off byte-within-page offset
+ sll r0, pte_v_pfn, r0 // Shift to form PFN
+ lda r0, pte_m_prot(r0) // Set all read/write enable bits
+ mtpr r0, dtbPte // Load the PTE and set valid
+ mtpr r4, dtbTag // Write the PTE and tag into the DTB
+
+ sll r31, 0, r31 // stall cycle 1 // orig
+ sll r31, 0, r31 // stall cycle 2 // orig
+ sll r31, 0, r31 // stall cycle 3 // orig
+ nop // orig
+
+//orig // add offset for saving fpr regs
+//orig fix_impure_gpr r1
+
+ lda r1, 0x200(r1) // Point to center of CPU segment
+
+// now save the regs - F0-F31
+
+//orig #define t 0
+//orig .repeat 32
+//orig store_reg \t , fpu=1
+//orig #define t t + 1
+//orig .endr
+
+ mf_fpcr f0 // original
+
+ SAVE_FPR(f0,CNS_Q_FPR+0x00,r1)
+ SAVE_FPR(f1,CNS_Q_FPR+0x08,r1)
+ SAVE_FPR(f2,CNS_Q_FPR+0x10,r1)
+ SAVE_FPR(f3,CNS_Q_FPR+0x18,r1)
+ SAVE_FPR(f4,CNS_Q_FPR+0x20,r1)
+ SAVE_FPR(f5,CNS_Q_FPR+0x28,r1)
+ SAVE_FPR(f6,CNS_Q_FPR+0x30,r1)
+ SAVE_FPR(f7,CNS_Q_FPR+0x38,r1)
+ SAVE_FPR(f8,CNS_Q_FPR+0x40,r1)
+ SAVE_FPR(f9,CNS_Q_FPR+0x48,r1)
+ SAVE_FPR(f10,CNS_Q_FPR+0x50,r1)
+ SAVE_FPR(f11,CNS_Q_FPR+0x58,r1)
+ SAVE_FPR(f12,CNS_Q_FPR+0x60,r1)
+ SAVE_FPR(f13,CNS_Q_FPR+0x68,r1)
+ SAVE_FPR(f14,CNS_Q_FPR+0x70,r1)
+ SAVE_FPR(f15,CNS_Q_FPR+0x78,r1)
+ SAVE_FPR(f16,CNS_Q_FPR+0x80,r1)
+ SAVE_FPR(f17,CNS_Q_FPR+0x88,r1)
+ SAVE_FPR(f18,CNS_Q_FPR+0x90,r1)
+ SAVE_FPR(f19,CNS_Q_FPR+0x98,r1)
+ SAVE_FPR(f20,CNS_Q_FPR+0xA0,r1)
+ SAVE_FPR(f21,CNS_Q_FPR+0xA8,r1)
+ SAVE_FPR(f22,CNS_Q_FPR+0xB0,r1)
+ SAVE_FPR(f23,CNS_Q_FPR+0xB8,r1)
+ SAVE_FPR(f24,CNS_Q_FPR+0xC0,r1)
+ SAVE_FPR(f25,CNS_Q_FPR+0xC8,r1)
+ SAVE_FPR(f26,CNS_Q_FPR+0xD0,r1)
+ SAVE_FPR(f27,CNS_Q_FPR+0xD8,r1)
+ SAVE_FPR(f28,CNS_Q_FPR+0xE0,r1)
+ SAVE_FPR(f29,CNS_Q_FPR+0xE8,r1)
+ SAVE_FPR(f30,CNS_Q_FPR+0xF0,r1)
+ SAVE_FPR(f31,CNS_Q_FPR+0xF8,r1)
+
+//orig //switch impure offset from gpr to ipr---
+//orig unfix_impure_gpr r1
+//orig fix_impure_ipr r1
+//orig store_reg1 fpcsr, f0, r1, fpcsr=1
+
+ SAVE_FPR(f0,CNS_Q_FPCSR,r1) // fpcsr loaded above into f0 -- can it reach// pb
+ lda r1, -0x200(r1) // Restore the impure base address
+
+//orig // and back to gpr ---
+//orig unfix_impure_ipr r1
+//orig fix_impure_gpr r1
+
+//orig lda r0, cns_mchksize(r31) // get size of mchk area
+//orig store_reg1 mchkflag, r0, r1, ipr=1
+//orig mb
+
+ lda r1, CNS_Q_IPR(r1) // Point to base of IPR area again
+	// save this using the IPR base (it is closer) not the GPR base as they used...pb
+ lda r0, MACHINE_CHECK_SIZE(r31) // get size of mchk area
+ SAVE_SHADOW(r0,CNS_Q_MCHK,r1);
+ mb
+
+//orig or r31, 1, r0 // get a one
+//orig store_reg1 flag, r0, r1, ipr=1 // set dump area flag
+//orig mb
+
+ lda r1, -CNS_Q_IPR(r1) // back to the base
+ lda r1, 0x200(r1) // Point to center of CPU segment
+ or r31, 1, r0 // get a one
+ SAVE_GPR(r0,CNS_Q_FLAG,r1) // // set dump area valid flag
+ mb
+
+//orig // restore impure area base
+//orig unfix_impure_gpr r1
+ lda r1, -0x200(r1) // Point to center of CPU segment
+
+ mtpr r31, dtb_ia // clear the dtb //orig
+ mtpr r31, itb_ia // clear the itb //orig
+
+//orig pvc_jsr savsta, bsr=1, dest=1
+ ret r31, (r3) // and back we go
+#endif
+
+
+#if remove_restore_state == 0
+
+
+// .sbttl "PAL_RESTORE_STATE"
+//+
+//
+// Pal_restore_state
+//
+//
+// register usage:
+// r1 = addr of impure area
+// r3 = return_address
+// all other regs are scratchable, as they are about to
+// be reloaded from ram.
+//
+// Function:
+// All chip state restored, all SRs, FRs, PTs, IPRs
+// *** except R1, R3, PT0, PT4, PT5 ***
+//
+//-
+ ALIGN_BLOCK
+pal_restore_state:
+
+//need to restore sc_ctl,bc_ctl,bc_config??? if so, need to figure out a safe way to do so.
+
+//orig // map the console io area virtually
+//orig mtpr r31, dtb_ia // clear the dtb
+//orig srl r1, page_offset_size_bits, r0 // Clean off low bits of VA
+//orig sll r0, 32, r0 // shift to PFN field
+//orig lda r2, 0xff(r31) // all read enable and write enable bits set
+//orig sll r2, 8, r2 // move to PTE location
+//orig addq r0, r2, r0 // combine with PFN
+//orig
+//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
+//orig mtpr r1, dtb_tag // write TB tag
+//orig
+
+ mtpr r31, dtbIa // Clear all DTB entries
+ srl r1, va_s_off, r0 // Clean off byte-within-page offset
+ sll r0, pte_v_pfn, r0 // Shift to form PFN
+ lda r0, pte_m_prot(r0) // Set all read/write enable bits
+ mtpr r0, dtbPte // Load the PTE and set valid
+ mtpr r1, dtbTag // Write the PTE and tag into the DTB
+
+
+//orig // map the next page too, in case impure area crosses page boundary
+//orig lda r4, 1@page_offset_size_bits(r1) // generate address for next page
+//orig srl r4, page_offset_size_bits, r0 // Clean off low bits of VA
+//orig sll r0, 32, r0 // shift to PFN field
+//orig lda r2, 0xff(r31) // all read enable and write enable bits set
+//orig sll r2, 8, r2 // move to PTE location
+//orig addq r0, r2, r0 // combine with PFN
+//orig
+//orig mtpr r0, dtb_pte // Load PTE and set TB valid bit
+//orig mtpr r4, dtb_tag // write TB tag - no virtual mbox instruction for 3 cycles
+
+ lda r4, (1<<VA_S_OFF)(r1) // Generate address for next page
+ srl r4, va_s_off, r0 // Clean off byte-within-page offset
+ sll r0, pte_v_pfn, r0 // Shift to form PFN
+ lda r0, pte_m_prot(r0) // Set all read/write enable bits
+ mtpr r0, dtbPte // Load the PTE and set valid
+ mtpr r4, dtbTag // Write the PTE and tag into the DTB
+
+//orig // save all floating regs
+//orig mfpr r0, icsr // get icsr
+//orig// assume ICSR_V_SDE gt <ICSR_V_FPE> // assertion checker
+//orig or r31, <<1@<ICSR_V_SDE-ICSR_V_FPE>> ! 1>, r2 // set SDE and FPE
+//orig sll r2, #icsr_v_fpe, r2 // shift for fpu spot
+//orig or r2, r0, r0 // set FEN on
+//orig mtpr r0, icsr // write to icsr, enabling FEN and SDE. 3 bubbles to floating instr.
+
+ mfpr r0, icsr // Get current ICSR
+ bis zero, 1, r2 // Get a '1'
+ or r2, (1<<(icsr_v_sde-icsr_v_fpe)), r2
+ sll r2, icsr_v_fpe, r2 // Shift bits into position
+	bis	r2, r0, r0		// OR the SDE and FPE enables into the saved ICSR value
+ mtpr r0, icsr // Update the chip
+
+ mfpr r31, pt0 // FPE bubble cycle 1 //orig
+ mfpr r31, pt0 // FPE bubble cycle 2 //orig
+ mfpr r31, pt0 // FPE bubble cycle 3 //orig
+
+//orig fix_impure_ipr r1
+//orig restore_reg1 fpcsr, f0, r1, fpcsr=1
+//orig mt_fpcr f0
+//orig
+//orig unfix_impure_ipr r1
+//orig fix_impure_gpr r1 // adjust impure pointer offset for gpr access
+//orig
+//orig // restore all floating regs
+//orig#define t 0
+//orig .repeat 32
+//orig restore_reg \t , fpu=1
+//orig#define t t + 1
+//orig .endr
+
+ lda r1, 200(r1) // Point to base of IPR area again
+ RESTORE_FPR(f0,CNS_Q_FPCSR,r1) // can it reach?? pb
+ mt_fpcr f0 // original
+
+ lda r1, 0x200(r1) // point to center of CPU segment
+ RESTORE_FPR(f0,CNS_Q_FPR+0x00,r1)
+ RESTORE_FPR(f1,CNS_Q_FPR+0x08,r1)
+ RESTORE_FPR(f2,CNS_Q_FPR+0x10,r1)
+ RESTORE_FPR(f3,CNS_Q_FPR+0x18,r1)
+ RESTORE_FPR(f4,CNS_Q_FPR+0x20,r1)
+ RESTORE_FPR(f5,CNS_Q_FPR+0x28,r1)
+ RESTORE_FPR(f6,CNS_Q_FPR+0x30,r1)
+ RESTORE_FPR(f7,CNS_Q_FPR+0x38,r1)
+ RESTORE_FPR(f8,CNS_Q_FPR+0x40,r1)
+ RESTORE_FPR(f9,CNS_Q_FPR+0x48,r1)
+ RESTORE_FPR(f10,CNS_Q_FPR+0x50,r1)
+ RESTORE_FPR(f11,CNS_Q_FPR+0x58,r1)
+ RESTORE_FPR(f12,CNS_Q_FPR+0x60,r1)
+ RESTORE_FPR(f13,CNS_Q_FPR+0x68,r1)
+ RESTORE_FPR(f14,CNS_Q_FPR+0x70,r1)
+ RESTORE_FPR(f15,CNS_Q_FPR+0x78,r1)
+ RESTORE_FPR(f16,CNS_Q_FPR+0x80,r1)
+ RESTORE_FPR(f17,CNS_Q_FPR+0x88,r1)
+ RESTORE_FPR(f18,CNS_Q_FPR+0x90,r1)
+ RESTORE_FPR(f19,CNS_Q_FPR+0x98,r1)
+ RESTORE_FPR(f20,CNS_Q_FPR+0xA0,r1)
+ RESTORE_FPR(f21,CNS_Q_FPR+0xA8,r1)
+ RESTORE_FPR(f22,CNS_Q_FPR+0xB0,r1)
+ RESTORE_FPR(f23,CNS_Q_FPR+0xB8,r1)
+ RESTORE_FPR(f24,CNS_Q_FPR+0xC0,r1)
+ RESTORE_FPR(f25,CNS_Q_FPR+0xC8,r1)
+ RESTORE_FPR(f26,CNS_Q_FPR+0xD0,r1)
+ RESTORE_FPR(f27,CNS_Q_FPR+0xD8,r1)
+ RESTORE_FPR(f28,CNS_Q_FPR+0xE0,r1)
+ RESTORE_FPR(f29,CNS_Q_FPR+0xE8,r1)
+ RESTORE_FPR(f30,CNS_Q_FPR+0xF0,r1)
+ RESTORE_FPR(f31,CNS_Q_FPR+0xF8,r1)
+
+//orig // switch impure pointer from gpr to ipr area --
+//orig unfix_impure_gpr r1
+//orig fix_impure_ipr r1
+//orig
+//orig // restore all pal regs
+//orig#define t 1
+//orig .repeat 23
+//orig restore_reg \t , pal=1
+//orig#define t t + 1
+//orig .endr
+
+ lda r1, -0x200(r1) // Restore base address of impure area.
+ lda r1, CNS_Q_IPR(r1) // Point to base of IPR area.
+ RESTORE_IPR(pt0,CNS_Q_PT+0x00,r1) // the osf code didn't save/restore palTemp 0 ?? pboyle
+ RESTORE_IPR(pt1,CNS_Q_PT+0x08,r1)
+ RESTORE_IPR(pt2,CNS_Q_PT+0x10,r1)
+ RESTORE_IPR(pt3,CNS_Q_PT+0x18,r1)
+ RESTORE_IPR(pt4,CNS_Q_PT+0x20,r1)
+ RESTORE_IPR(pt5,CNS_Q_PT+0x28,r1)
+ RESTORE_IPR(pt6,CNS_Q_PT+0x30,r1)
+ RESTORE_IPR(pt7,CNS_Q_PT+0x38,r1)
+ RESTORE_IPR(pt8,CNS_Q_PT+0x40,r1)
+ RESTORE_IPR(pt9,CNS_Q_PT+0x48,r1)
+ RESTORE_IPR(pt10,CNS_Q_PT+0x50,r1)
+ RESTORE_IPR(pt11,CNS_Q_PT+0x58,r1)
+ RESTORE_IPR(pt12,CNS_Q_PT+0x60,r1)
+ RESTORE_IPR(pt13,CNS_Q_PT+0x68,r1)
+ RESTORE_IPR(pt14,CNS_Q_PT+0x70,r1)
+ RESTORE_IPR(pt15,CNS_Q_PT+0x78,r1)
+ RESTORE_IPR(pt16,CNS_Q_PT+0x80,r1)
+ RESTORE_IPR(pt17,CNS_Q_PT+0x88,r1)
+ RESTORE_IPR(pt18,CNS_Q_PT+0x90,r1)
+ RESTORE_IPR(pt19,CNS_Q_PT+0x98,r1)
+ RESTORE_IPR(pt20,CNS_Q_PT+0xA0,r1)
+ RESTORE_IPR(pt21,CNS_Q_PT+0xA8,r1)
+ RESTORE_IPR(pt22,CNS_Q_PT+0xB0,r1)
+ RESTORE_IPR(pt23,CNS_Q_PT+0xB8,r1)
+
+
+//orig restore_reg exc_addr, ipr=1 // restore ipr
+//orig restore_reg pal_base, ipr=1 // restore ipr
+//orig restore_reg ipl, ipr=1 // restore ipr
+//orig restore_reg ps, ipr=1 // restore ipr
+//orig mtpr r0, dtb_cm // set current mode in mbox too
+//orig restore_reg itb_asn, ipr=1
+//orig srl r0, itb_asn_v_asn, r0
+//orig sll r0, dtb_asn_v_asn, r0
+//orig mtpr r0, dtb_asn // set ASN in Mbox too
+//orig restore_reg ivptbr, ipr=1
+//orig mtpr r0, mvptbr // use ivptbr value to restore mvptbr
+//orig restore_reg mcsr, ipr=1
+//orig restore_reg aster, ipr=1
+//orig restore_reg astrr, ipr=1
+//orig restore_reg sirr, ipr=1
+//orig restore_reg maf_mode, ipr=1 // no mbox instruction for 3 cycles
+//orig mfpr r31, pt0 // (may issue with mt maf_mode)
+//orig mfpr r31, pt0 // bubble cycle 1
+//orig mfpr r31, pt0 // bubble cycle 2
+//orig mfpr r31, pt0 // bubble cycle 3
+//orig mfpr r31, pt0 // (may issue with following ld)
+
+	// The RESTORE_IPR macro leaves the restored value in r0, and the code below relies on that side effect (gag)
+ RESTORE_IPR(excAddr,CNS_Q_EXC_ADDR,r1)
+ RESTORE_IPR(palBase,CNS_Q_PAL_BASE,r1)
+ RESTORE_IPR(ipl,CNS_Q_IPL,r1)
+ RESTORE_IPR(ips,CNS_Q_IPS,r1)
+ mtpr r0, dtbCm // Set Mbox current mode too.
+ RESTORE_IPR(itbAsn,CNS_Q_ITB_ASN,r1)
+ srl r0, 4, r0
+ sll r0, 57, r0
+ mtpr r0, dtbAsn // Set Mbox ASN too
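+	// (the literal shifts of 4 and 57 are the itb_asn_v_asn and
+	//  dtb_asn_v_asn field positions used by the original macro code above)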
+ RESTORE_IPR(iVptBr,CNS_Q_IVPTBR,r1)
+ mtpr r0, mVptBr // Set Mbox VptBr too
+ RESTORE_IPR(mcsr,CNS_Q_MCSR,r1)
+ RESTORE_IPR(aster,CNS_Q_ASTER,r1)
+ RESTORE_IPR(astrr,CNS_Q_ASTRR,r1)
+ RESTORE_IPR(sirr,CNS_Q_SIRR,r1)
+ RESTORE_IPR(mafMode,CNS_Q_MAF_MODE,r1)
+ STALL
+ STALL
+ STALL
+ STALL
+ STALL
+
+
+ // restore all integer shadow regs
+//orig#define t 8
+//orig .repeat 7
+//orig restore_reg \t, shadow=1
+//orig#define t t + 1
+//orig .endr
+//orig restore_reg 25, shadow=1
+//orig restore_reg dc_mode, ipr=1 // no mbox instructions for 4 cycles
+
+ RESTORE_SHADOW( r8,CNS_Q_SHADOW+0x00,r1) // also called p0...p7 in the Hudson code
+ RESTORE_SHADOW( r9,CNS_Q_SHADOW+0x08,r1)
+ RESTORE_SHADOW(r10,CNS_Q_SHADOW+0x10,r1)
+ RESTORE_SHADOW(r11,CNS_Q_SHADOW+0x18,r1)
+ RESTORE_SHADOW(r12,CNS_Q_SHADOW+0x20,r1)
+ RESTORE_SHADOW(r13,CNS_Q_SHADOW+0x28,r1)
+ RESTORE_SHADOW(r14,CNS_Q_SHADOW+0x30,r1)
+ RESTORE_SHADOW(r25,CNS_Q_SHADOW+0x38,r1)
+ RESTORE_IPR(dcMode,CNS_Q_DC_MODE,r1)
+
+ //
+ // Get out of shadow mode
+ //
+
+ mfpr r31, pt0 // pad last load to icsr write (in case of replay, icsr will be written anyway) //orig
+ mfpr r31, pt0 // " //orig
+ mfpr r0, icsr // Get icsr //orig
+//orig ldah r2, <1@<icsr_v_sde-16>>(r31) // Get a one in SHADOW_ENABLE bit location
+ ldah r2, (1<<(ICSR_V_SDE-16))(r31) // Get a one in SHADOW_ENABLE bit location //orig
+ bic r0, r2, r2 // ICSR with SDE clear //orig
+ mtpr r2, icsr // Turn off SDE - no palshadow rd/wr for 3 bubble cycles //orig
+
+ mfpr r31, pt0 // SDE bubble cycle 1 //orig
+ mfpr r31, pt0 // SDE bubble cycle 2 //orig
+ mfpr r31, pt0 // SDE bubble cycle 3 //orig
+ nop //orig
+
+//orig // switch impure pointer from ipr to gpr area --
+//orig unfix_impure_ipr r1
+//orig fix_impure_gpr r1
+//orig // restore all integer regs
+//orig#define t 4
+//orig .repeat 28
+//orig restore_reg \t
+//orig#define t t + 1
+//orig .endr
+
+// Restore GPRs (r0, r2 are restored later, r1 and r3 are trashed) ...
+
+ lda r1, -CNS_Q_IPR(r1) // Restore base address of impure area
+ lda r1, 0x200(r1) // Point to center of CPU segment
+
+ RESTORE_GPR(r4,CNS_Q_GPR+0x20,r1)
+ RESTORE_GPR(r5,CNS_Q_GPR+0x28,r1)
+ RESTORE_GPR(r6,CNS_Q_GPR+0x30,r1)
+ RESTORE_GPR(r7,CNS_Q_GPR+0x38,r1)
+ RESTORE_GPR(r8,CNS_Q_GPR+0x40,r1)
+ RESTORE_GPR(r9,CNS_Q_GPR+0x48,r1)
+ RESTORE_GPR(r10,CNS_Q_GPR+0x50,r1)
+ RESTORE_GPR(r11,CNS_Q_GPR+0x58,r1)
+ RESTORE_GPR(r12,CNS_Q_GPR+0x60,r1)
+ RESTORE_GPR(r13,CNS_Q_GPR+0x68,r1)
+ RESTORE_GPR(r14,CNS_Q_GPR+0x70,r1)
+ RESTORE_GPR(r15,CNS_Q_GPR+0x78,r1)
+ RESTORE_GPR(r16,CNS_Q_GPR+0x80,r1)
+ RESTORE_GPR(r17,CNS_Q_GPR+0x88,r1)
+ RESTORE_GPR(r18,CNS_Q_GPR+0x90,r1)
+ RESTORE_GPR(r19,CNS_Q_GPR+0x98,r1)
+ RESTORE_GPR(r20,CNS_Q_GPR+0xA0,r1)
+ RESTORE_GPR(r21,CNS_Q_GPR+0xA8,r1)
+ RESTORE_GPR(r22,CNS_Q_GPR+0xB0,r1)
+ RESTORE_GPR(r23,CNS_Q_GPR+0xB8,r1)
+ RESTORE_GPR(r24,CNS_Q_GPR+0xC0,r1)
+ RESTORE_GPR(r25,CNS_Q_GPR+0xC8,r1)
+ RESTORE_GPR(r26,CNS_Q_GPR+0xD0,r1)
+ RESTORE_GPR(r27,CNS_Q_GPR+0xD8,r1)
+ RESTORE_GPR(r28,CNS_Q_GPR+0xE0,r1)
+ RESTORE_GPR(r29,CNS_Q_GPR+0xE8,r1)
+ RESTORE_GPR(r30,CNS_Q_GPR+0xF0,r1)
+ RESTORE_GPR(r31,CNS_Q_GPR+0xF8,r1)
+
+//orig // switch impure pointer from gpr to ipr area --
+//orig unfix_impure_gpr r1
+//orig fix_impure_ipr r1
+//orig restore_reg icsr, ipr=1 // restore original icsr- 4 bubbles to hw_rei
+
+ lda t0, -0x200(t0) // Restore base address of impure area.
+ lda t0, CNS_Q_IPR(t0) // Point to base of IPR area again.
+ RESTORE_IPR(icsr,CNS_Q_ICSR,r1)
+
+//orig // and back again --
+//orig unfix_impure_ipr r1
+//orig fix_impure_gpr r1
+//orig store_reg1 flag, r31, r1, ipr=1 // clear dump area valid flag
+//orig mb
+
+ lda t0, -CNS_Q_IPR(t0) // Back to base of impure area again,
+ lda t0, 0x200(t0) // and back to center of CPU segment
+ SAVE_GPR(r31,CNS_Q_FLAG,r1) // Clear the dump area valid flag
+ mb
+
+//orig // and back we go
+//orig// restore_reg 3
+//orig restore_reg 2
+//orig// restore_reg 1
+//orig restore_reg 0
+//orig // restore impure area base
+//orig unfix_impure_gpr r1
+
+ RESTORE_GPR(r2,CNS_Q_GPR+0x10,r1)
+ RESTORE_GPR(r0,CNS_Q_GPR+0x00,r1)
+ lda r1, -0x200(r1) // Restore impure base address
+
+ mfpr r31, pt0 // stall for ldqp above //orig
+
+ mtpr r31, dtb_ia // clear the tb //orig
+ mtpr r31, itb_ia // clear the itb //orig
+
+//orig pvc_jsr rststa, bsr=1, dest=1
+ ret r31, (r3) // back we go //orig
+#endif
+
+
+//+
+// pal_pal_bug_check -- code has found a bugcheck situation.
+// Set things up and join common machine check flow.
+//
+// Input:
+// r14 - exc_addr
+//
+// On exit:
+// pt0 - saved r0
+// pt1 - saved r1
+// pt4 - saved r4
+// pt5 - saved r5
+// pt6 - saved r6
+// pt10 - saved exc_addr
+// pt_misc<47:32> - mchk code
+// pt_misc<31:16> - scb vector
+// r14 - base of Cbox IPRs in IO space
+// MCES<mchk> is set
+//-
+
+ ALIGN_BLOCK
+ .globl pal_pal_bug_check_from_int
+pal_pal_bug_check_from_int:
+ DEBUGSTORE(0x79)
+//simos DEBUG_EXC_ADDR()
+ DEBUGSTORE(0x20)
+//simos bsr r25, put_hex
+ lda r25, mchk_c_bugcheck(r31)
+ addq r25, 1, r25 // set flag indicating we came from interrupt and stack is already pushed
+ br r31, pal_pal_mchk
+ nop
+
+pal_pal_bug_check:
+ lda r25, mchk_c_bugcheck(r31)
+
+pal_pal_mchk:
+ sll r25, 32, r25 // Move mchk code to position
+
+ mtpr r14, pt10 // Stash exc_addr
+ mtpr r14, exc_addr
+
+ mfpr r12, pt_misc // Get MCES and scratch
+ zap r12, 0x3c, r12
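+	// (the zap mask 0x3c clears bytes 2-5, i.e. pt_misc<47:16> -- the old
+	//  mchk code and SCB vector fields -- leaving the whami/mces bytes intact)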
+
+ or r12, r25, r12 // Combine mchk code
+ lda r25, scb_v_procmchk(r31) // Get SCB vector
+
+ sll r25, 16, r25 // Move SCBv to position
+ or r12, r25, r25 // Combine SCBv
+
+ mtpr r0, pt0 // Stash for scratch
+ bis r25, mces_m_mchk, r25 // Set MCES<MCHK> bit
+
+ mtpr r25, pt_misc // Save mchk code!scbv!whami!mces
+ ldah r14, 0xfff0(r31)
+
+ mtpr r1, pt1 // Stash for scratch
+ zap r14, 0xE0, r14 // Get Cbox IPR base
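+	// (ldah 0xfff0 sign-extends to 0xFFFFFFFFFFF00000; zapping bytes 5-7
+	//  leaves 0x000000FFFFF00000, the Cbox IPR base used below)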
+
+ mtpr r4, pt4
+ mtpr r5, pt5
+
+ mtpr r6, pt6
+ blbs r12, sys_double_machine_check // MCHK halt if double machine check
+
+ br r31, sys_mchk_collect_iprs // Join common machine check flow
+
+// align_to_call_pal_section // Align to address of first call_pal entry point - 2000
+
+// .sbttl "HALT - PALcode for HALT instruction"
+
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// GO to console code
+//
+//-
+
+ .text 1
+// . = 0x2000
+ CALL_PAL_PRIV(PAL_HALT_ENTRY)
+call_pal_halt:
+#if rax_mode == 0
+ mfpr r31, pt0 // Pad exc_addr read
+ mfpr r31, pt0
+
+ mfpr r12, exc_addr // get PC
+ subq r12, 4, r12 // Point to the HALT
+
+ mtpr r12, exc_addr
+ mtpr r0, pt0
+
+//orig pvc_jsr updpcb, bsr=1
+ bsr r0, pal_update_pcb // update the pcb
+ lda r0, hlt_c_sw_halt(r31) // set halt code to sw halt
+ br r31, sys_enter_console // enter the console
+
+#else // RAX mode
+ mb
+ mb
+ mtpr r9, ev5__dtb_asn // no Dstream virtual ref for next 3 cycles.
+ mtpr r9, ev5__itb_asn // E1. Update ITB ASN. No hw_rei for 5 cycles.
+ mtpr r8, exc_addr // no HW_REI for 1 cycle.
+ blbc r9, not_begin_case
+ mtpr r31, ev5__dtb_ia // clear DTB. No Dstream virtual ref for 2 cycles.
+ mtpr r31, ev5__itb_ia // clear ITB.
+
+not_begin_case:
+ nop
+ nop
+
+ nop
+ nop // pad mt itb_asn ->hw_rei_stall
+
+ hw_rei_stall
+#endif
+
+// .sbttl "CFLUSH- PALcode for CFLUSH instruction"
+
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// R16 - contains the PFN of the page to be flushed
+//
+// Function:
+// Flush all Dstream caches of 1 entire page
+//	The CFLUSH routine is in the system-specific module.
+//
+//-
+
+ CALL_PAL_PRIV(PAL_CFLUSH_ENTRY)
+Call_Pal_Cflush:
+ br r31, sys_cflush
+
+// .sbttl "DRAINA - PALcode for DRAINA instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+// Implicit TRAPB performed by hardware.
+//
+// Function:
+// Stall instruction issue until all prior instructions are guaranteed to
+// complete without incurring aborts. For the EV5 implementation, this
+// means waiting until all pending DREADS are returned.
+//
+//-
+
+ CALL_PAL_PRIV(PAL_DRAINA_ENTRY)
+Call_Pal_Draina:
+ ldah r14, 0x100(r31) // Init counter. Value?
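+	// (ldah loads 0x100 into bits <31:16>, so the loop below gives up and
+	//  halts after 0x01000000, roughly 16.8M, iterations)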
+ nop
+
+DRAINA_LOOP:
+ subq r14, 1, r14 // Decrement counter
+ mfpr r13, ev5__maf_mode // Fetch status bit
+
+ srl r13, maf_mode_v_dread_pending, r13
+ ble r14, DRAINA_LOOP_TOO_LONG
+
+ nop
+ blbs r13, DRAINA_LOOP // Wait until all DREADS clear
+
+ hw_rei
+
+DRAINA_LOOP_TOO_LONG:
+ br r31, call_pal_halt
+
+// .sbttl "CALL_PAL OPCDECs"
+
+ CALL_PAL_PRIV(0x0003)
+CallPal_OpcDec03:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0004)
+CallPal_OpcDec04:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0005)
+CallPal_OpcDec05:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0006)
+CallPal_OpcDec06:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0007)
+CallPal_OpcDec07:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0008)
+CallPal_OpcDec08:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "CSERVE- PALcode for CSERVE instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// Various functions for private use of console software
+//
+// option selector in r0
+// arguments in r16....
+//	The CSERVE routine is in the system-specific module.
+//
+//-
+
+ CALL_PAL_PRIV(PAL_CSERVE_ENTRY)
+Call_Pal_Cserve:
+ br r31, sys_cserve
+
+// .sbttl "swppal - PALcode for swppal instruction"
+
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+// R16 contains the new PAL identifier
+// R17:R21 contain implementation-specific entry parameters
+//
+// R0 receives status:
+// 0 success (PAL was switched)
+// 1 unknown PAL variant
+// 2 known PAL variant, but PAL not loaded
+//
+//
+// Function:
+// Swap control to another PAL.
+//-
+
+ CALL_PAL_PRIV(PAL_SWPPAL_ENTRY)
+Call_Pal_Swppal:
+ cmpule r16, 255, r0 // see if a kibble was passed
+ cmoveq r16, r16, r0 // if r16=0 then a valid address (ECO 59)
+
+	or	r16, r31, r3		// set r3 in case this is an address
+ blbc r0, swppal_cont // nope, try it as an address
+
+ cmpeq r16, 2, r0 // is it our friend OSF?
+ blbc r0, swppal_fail // nope, don't know this fellow
+
+ br r2, CALL_PAL_SWPPAL_10_ // tis our buddy OSF
+
+// .global osfpal_hw_entry_reset
+// .weak osfpal_hw_entry_reset
+// .long <osfpal_hw_entry_reset-pal_start>
+//orig halt // don't know how to get the address here - kludge ok, load pal at 0
+ .long 0 // ?? hack upon hack...pb
+
+CALL_PAL_SWPPAL_10_: ldlp r3, 0(r2) // fetch target addr
+// ble r3, swppal_fail ; if OSF not linked in say not loaded.
+ mfpr r2, pal_base // fetch pal base
+
+ addq r2, r3, r3 // add pal base
+ lda r2, 0x3FFF(r31) // get pal base checker mask
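+	// (0x3FFF masks the low 14 bits: the combined PAL base must be 16KB
+	//  (0x4000) aligned or the cmpeq/blbc below reports failure)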
+
+ and r3, r2, r2 // any funky bits set?
+ cmpeq r2, 0, r0 //
+
+ blbc r0, swppal_fail // return unknown if bad bit set.
+ br r31, swppal_cont
+
+// .sbttl "CALL_PAL OPCDECs"
+
+ CALL_PAL_PRIV(0x000B)
+CallPal_OpcDec0B:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x000C)
+CallPal_OpcDec0C:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "wripir- PALcode for wripir instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+// r16 = processor number to interrupt
+//
+// Function:
+// IPIR <- R16
+// Handled in system-specific code
+//
+// Exit:
+// interprocessor interrupt is recorded on the target processor
+// and is initiated when the proper enabling conditions are present.
+//-
+
+ CALL_PAL_PRIV(PAL_WRIPIR_ENTRY)
+Call_Pal_Wrpir:
+ br r31, sys_wripir
+
+// .sbttl "CALL_PAL OPCDECs"
+
+ CALL_PAL_PRIV(0x000E)
+CallPal_OpcDec0E:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x000F)
+CallPal_OpcDec0F:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "rdmces- PALcode for rdmces instruction"
+
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// R0 <- ZEXT(MCES)
+//-
+
+ CALL_PAL_PRIV(PAL_RDMCES_ENTRY)
+Call_Pal_Rdmces:
+ mfpr r0, pt_mces // Read from PALtemp
+ and r0, mces_m_all, r0 // Clear other bits
+
+ hw_rei
+
+// .sbttl "wrmces- PALcode for wrmces instruction"
+
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// If {R16<0> EQ 1} then MCES<0> <- 0 (MCHK)
+// If {R16<1> EQ 1} then MCES<1> <- 0 (SCE)
+// If {R16<2> EQ 1} then MCES<2> <- 0 (PCE)
+// MCES<3> <- R16<3> (DPC)
+// MCES<4> <- R16<4> (DSC)
+//
+//-
+
+ CALL_PAL_PRIV(PAL_WRMCES_ENTRY)
+Call_Pal_Wrmces:
+ and r16, ((1<<mces_v_mchk) | (1<<mces_v_sce) | (1<<mces_v_pce)), r13 // Isolate MCHK, SCE, PCE
+ mfpr r14, pt_mces // Get current value
+
+ ornot r31, r13, r13 // Flip all the bits
+ and r16, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r17
+
+ and r14, r13, r1 // Update MCHK, SCE, PCE
+ bic r1, ((1<<mces_v_dpc) | (1<<mces_v_dsc)), r1 // Clear old DPC, DSC
+
+ or r1, r17, r1 // Update DPC and DSC
+ mtpr r1, pt_mces // Write MCES back
+
+#if rawhide_system == 0
+ nop // Pad to fix PT write->read restriction
+#else
+ blbs r16, RAWHIDE_clear_mchk_lock // Clear logout from lock
+#endif
+
+ nop
+ hw_rei
+
+
+
+// .sbttl "CALL_PAL OPCDECs"
+
+ CALL_PAL_PRIV(0x0012)
+CallPal_OpcDec12:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0013)
+CallPal_OpcDec13:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0014)
+CallPal_OpcDec14:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0015)
+CallPal_OpcDec15:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0016)
+CallPal_OpcDec16:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0017)
+CallPal_OpcDec17:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0018)
+CallPal_OpcDec18:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0019)
+CallPal_OpcDec19:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x001A)
+CallPal_OpcDec1A:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x001B)
+CallPal_OpcDec1B:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x001C)
+CallPal_OpcDec1C:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x001D)
+CallPal_OpcDec1D:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x001E)
+CallPal_OpcDec1E:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x001F)
+CallPal_OpcDec1F:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0020)
+CallPal_OpcDec20:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0021)
+CallPal_OpcDec21:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0022)
+CallPal_OpcDec22:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0023)
+CallPal_OpcDec23:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0024)
+CallPal_OpcDec24:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0025)
+CallPal_OpcDec25:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0026)
+CallPal_OpcDec26:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0027)
+CallPal_OpcDec27:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0028)
+CallPal_OpcDec28:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x0029)
+CallPal_OpcDec29:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x002A)
+CallPal_OpcDec2A:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "wrfen - PALcode for wrfen instruction"
+
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// a0<0> -> ICSR<FPE>
+// Store new FEN in PCB
+// Final value of t0 (r1), t8..t10 (r22..r24) and a0 (r16) are UNPREDICTABLE
+//
+// Issue: What about pending FP loads when FEN goes from on->off????
+//-
+
+ CALL_PAL_PRIV(PAL_WRFEN_ENTRY)
+Call_Pal_Wrfen:
+ or r31, 1, r13 // Get a one
+ mfpr r1, ev5__icsr // Get current FPE
+
+ sll r13, icsr_v_fpe, r13 // shift 1 to icsr<fpe> spot, e0
+ and r16, 1, r16 // clean new fen
+
+ sll r16, icsr_v_fpe, r12 // shift new fen to correct bit position
+ bic r1, r13, r1 // zero icsr<fpe>
+
+ or r1, r12, r1 // Or new FEN into ICSR
+ mfpr r12, pt_pcbb // Get PCBB - E1
+
+ mtpr r1, ev5__icsr // write new ICSR. 3 Bubble cycles to HW_REI
+ stlp r16, osfpcb_q_fen(r12) // Store FEN in PCB.
+
+ mfpr r31, pt0 // Pad ICSR<FPE> write.
+ mfpr r31, pt0
+
+ mfpr r31, pt0
+// pvc_violate 225 // cuz PVC can't distinguish which bits changed
+ hw_rei
+
+
+ CALL_PAL_PRIV(0x002C)
+CallPal_OpcDec2C:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "wrvptpr - PALcode for wrvptpr instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// vptptr <- a0 (r16)
+//-
+
+ CALL_PAL_PRIV(PAL_WRVPTPTR_ENTRY)
+Call_Pal_Wrvptptr:
+ mtpr r16, ev5__mvptbr // Load Mbox copy
+ mtpr r16, ev5__ivptbr // Load Ibox copy
+ nop // Pad IPR write
+ nop
+ hw_rei
+
+ CALL_PAL_PRIV(0x002E)
+CallPal_OpcDec2E:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_PRIV(0x002F)
+CallPal_OpcDec2F:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "swpctx- PALcode for swpctx instruction"
+
+//+
+//
+// Entry:
+// hardware dispatch via callPal instruction
+// R16 -> new pcb
+//
+// Function:
+// dynamic state moved to old pcb
+// new state loaded from new pcb
+// pcbb pointer set
+// old pcbb returned in R0
+//
+// Note: need to add perf monitor stuff
+//-
+
+ CALL_PAL_PRIV(PAL_SWPCTX_ENTRY)
+Call_Pal_Swpctx:
+ rpcc r13 // get cyccounter
+ mfpr r0, pt_pcbb // get pcbb
+
+ ldqp r22, osfpcb_q_fen(r16) // get new fen/pme
+ ldqp r23, osfpcb_l_cc(r16) // get new asn
+
+ srl r13, 32, r25 // move offset
+ mfpr r24, pt_usp // get usp
+
+ stqp r30, osfpcb_q_ksp(r0) // store old ksp
+// pvc_violate 379 // stqp can't trap except replay. only problem if mf same ipr in same shadow.
+ mtpr r16, pt_pcbb // set new pcbb
+
+ stqp r24, osfpcb_q_usp(r0) // store usp
+ addl r13, r25, r25 // merge for new time
+
+ stlp r25, osfpcb_l_cc(r0) // save time
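+	// (rpcc returns the cycle count in <31:0> and an offset in <63:32>;
+	//  the srl/addl pair above sums the two halves to form the process
+	//  time saved into the old PCB)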
+ ldah r24, (1<<(icsr_v_fpe-16))(r31)
+
+ and r22, 1, r12 // isolate fen
+ mfpr r25, icsr // get current icsr
+
+ ev5_pass2 lda r24, (1<<icsr_v_pmp)(r24)
+ br r31, swpctx_cont
+
+// .sbttl "wrval - PALcode for wrval instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// sysvalue <- a0 (r16)
+//-
+
+ CALL_PAL_PRIV(PAL_WRVAL_ENTRY)
+Call_Pal_Wrval:
+ nop
+ mtpr r16, pt_sysval // Pad paltemp write
+ nop
+ nop
+ hw_rei
+
+
+// .sbttl "rdval - PALcode for rdval instruction"
+
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// v0 (r0) <- sysvalue
+//-
+
+ CALL_PAL_PRIV(PAL_RDVAL_ENTRY)
+Call_Pal_Rdval:
+ nop
+ mfpr r0, pt_sysval
+ nop
+ hw_rei
+
+// .sbttl "tbi - PALcode for tbi instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// TB invalidate
+//	r17/a1 = VA for TBISx instructions
+// r17/a1 = Va for TBISx instructions
+//-
+
+ CALL_PAL_PRIV(PAL_TBI_ENTRY)
+Call_Pal_Tbi:
+	addq	r16, 2, r16			// change range to 0-5
+ br r23, CALL_PAL_tbi_10_ // get our address
+
+CALL_PAL_tbi_10_: cmpult r16, 6, r22 // see if in range
+ lda r23, tbi_tbl-CALL_PAL_tbi_10_(r23) // set base to start of table
+ sll r16, 4, r16 // * 16
+ blbc r22, CALL_PAL_tbi_30_ // go rei, if not
+
+ addq r23, r16, r23 // addr of our code
+//orig pvc_jsr tbi
+ jmp r31, (r23) // and go do it
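+	// Worked example of the dispatch above, assuming the OSF TBI codes
+	// run from -2 (tbia) to 3: a0 = -2 maps to index 0 and a0 = 3 to
+	// index 5; each index selects one 16-byte entry of tbi_tbl.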
+
+CALL_PAL_tbi_30_:
+ hw_rei
+ nop
+
+// .sbttl "wrent - PALcode for wrent instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// Update ent* in paltemps
+// r16/a0 = Address of entry routine
+// r17/a1 = Entry Number 0..5
+//
+// r22, r23 trashed
+//-
+
+ CALL_PAL_PRIV(PAL_WRENT_ENTRY)
+Call_Pal_Wrent:
+ cmpult r17, 6, r22 // see if in range
+ br r23, CALL_PAL_wrent_10_ // get our address
+
+CALL_PAL_wrent_10_: bic r16, 3, r16 // clean pc
+ blbc r22, CALL_PAL_wrent_30_ // go rei, if not in range
+
+ lda r23, wrent_tbl-CALL_PAL_wrent_10_(r23) // set base to start of table
+ sll r17, 4, r17 // *16
+
+ addq r17, r23, r23 // Get address in table
+//orig pvc_jsr wrent
+ jmp r31, (r23) // and go do it
+
+CALL_PAL_wrent_30_:
+ hw_rei // out of range, just return
+
+// .sbttl "swpipl - PALcode for swpipl instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// v0 (r0) <- PS<IPL>
+// PS<IPL> <- a0<2:0> (r16)
+//
+// t8 (r22) is scratch
+//-
+
+ CALL_PAL_PRIV(PAL_SWPIPL_ENTRY)
+Call_Pal_Swpipl:
+ and r16, osfps_m_ipl, r16 // clean New ipl
+ mfpr r22, pt_intmask // get int mask
+
+ extbl r22, r16, r22 // get mask for this ipl
+ bis r11, r31, r0 // return old ipl
+
+ bis r16, r31, r11 // set new ps
+ mtpr r22, ev5__ipl // set new mask
+
+ mfpr r31, pt0 // pad ipl write
+ mfpr r31, pt0 // pad ipl write
+
+ hw_rei // back
+
+// .sbttl "rdps - PALcode for rdps instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// v0 (r0) <- ps
+//-
+
+ CALL_PAL_PRIV(PAL_RDPS_ENTRY)
+Call_Pal_Rdps:
+ bis r11, r31, r0 // Fetch PALshadow PS
+ nop // Must be 2 cycles long
+ hw_rei
+
+// .sbttl "wrkgp - PALcode for wrkgp instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// kgp <- a0 (r16)
+//-
+
+ CALL_PAL_PRIV(PAL_WRKGP_ENTRY)
+Call_Pal_Wrkgp:
+ nop
+ mtpr r16, pt_kgp
+ nop // Pad for pt write->read restriction
+ nop
+ hw_rei
+
+// .sbttl "wrusp - PALcode for wrusp instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// usp <- a0 (r16)
+//-
+
+ CALL_PAL_PRIV(PAL_WRUSP_ENTRY)
+Call_Pal_Wrusp:
+ nop
+ mtpr r16, pt_usp
+ nop // Pad possible pt write->read restriction
+ nop
+ hw_rei
+
+// .sbttl "wrperfmon - PALcode for wrperfmon instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+//
+// Function:
+// Various control functions for the onchip performance counters
+//
+// option selector in r16
+// option argument in r17
+// returned status in r0
+//
+//
+// r16 = 0 Disable performance monitoring for one or more cpu's
+// r17 = 0 disable no counters
+// r17 = bitmask disable counters specified in bit mask (1=disable)
+//
+// r16 = 1 Enable performance monitoring for one or more cpu's
+// r17 = 0 enable no counters
+// r17 = bitmask enable counters specified in bit mask (1=enable)
+//
+// r16 = 2 Mux select for one or more cpu's
+// r17 = Mux selection (cpu specific)
+// <24:19> bc_ctl<pm_mux_sel> field (see spec)
+// <31>,<7:4>,<3:0> pmctr <sel0>,<sel1>,<sel2> fields (see spec)
+//
+// r16 = 3 Options
+// r17 = (cpu specific)
+// <0> = 0 log all processes
+// <0> = 1 log only selected processes
+// <30,9,8> mode select - ku,kp,kk
+//
+// r16 = 4 Interrupt frequency select
+// r17 = (cpu specific) indicates interrupt frequencies desired for each
+// counter, with "zero interrupts" being an option
+// frequency info in r17 bits as defined by PMCTR_CTL<FRQx> below
+//
+// r16 = 5 Read Counters
+// r17 = na
+// r0 = value (same format as ev5 pmctr)
+// <0> = 0 Read failed
+// <0> = 1 Read succeeded
+//
+// r16 = 6 Write Counters
+// r17 = value (same format as ev5 pmctr; all counters written simultaneously)
+//
+// r16 = 7 Enable performance monitoring for one or more cpu's and reset counter to 0
+// r17 = 0 enable no counters
+// r17 = bitmask enable & clear counters specified in bit mask (1=enable & clear)
+//
+//=============================================================================
+//Assumptions:
+//PMCTR_CTL:
+//
+// <15:14> CTL0 -- encoded frequency select and enable - CTR0
+// <13:12> CTL1 -- " - CTR1
+// <11:10> CTL2 -- " - CTR2
+//
+// <9:8> FRQ0 -- frequency select for CTR0 (no enable info)
+// <7:6> FRQ1 -- frequency select for CTR1
+// <5:4> FRQ2 -- frequency select for CTR2
+//
+// <0> all vs. select processes (0=all,1=select)
+//
+// where
+// FRQx<1:0>
+// 0 1 disable interrupt
+// 1 0 frequency = 65536 (16384 for ctr2)
+// 1 1 frequency = 256
+// note: FRQx<1:0> = 00 will keep counters from ever being enabled.
+//
+//=============================================================================
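+// Example encoding for the r16=4 (frequency select) option, based on the
+// PMCTR_CTL layout above: r17 = (2<<8)|(1<<6)|(3<<4) requests frequency
+// 65536 for CTR0, disabled interrupts for CTR1, and frequency 256 for CTR2.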
+//
+ CALL_PAL_PRIV(0x0039)
+// unsupported in Hudson code .. pboyle Nov/95
+CALL_PAL_Wrperfmon:
+#if perfmon_debug == 0
+ // "real" performance monitoring code
+ cmpeq r16, 1, r0 // check for enable
+ bne r0, perfmon_en // br if requested to enable
+
+ cmpeq r16, 2, r0 // check for mux ctl
+ bne r0, perfmon_muxctl // br if request to set mux controls
+
+ cmpeq r16, 3, r0 // check for options
+ bne r0, perfmon_ctl // br if request to set options
+
+ cmpeq r16, 4, r0 // check for interrupt frequency select
+ bne r0, perfmon_freq // br if request to change frequency select
+
+ cmpeq r16, 5, r0 // check for counter read request
+ bne r0, perfmon_rd // br if request to read counters
+
+ cmpeq r16, 6, r0 // check for counter write request
+ bne r0, perfmon_wr // br if request to write counters
+
+ cmpeq r16, 7, r0 // check for counter clear/enable request
+ bne r0, perfmon_enclr // br if request to clear/enable counters
+
+ beq r16, perfmon_dis // br if requested to disable (r16=0)
+ br r31, perfmon_unknown // br if unknown request
+#else
+
+ br r31, pal_perfmon_debug
+#endif
+
+// .sbttl "rdusp - PALcode for rdusp instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// v0 (r0) <- usp
+//-
+
+ CALL_PAL_PRIV(PAL_RDUSP_ENTRY)
+Call_Pal_Rdusp:
+ nop
+ mfpr r0, pt_usp
+ hw_rei
+
+
+ CALL_PAL_PRIV(0x003B)
+CallPal_OpcDec3B:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "whami - PALcode for whami instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// v0 (r0) <- whami
+//-
+ CALL_PAL_PRIV(PAL_WHAMI_ENTRY)
+Call_Pal_Whami:
+ nop
+ mfpr r0, pt_whami // Get Whami
+ extbl r0, 1, r0 // Isolate just whami bits
+ hw_rei
+
+// .sbttl "retsys - PALcode for retsys instruction"
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+// 00(sp) contains return pc
+// 08(sp) contains r29
+//
+// Function:
+// Return from system call.
+// mode switched from kern to user.
+// stacks swapped, ugp, upc restored.
+// r23, r25 junked
+//-
+
+ CALL_PAL_PRIV(PAL_RETSYS_ENTRY)
+Call_Pal_Retsys:
+ lda r25, osfsf_c_size(sp) // pop stack
+ bis r25, r31, r14 // touch r25 & r14 to stall mf exc_addr
+
+ mfpr r14, exc_addr // save exc_addr in case of fault
+ ldq r23, osfsf_pc(sp) // get pc
+
+ ldq r29, osfsf_gp(sp) // get gp
+ stl_c r31, -4(sp) // clear lock_flag
+
+ lda r11, 1<<osfps_v_mode(r31)// new PS:mode=user
+ mfpr r30, pt_usp // get users stack
+
+ bic r23, 3, r23 // clean return pc
+ mtpr r31, ev5__ipl // zero ibox IPL - 2 bubbles to hw_rei
+
+ mtpr r11, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
+ mtpr r11, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
+
+ mtpr r23, exc_addr // set return address - 1 bubble to hw_rei
+ mtpr r25, pt_ksp // save kern stack
+
+ rc r31 // clear inter_flag
+// pvc_violate 248 // possible hidden mt->mf pt violation ok in callpal
+ hw_rei_spe // and back
+
+
+ CALL_PAL_PRIV(0x003E)
+CallPal_OpcDec3E:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "rti - PALcode for rti instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// 00(sp) -> ps
+// 08(sp) -> pc
+// 16(sp) -> r29 (gp)
+// 24(sp) -> r16 (a0)
+// 32(sp) -> r17 (a1)
+//	40(sp) -> r18 (a2)
+//-
+
+ CALL_PAL_PRIV(PAL_RTI_ENTRY)
+#ifdef SIMOS
+ /* called once by platform_tlaser */
+ .globl Call_Pal_Rti
+#endif
+Call_Pal_Rti:
+ lda r25, osfsf_c_size(sp) // get updated sp
+ bis r25, r31, r14 // touch r14,r25 to stall mf exc_addr
+
+ mfpr r14, exc_addr // save PC in case of fault
+ rc r31 // clear intr_flag
+
+ ldq r12, -6*8(r25) // get ps
+ ldq r13, -5*8(r25) // pc
+
+ ldq r18, -1*8(r25) // a2
+ ldq r17, -2*8(r25) // a1
+
+ ldq r16, -3*8(r25) // a0
+ ldq r29, -4*8(r25) // gp
+
+ bic r13, 3, r13 // clean return pc
+ stl_c r31, -4(r25) // clear lock_flag
+
+ and r12, osfps_m_mode, r11 // get mode
+ mtpr r13, exc_addr // set return address
+
+ beq r11, rti_to_kern // br if rti to Kern
+ br r31, rti_to_user // out of call_pal space
+
+
+// .sbttl "Start the Unprivileged CALL_PAL Entry Points"
+// .sbttl "bpt- PALcode for bpt instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// Build stack frame
+// a0 <- code
+// a1 <- unpred
+// a2 <- unpred
+// vector via entIF
+//
+//-
+//
+ .text 1
+// . = 0x3000
+ CALL_PAL_UNPRIV(PAL_BPT_ENTRY)
+Call_Pal_Bpt:
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ bis r11, r31, r12 // Save PS for stack write
+ bge r25, CALL_PAL_bpt_10_ // no stack swap needed if cm=kern
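+	// (shifting PS<mode> into the sign bit lets the bge above test for
+	//  kernel mode without a mask; bugchk and gentrap below use the same
+	//  trick)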
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r11 // Set new PS
+ mfpr r30, pt_ksp
+
+CALL_PAL_bpt_10_:
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+ mfpr r14, exc_addr // get pc
+
+ stq r16, osfsf_a0(sp) // save regs
+ bis r31, osf_a0_bpt, r16 // set a0
+
+ stq r17, osfsf_a1(sp) // a1
+ br r31, bpt_bchk_common // out of call_pal space
+
+
+// .sbttl "bugchk- PALcode for bugchk instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// Build stack frame
+// a0 <- code
+// a1 <- unpred
+// a2 <- unpred
+// vector via entIF
+//
+//-
+//
+ CALL_PAL_UNPRIV(PAL_BUGCHK_ENTRY)
+Call_Pal_Bugchk:
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ bis r11, r31, r12 // Save PS for stack write
+ bge r25, CALL_PAL_bugchk_10_ // no stack swap needed if cm=kern
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r11 // Set new PS
+ mfpr r30, pt_ksp
+
+CALL_PAL_bugchk_10_:
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+ mfpr r14, exc_addr // get pc
+
+ stq r16, osfsf_a0(sp) // save regs
+ bis r31, osf_a0_bugchk, r16 // set a0
+
+ stq r17, osfsf_a1(sp) // a1
+ br r31, bpt_bchk_common // out of call_pal space
+
+
+ CALL_PAL_UNPRIV(0x0082)
+CallPal_OpcDec82:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "callsys - PALcode for callsys instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// Switch mode to kernel and build a callsys stack frame.
+// sp = ksp
+// gp = kgp
+// t8 - t10 (r22-r24) trashed
+//
+//-
+//
+ CALL_PAL_UNPRIV(PAL_CALLSYS_ENTRY)
+Call_Pal_Callsys:
+
+ and r11, osfps_m_mode, r24 // get mode
+ mfpr r22, pt_ksp // get ksp
+
+ beq r24, sys_from_kern // sysCall from kern is not allowed
+ mfpr r12, pt_entsys // get address of callSys routine
+
+//+
+// from here on we know we are in user going to Kern
+//-
+ mtpr r31, ev5__dtb_cm // set Mbox current mode - no virt ref for 2 cycles
+ mtpr r31, ev5__ps // set Ibox current mode - 2 bubble to hw_rei
+
+ bis r31, r31, r11 // PS=0 (mode=kern)
+ mfpr r23, exc_addr // get pc
+
+ mtpr r30, pt_usp // save usp
+ lda sp, 0-osfsf_c_size(r22)// set new sp
+
+ stq r29, osfsf_gp(sp) // save user gp/r29
+ stq r24, osfsf_ps(sp) // save ps
+
+ stq r23, osfsf_pc(sp) // save pc
+ mtpr r12, exc_addr // set address
+ // 1 cycle to hw_rei
+
+ mfpr r29, pt_kgp // get the kern gp/r29
+
+ hw_rei_spe // and off we go!
+
+
+ CALL_PAL_UNPRIV(0x0084)
+CallPal_OpcDec84:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0085)
+CallPal_OpcDec85:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "imb - PALcode for imb instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// Flush the writebuffer and flush the Icache
+//
+//-
+//
+ CALL_PAL_UNPRIV(PAL_IMB_ENTRY)
+Call_Pal_Imb:
+ mb // Clear the writebuffer
+ mfpr r31, ev5__mcsr // Sync with clear
+ nop
+ nop
+ br r31, pal_ic_flush // Flush Icache
+
+
+// .sbttl "CALL_PAL OPCDECs"
+
+ CALL_PAL_UNPRIV(0x0087)
+CallPal_OpcDec87:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0088)
+CallPal_OpcDec88:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0089)
+CallPal_OpcDec89:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x008A)
+CallPal_OpcDec8A:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x008B)
+CallPal_OpcDec8B:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x008C)
+CallPal_OpcDec8C:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x008D)
+CallPal_OpcDec8D:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x008E)
+CallPal_OpcDec8E:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x008F)
+CallPal_OpcDec8F:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0090)
+CallPal_OpcDec90:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0091)
+CallPal_OpcDec91:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0092)
+CallPal_OpcDec92:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0093)
+CallPal_OpcDec93:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0094)
+CallPal_OpcDec94:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0095)
+CallPal_OpcDec95:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0096)
+CallPal_OpcDec96:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0097)
+CallPal_OpcDec97:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0098)
+CallPal_OpcDec98:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x0099)
+CallPal_OpcDec99:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x009A)
+CallPal_OpcDec9A:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x009B)
+CallPal_OpcDec9B:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x009C)
+CallPal_OpcDec9C:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x009D)
+CallPal_OpcDec9D:
+ br r31, osfpal_calpal_opcdec
+
+// .sbttl "rdunique - PALcode for rdunique instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// v0 (r0) <- unique
+//
+//-
+//
+ CALL_PAL_UNPRIV(PAL_RDUNIQUE_ENTRY)
+CALL_PALrdunique_:
+ mfpr r0, pt_pcbb // get pcb pointer
+	ldqp	r0, osfpcb_q_unique(r0) // read the unique value
+
+ hw_rei
+
+// .sbttl "wrunique - PALcode for wrunique instruction"
+//+
+//
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// unique <- a0 (r16)
+//
+//-
+//
+CALL_PAL_UNPRIV(PAL_WRUNIQUE_ENTRY)
+CALL_PAL_Wrunique:
+ nop
+ mfpr r12, pt_pcbb // get pcb pointer
+	stqp	r16, osfpcb_q_unique(r12) // store the new value
+ nop // Pad palshadow write
+ hw_rei // back
+
+// .sbttl "CALL_PAL OPCDECs"
+
+ CALL_PAL_UNPRIV(0x00A0)
+CallPal_OpcDecA0:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00A1)
+CallPal_OpcDecA1:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00A2)
+CallPal_OpcDecA2:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00A3)
+CallPal_OpcDecA3:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00A4)
+CallPal_OpcDecA4:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00A5)
+CallPal_OpcDecA5:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00A6)
+CallPal_OpcDecA6:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00A7)
+CallPal_OpcDecA7:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00A8)
+CallPal_OpcDecA8:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00A9)
+CallPal_OpcDecA9:
+ br r31, osfpal_calpal_opcdec
+
+
+// .sbttl "gentrap - PALcode for gentrap instruction"
+//+
+// CALL_PAL_gentrap:
+// Entry:
+// Vectored into via hardware PALcode instruction dispatch.
+//
+// Function:
+// Build stack frame
+// a0 <- code
+// a1 <- unpred
+// a2 <- unpred
+// vector via entIF
+//
+//-
+
+ CALL_PAL_UNPRIV(0x00AA)
+// unsupported in Hudson code .. pboyle Nov/95
+CALL_PAL_gentrap:
+ sll r11, 63-osfps_v_mode, r25 // Shift mode up to MS bit
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ bis r11, r31, r12 // Save PS for stack write
+ bge r25, CALL_PAL_gentrap_10_ // no stack swap needed if cm=kern
+
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r30, pt_usp // save user stack
+
+ bis r31, r31, r11 // Set new PS
+ mfpr r30, pt_ksp
+
+CALL_PAL_gentrap_10_:
+ lda sp, 0-osfsf_c_size(sp)// allocate stack space
+ mfpr r14, exc_addr // get pc
+
+ stq r16, osfsf_a0(sp) // save regs
+ bis r31, osf_a0_gentrap, r16// set a0
+
+ stq r17, osfsf_a1(sp) // a1
+ br r31, bpt_bchk_common // out of call_pal space
+
+
+// .sbttl "CALL_PAL OPCDECs"
+
+ CALL_PAL_UNPRIV(0x00AB)
+CallPal_OpcDecAB:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00AC)
+CallPal_OpcDecAC:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00AD)
+CallPal_OpcDecAD:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00AE)
+CallPal_OpcDecAE:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00AF)
+CallPal_OpcDecAF:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B0)
+CallPal_OpcDecB0:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B1)
+CallPal_OpcDecB1:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B2)
+CallPal_OpcDecB2:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B3)
+CallPal_OpcDecB3:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B4)
+CallPal_OpcDecB4:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B5)
+CallPal_OpcDecB5:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B6)
+CallPal_OpcDecB6:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B7)
+CallPal_OpcDecB7:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B8)
+CallPal_OpcDecB8:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00B9)
+CallPal_OpcDecB9:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00BA)
+CallPal_OpcDecBA:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00BB)
+CallPal_OpcDecBB:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00BC)
+CallPal_OpcDecBC:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00BD)
+CallPal_OpcDecBD:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00BE)
+CallPal_OpcDecBE:
+ br r31, osfpal_calpal_opcdec
+
+ CALL_PAL_UNPRIV(0x00BF)
+CallPal_OpcDecBF:
+ br r31, osfpal_calpal_opcdec
+
+
+/*======================================================================*/
+/* OSF/1 CALL_PAL CONTINUATION AREA */
+/*======================================================================*/
+
+ .text 2
+
+ . = 0x4000
+
+
+// .sbttl "Continuation of MTPR_PERFMON"
+ ALIGN_BLOCK
+#if perfmon_debug == 0
+ // "real" performance monitoring code
+// mux ctl
+perfmon_muxctl:
+ lda r8, 1(r31) // get a 1
+ sll r8, pmctr_v_sel0, r8 // move to sel0 position
+ or r8, ((0xf<<pmctr_v_sel1) | (0xf<<pmctr_v_sel2)), r8 // build mux select mask
+ and r17, r8, r25 // isolate pmctr mux select bits
+ mfpr r0, ev5__pmctr
+ bic r0, r8, r0 // clear old mux select bits
+ or r0,r25, r25 // or in new mux select bits
+ mtpr r25, ev5__pmctr
+
+ // ok, now tackle cbox mux selects
+ ldah r14, 0xfff0(r31)
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+//orig get_bc_ctl_shadow r16 // bc_ctl returned in lower longword
+// adapted from ev5_pal_macros.mar
+ mfpr r16, pt_impure
+ lda r16, CNS_Q_IPR(r16)
+ RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
+
+ lda r8, 0x3F(r31) // build mux select mask
+ sll r8, bc_ctl_v_pm_mux_sel, r8
+
+ and r17, r8, r25 // isolate bc_ctl mux select bits
+ bic r16, r8, r16 // isolate old mux select bits
+ or r16, r25, r25 // create new bc_ctl
+ mb // clear out cbox for future ipr write
+ stqp r25, ev5__bc_ctl(r14) // store to cbox ipr
+ mb // clear out cbox for future ipr write
+
+//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
+// adapted from ev5_pal_macros.mar
+ mfpr r16, pt_impure
+ lda r16, CNS_Q_IPR(r16)
+ SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
+
+ br r31, perfmon_success
+
+
+// requested to disable perf monitoring
+perfmon_dis:
+ mfpr r14, ev5__pmctr // read ibox pmctr ipr
+perfmon_dis_ctr0: // and begin with ctr0
+ blbc r17, perfmon_dis_ctr1 // do not disable ctr0
+ lda r8, 3(r31)
+ sll r8, pmctr_v_ctl0, r8
+ bic r14, r8, r14 // disable ctr0
+perfmon_dis_ctr1:
+ srl r17, 1, r17
+ blbc r17, perfmon_dis_ctr2 // do not disable ctr1
+ lda r8, 3(r31)
+ sll r8, pmctr_v_ctl1, r8
+ bic r14, r8, r14 // disable ctr1
+perfmon_dis_ctr2:
+ srl r17, 1, r17
+ blbc r17, perfmon_dis_update // do not disable ctr2
+ lda r8, 3(r31)
+ sll r8, pmctr_v_ctl2, r8
+ bic r14, r8, r14 // disable ctr2
+perfmon_dis_update:
+ mtpr r14, ev5__pmctr // update pmctr ipr
+//;the following code is not needed for ev5 pass2 and later, but doesn't hurt anything to leave in
+// adapted from ev5_pal_macros.mar
+//orig get_pmctr_ctl r8, r25 // pmctr_ctl bit in r8. adjusted impure pointer in r25
+ mfpr r25, pt_impure
+ lda r25, CNS_Q_IPR(r25)
+ RESTORE_SHADOW(r8,CNS_Q_PM_CTL,r25);
+
+ lda r17, 0x3F(r31) // build mask
+ sll r17, pmctr_v_ctl2, r17 // shift mask to correct position
+ and r14, r17, r14 // isolate ctl bits
+ bic r8, r17, r8 // clear out old ctl bits
+ or r14, r8, r14 // create shadow ctl bits
+//orig store_reg1 pmctr_ctl, r14, r25, ipr=1 // update pmctr_ctl register
+//adjusted impure pointer still in r25
+ SAVE_SHADOW(r14,CNS_Q_PM_CTL,r25);
+
+ br r31, perfmon_success
+
+
+// requested to enable perf monitoring
+//;the following code can be greatly simplified for pass2, but should work fine as is.
+
+
+perfmon_enclr:
+ lda r9, 1(r31) // set enclr flag
+ br perfmon_en_cont
+
+perfmon_en:
+ bis r31, r31, r9 // clear enclr flag
+
+perfmon_en_cont:
+ mfpr r8, pt_pcbb // get PCB base
+//orig get_pmctr_ctl r25, r25
+ mfpr r25, pt_impure
+ lda r25, CNS_Q_IPR(r25)
+ RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r25);
+
+ ldqp r16, osfpcb_q_fen(r8) // read DAT/PME/FEN quadword
+ mfpr r14, ev5__pmctr // read ibox pmctr ipr
+ srl r16, osfpcb_v_pme, r16 // get pme bit
+ mfpr r13, icsr
+ and r16, 1, r16 // isolate pme bit
+
+ // this code only needed in pass2 and later
+//orig sget_addr r12, 1<<icsr_v_pmp, r31
+ lda r12, 1<<icsr_v_pmp(r31) // pb
+ bic r13, r12, r13 // clear pmp bit
+ sll r16, icsr_v_pmp, r12 // move pme bit to icsr<pmp> position
+ or r12, r13, r13 // new icsr with icsr<pmp> bit set/clear
+ ev5_pass2 mtpr r13, icsr // update icsr
+
+#if ev5_p1 != 0
+ lda r12, 1(r31)
+ cmovlbc r25, r12, r16 // r16<0> set if either pme=1 or sprocess=0 (sprocess in bit 0 of r25)
+#else
+ bis r31, 1, r16 // set r16<0> on pass2 to update pmctr always (icsr provides real enable)
+#endif
+
+ sll r25, 6, r25 // shift frequency bits into pmctr_v_ctl positions
+ bis r14, r31, r13 // copy pmctr
+
+perfmon_en_ctr0: // and begin with ctr0
+ blbc r17, perfmon_en_ctr1 // do not enable ctr0
+
+	blbc	r9, perfmon_en_noclr0	// if enclr flag set, clear ctr0 field
+ lda r8, 0xffff(r31)
+ zapnot r8, 3, r8 // ctr0<15:0> mask
+ sll r8, pmctr_v_ctr0, r8
+ bic r14, r8, r14 // clear ctr bits
+ bic r13, r8, r13 // clear ctr bits
+
+perfmon_en_noclr0:
+//orig get_addr r8, 3<<pmctr_v_ctl0, r31
+ LDLI(r8, (3<<pmctr_v_ctl0))
+ and r25, r8, r12 //isolate frequency select bits for ctr0
+ bic r14, r8, r14 // clear ctl0 bits in preparation for enabling
+ or r14,r12,r14 // or in new ctl0 bits
+
+perfmon_en_ctr1: // enable ctr1
+ srl r17, 1, r17 // get ctr1 enable
+ blbc r17, perfmon_en_ctr2 // do not enable ctr1
+
+ blbc r9, perfmon_en_noclr1 // if enclr flag set, clear ctr1 field
+ lda r8, 0xffff(r31)
+ zapnot r8, 3, r8 // ctr1<15:0> mask
+ sll r8, pmctr_v_ctr1, r8
+ bic r14, r8, r14 // clear ctr bits
+ bic r13, r8, r13 // clear ctr bits
+
+perfmon_en_noclr1:
+//orig get_addr r8, 3<<pmctr_v_ctl1, r31
+ LDLI(r8, (3<<pmctr_v_ctl1))
+ and r25, r8, r12 //isolate frequency select bits for ctr1
+ bic r14, r8, r14 // clear ctl1 bits in preparation for enabling
+ or r14,r12,r14 // or in new ctl1 bits
+
+perfmon_en_ctr2: // enable ctr2
+ srl r17, 1, r17 // get ctr2 enable
+ blbc r17, perfmon_en_return // do not enable ctr2 - return
+
+ blbc r9, perfmon_en_noclr2 // if enclr flag set, clear ctr2 field
+ lda r8, 0x3FFF(r31) // ctr2<13:0> mask
+ sll r8, pmctr_v_ctr2, r8
+ bic r14, r8, r14 // clear ctr bits
+ bic r13, r8, r13 // clear ctr bits
+
+perfmon_en_noclr2:
+//orig get_addr r8, 3<<pmctr_v_ctl2, r31
+ LDLI(r8, (3<<pmctr_v_ctl2))
+ and r25, r8, r12 //isolate frequency select bits for ctr2
+ bic r14, r8, r14 // clear ctl2 bits in preparation for enabling
+ or r14,r12,r14 // or in new ctl2 bits
+
+perfmon_en_return:
+ cmovlbs r16, r14, r13 // if pme enabled, move enables into pmctr
+ // else only do the counter clears
+ mtpr r13, ev5__pmctr // update pmctr ipr
+
+//;this code not needed for pass2 and later, but does not hurt to leave it in
+ lda r8, 0x3F(r31)
+//orig get_pmctr_ctl r25, r12 // read pmctr ctl; r12=adjusted impure pointer
+ mfpr r12, pt_impure
+ lda r12, CNS_Q_IPR(r12)
+ RESTORE_SHADOW(r25,CNS_Q_PM_CTL,r12);
+
+ sll r8, pmctr_v_ctl2, r8 // build ctl mask
+ and r8, r14, r14 // isolate new ctl bits
+ bic r25, r8, r25 // clear out old ctl value
+ or r25, r14, r14 // create new pmctr_ctl
+//orig store_reg1 pmctr_ctl, r14, r12, ipr=1
+ SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
+
+ br r31, perfmon_success
+
+
+// options...
+perfmon_ctl:
+
+// set mode
+//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
+ mfpr r12, pt_impure
+ lda r12, CNS_Q_IPR(r12)
+ RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
+
+//orig get_addr r8, (1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk), r31 // build mode mask for pmctr register
+ LDLI(r8, ((1<<pmctr_v_killu) | (1<<pmctr_v_killp) | (1<<pmctr_v_killk)))
+ mfpr r0, ev5__pmctr
+ and r17, r8, r25 // isolate pmctr mode bits
+ bic r0, r8, r0 // clear old mode bits
+ or r0, r25, r25 // or in new mode bits
+ mtpr r25, ev5__pmctr
+
+//;the following code will only be used in pass2, but should not hurt anything if run in pass1.
+ mfpr r8, icsr
+ lda r25, 1<<icsr_v_pma(r31) // set icsr<pma> if r17<0>=0
+ bic r8, r25, r8 // clear old pma bit
+ cmovlbs r17, r31, r25 // and clear icsr<pma> if r17<0>=1
+ or r8, r25, r8
+ ev5_pass2 mtpr r8, icsr // 4 bubbles to hw_rei
+ mfpr r31, pt0 // pad icsr write
+ mfpr r31, pt0 // pad icsr write
+
+//;the following code not needed for pass2 and later, but should work anyway.
+ bis r14, 1, r14 // set for select processes
+ blbs r17, perfmon_sp // branch if select processes
+ bic r14, 1, r14 // all processes
+perfmon_sp:
+//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
+ SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
+ br r31, perfmon_success
+
+// counter frequency select
+perfmon_freq:
+//orig get_pmctr_ctl r14, r12 // read shadow pmctr ctl; r12=adjusted impure pointer
+ mfpr r12, pt_impure
+ lda r12, CNS_Q_IPR(r12)
+ RESTORE_SHADOW(r14,CNS_Q_PM_CTL,r12);
+
+ lda r8, 0x3F(r31)
+//orig sll r8, pmctr_ctl_v_frq2, r8 // build mask for frequency select field
+// I guess this should be a shift of 4 bits from the above control register structure .. pb
+#define pmctr_ctl_v_frq2_SHIFT 4
+ sll r8, pmctr_ctl_v_frq2_SHIFT, r8 // build mask for frequency select field
+
+ and r8, r17, r17
+ bic r14, r8, r14 // clear out old frequency select bits
+
+ or r17, r14, r14 // or in new frequency select info
+//orig store_reg1 pmctr_ctl, r14, r12, ipr=1 // update pmctr_ctl register
+ SAVE_SHADOW(r14,CNS_Q_PM_CTL,r12); // r12 still has the adjusted impure ptr
+
+ br r31, perfmon_success
+
+// read counters
+perfmon_rd:
+ mfpr r0, ev5__pmctr
+ or r0, 1, r0 // or in return status
+ hw_rei // back to user
+
+// write counters
+perfmon_wr:
+ mfpr r14, ev5__pmctr
+ lda r8, 0x3FFF(r31) // ctr2<13:0> mask
+ sll r8, pmctr_v_ctr2, r8
+
+//orig get_addr r9, 0xFFFFFFFF, r31, verify=0 // ctr2<15:0>,ctr1<15:0> mask
+ LDLI(r9, (0xFFFFFFFF))
+ sll r9, pmctr_v_ctr1, r9
+ or r8, r9, r8 // or ctr2, ctr1, ctr0 mask
+ bic r14, r8, r14 // clear ctr fields
+ and r17, r8, r25 // clear all but ctr fields
+ or r25, r14, r14 // write ctr fields
+ mtpr r14, ev5__pmctr // update pmctr ipr
+
+ mfpr r31, pt0 // pad pmctr write (needed only to keep PVC happy)
+
+perfmon_success:
+ or r31, 1, r0 // set success
+ hw_rei // back to user
+
+perfmon_unknown:
+ or r31, r31, r0 // set fail
+ hw_rei // back to user
+
+#else
+
+// end of "real code", start of debug code
+
+//+
+// Debug environment:
+// (in pass2, always set icsr<pma> to ensure master counter enable is on)
+// R16 = 0 Write to on-chip performance monitor ipr
+// r17 = on-chip ipr
+// r0 = return value of read of on-chip performance monitor ipr
+// R16 = 1 Setup Cbox mux selects
+// r17 = Cbox mux selects in same position as in bc_ctl ipr.
+// r0 = return value of read of on-chip performance monitor ipr
+//
+//-
+pal_perfmon_debug:
+ mfpr r8, icsr
+ lda r9, 1<<icsr_v_pma(r31)
+ bis r8, r9, r8
+ mtpr r8, icsr
+
+ mfpr r0, ev5__pmctr // read old value
+ bne r16, cbox_mux_sel
+
+ mtpr r17, ev5__pmctr // update pmctr ipr
+ br r31, end_pm
+
+cbox_mux_sel:
+ // ok, now tackle cbox mux selects
+ ldah r14, 0xfff0(r31)
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+//orig get_bc_ctl_shadow r16 // bc_ctl returned
+ mfpr r16, pt_impure
+ lda r16, CNS_Q_IPR(r16)
+ RESTORE_SHADOW(r16,CNS_Q_BC_CTL,r16);
+
+ lda r8, 0x3F(r31) // build mux select mask
+ sll r8, BC_CTL_V_PM_MUX_SEL, r8
+
+ and r17, r8, r25 // isolate bc_ctl mux select bits
+ bic r16, r8, r16 // isolate old mux select bits
+ or r16, r25, r25 // create new bc_ctl
+ mb // clear out cbox for future ipr write
+ stqp r25, ev5__bc_ctl(r14) // store to cbox ipr
+ mb // clear out cbox for future ipr write
+//orig update_bc_ctl_shadow r25, r16 // r25=value, r16-overwritten with adjusted impure ptr
+ mfpr r16, pt_impure
+ lda r16, CNS_Q_IPR(r16)
+ SAVE_SHADOW(r25,CNS_Q_BC_CTL,r16);
+
+end_pm: hw_rei
+
+#endif
+
+
+//;The following code is a workaround for a cpu bug where Istream prefetches to
+//;super-page address space in user mode may escape off-chip.
+#if spe_fix != 0
+
+ ALIGN_BLOCK
+hw_rei_update_spe:
+ mfpr r12, pt_misc // get previous mode
+ srl r11, osfps_v_mode, r10 // isolate current mode bit
+ and r10, 1, r10
+ extbl r12, 7, r8 // get previous mode field
+ and r8, 1, r8 // isolate previous mode bit
+ cmpeq r10, r8, r8 // compare previous and current modes
+ beq r8, hw_rei_update_spe_5_
+ hw_rei // if same, just return
+
+hw_rei_update_spe_5_:
+
+#if fill_err_hack != 0
+
+ fill_error_hack
+#endif
+
+ mfpr r8, icsr // get current icsr value
+ ldah r9, (2<<(icsr_v_spe-16))(r31) // get spe bit mask
+ bic r8, r9, r8 // disable spe
+ xor r10, 1, r9 // flip mode for new spe bit
+ sll r9, icsr_v_spe+1, r9 // shift into position
+ bis r8, r9, r8 // enable/disable spe
+ lda r9, 1(r31) // now update our flag
+ sll r9, pt_misc_v_cm, r9 // previous mode saved bit mask
+ bic r12, r9, r12 // clear saved previous mode
+ sll r10, pt_misc_v_cm, r9 // current mode saved bit mask
+ bis r12, r9, r12 // set saved current mode
+ mtpr r12, pt_misc // update pt_misc
+ mtpr r8, icsr // update icsr
+
+#if osf_chm_fix != 0
+
+
+ blbc r10, hw_rei_update_spe_10_ // branch if not user mode
+
+ mb // ensure no outstanding fills
+ lda r12, 1<<dc_mode_v_dc_ena(r31) // User mode
+ mtpr r12, dc_mode // Turn on dcache
+ mtpr r31, dc_flush // and flush it
+ br r31, pal_ic_flush
+
+hw_rei_update_spe_10_: mfpr r9, pt_pcbb // Kernel mode
+ ldqp r9, osfpcb_q_Fen(r9) // get FEN
+ blbc r9, pal_ic_flush // return if FP disabled
+ mb // ensure no outstanding fills
+ mtpr r31, dc_mode // turn off dcache
+#endif
+
+
+ br r31, pal_ic_flush // Pal restriction - must flush Icache if changing ICSR<SPE>
+#endif
+
diff --git a/system/alpha/palcode/platform.h b/system/alpha/palcode/platform.h
new file mode 100644
index 000000000..a7777d941
--- /dev/null
+++ b/system/alpha/palcode/platform.h
@@ -0,0 +1,251 @@
+/*
+ * VID: [T1.2] PT: [Fri Apr 21 16:47:18 1995] SF: [platform.h]
+ * TI: [/sae_users/cruz/bin/vice -iplatform.s -l// -p# -DEB164 -h -m -aeb164 ]
+ */
+#define __PLATFORM_LOADED 1
+/*
+*****************************************************************************
+** *
+** Copyright © 1993, 1994 *
+** by Digital Equipment Corporation, Maynard, Massachusetts. *
+** *
+** All Rights Reserved *
+** *
+** Permission is hereby granted to use, copy, modify and distribute *
+** this software and its documentation, in both source code and *
+** object code form, and without fee, for the purpose of distribution *
+** of this software or modifications of this software within products *
+** incorporating an integrated circuit implementing Digital's AXP *
+** architecture, regardless of the source of such integrated circuit, *
+** provided that the above copyright notice and this permission notice *
+** appear in all copies, and that the name of Digital Equipment *
+** Corporation not be used in advertising or publicity pertaining to *
+** distribution of the document or software without specific, written *
+** prior permission. *
+** *
+** Digital Equipment Corporation disclaims all warranties and/or *
+** guarantees with regard to this software, including all implied *
+** warranties of fitness for a particular purpose and merchantability, *
+** and makes no representations regarding the use of, or the results *
+** of the use of, the software and documentation in terms of correctness, *
+** accuracy, reliability, currentness or otherwise; and you rely on *
+** the software, documentation and results solely at your own risk. *
+** *
+** AXP is a trademark of Digital Equipment Corporation. *
+** *
+*****************************************************************************
+**
+** FACILITY:
+**
+** DECchip 21164 OSF/1 PALcode
+**
+** MODULE:
+**
+** platform.h
+**
+** MODULE DESCRIPTION:
+**
+** Platform specific definitions.
+**
+** AUTHOR: Lance Berc (taken from EB164 code)
+**
+** CREATION DATE: 14-Jun-1995
+**
+** $Id: platform.h,v 1.1.1.1 1997/10/30 23:27:20 verghese Exp $
+**
+** MODIFICATION HISTORY:
+**
+** $Log: platform.h,v $
+** Revision 1.1.1.1 1997/10/30 23:27:20 verghese
+** current 10/29/97
+**
+ * Revision 1.1 1995/06/14 18:50:42 berc
+ * Initial revision
+ *
+*/
+
+#if !defined(CONSOLE_ENTRY)
+#define CONSOLE_ENTRY 0x10000
+#endif /* CONSOLE_ENTRY */
+
+#define DEBUGDEATH(c) \
+ lda a0, c(zero) ; \
+ br DebugDeath
+
+#define DEBUGSTORE(c) \
+ stq_p t0,0(zero) ; \
+ stq_p t1,8(zero) ; \
+ lda t0, 0x400(zero) ; \
+ sll t0, 29, t0 ; \
+ ldah t0, 0x280(t0) ; \
+9: lda t1, 0x140(t0) ; \
+ ldl_p t1, 0(t1) ; \
+ srl t1, 16, t1 ; \
+ and t1, 0x20, t1 ; \
+ beq t1, 9b ; \
+ lda t1, c(zero) ; \
+ stl_p t1, 0(t0) ; \
+ mb ; \
+ ldq_p t1, 8(zero) ; \
+ ldq_p t0, 0(zero)
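DEBUGSTORE spins on the serial port's line-status register and then stores the
character on the data/THR byte lane. A hedged C model of that sequence is
below; the addresses are the ones the macro builds (bit 39 for I/O space,
0x280 in the slot field, LSR at +0x140) and are physical-mode addresses, so
this is a model of the logic rather than host-runnable I/O (names are
illustrative):

#include <stdint.h>

#define DBG_UART_BASE ((0x400ULL << 29) + (0x280ULL << 16))  /* lda/sll/ldah above */
#define DBG_UART_THR  (DBG_UART_BASE + 0x000)                /* data register      */
#define DBG_UART_LSR  (DBG_UART_BASE + 0x140)                /* line status        */

static void debug_putc(uint32_t c)
{
    volatile uint32_t *lsr = (volatile uint32_t *)DBG_UART_LSR;
    volatile uint32_t *thr = (volatile uint32_t *)DBG_UART_THR;

    /* Wait for transmit-holding-register-empty: the longword load places the
     * LSR byte in bits <23:16>, and bit 5 of that byte is THRE. */
    while (((*lsr >> 16) & 0x20) == 0)
        ;
    *thr = c;        /* the macro follows the store with an mb */
}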
+
+
+/*
+** IPL translation table definitions:
+**
+** EB164 specific IRQ pins are
+**
+** Line IPL Source OSF/1 IPL
+** ---- --- ------ ---------
+** IRQ0 20 Corrected ECC error 7
+** IRQ1 21 PCI/ISA 3
+** IRQ2 22 Real Time Clock 5
+** IRQ3 23 SIO NMI, CIA errors 7
+**
+** The mask contains one byte for each IPL level, with IPL0 in the
+** least significant (right-most) byte and IPL7 in the most
+** significant (left-most) byte. Each byte in the mask maps the
+** OSF/1 IPL to the DC21164 IPL.
+**
+** OSF/1 IPL IPL
+** --------- ---
+** 0 0
+** 1 1
+** 2 2
+**      5       22     (to account for clock at IPL 22)
+** 4 21
+** 5 22 (to account for clock at IPL 21)
+** 6 30 (to account for powerfail)
+** 7 31
+*/
+
+#define INT_K_MASK_HIGH 0x1F1E1615
+#define INT_K_MASK_LOW 0x15020100
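The two halves above pack the translation table one byte per OSF/1 IPL, least
significant byte first. A small C sketch (illustrative helper name) of how a
lookup against this table behaves:

#include <stdint.h>

#define INT_K_MASK_HIGH 0x1F1E1615u
#define INT_K_MASK_LOW  0x15020100u

/* Return the DC21164 IPL for an OSF/1 IPL in 0..7 by indexing the byte table
 * formed from the two halves; e.g. OSF/1 IPL 3 yields 0x15 (21). */
static unsigned ev5_ipl_for_osf_ipl(unsigned osf_ipl)
{
    uint64_t table = ((uint64_t)INT_K_MASK_HIGH << 32) | INT_K_MASK_LOW;
    return (unsigned)((table >> (8 * osf_ipl)) & 0xff);
}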
+
+#define BYTE_ENABLE_SHIFT 5
+
+/*
+** Dallas DS1287A Real-Time Clock (RTC) Definitions:
+*/
+#define RTCADD 0x160000
+#define RTCDAT 0x170000
+
+
+/*
+** Serial Port (COM) Definitions:
+*/
+
+#define DLA_K_BRG 12 /* Baud Rate Divisor = 9600 */
+
+#define LSR_V_THRE 5 /* Xmit Holding Register Empty Bit */
+
+#define LCR_M_WLS 3 /* Word Length Select Mask */
+#define LCR_M_STB 4 /* Number Of Stop Bits Mask */
+#define LCR_M_PEN 8 /* Parity Enable Mask */
+#define LCR_M_DLAB 128 /* Divisor Latch Access Bit Mask */
+
+#define LCR_K_INIT (LCR_M_WLS | LCR_M_STB)
+
+#define MCR_M_DTR 1 /* Data Terminal Ready Mask */
+#define MCR_M_RTS 2 /* Request To Send Mask */
+#define MCR_M_OUT1 4 /* Output 1 Control Mask */
+#define MCR_M_OUT2 8 /* UART Interrupt Mask Enable */
+
+#define MCR_K_INIT (MCR_M_DTR | \
+ MCR_M_RTS | \
+ MCR_M_OUT1 | \
+ MCR_M_OUT2)
+
+/* CPU Adr[39:29]=0x500 select PCI Mem. */
+#define PCI_MEM 0x400
+#define SLOT_D_COM1 (0x140000)
+#define SLOT_D_COM2 (0x150000)
+
+#define COM1_RBR (SLOT_D_COM1 | (0x0 << 1)) /* Receive Buffer Register Offset */
+#define COM1_THR (SLOT_D_COM1 | (0x0 << 1)) /* Xmit Holding Register Offset */
+#define COM1_IER (SLOT_D_COM1 | (0x1 << 1)) /* Interrupt Enable Register Offset */
+#define COM1_IIR (SLOT_D_COM1 | (0x2 << 1)) /* Interrupt ID Register Offset */
+#define COM1_LCR (SLOT_D_COM1 | (0x3 << 1)) /* Line Control Register Offset */
+#define COM1_MCR (SLOT_D_COM1 | (0x4 << 1)) /* Modem Control Register Offset */
+#define COM1_LSR (SLOT_D_COM1 | (0x5 << 1)) /* Line Status Register Offset */
+#define COM1_MSR (SLOT_D_COM1 | (0x6 << 1)) /* Modem Status Register Offset */
+#define COM1_SCR (SLOT_D_COM1 | (0x7 << 1)) /* Scratch Register Offset */
+#define COM1_DLL (SLOT_D_COM1 | (0x8 << 1)) /* Divisor Latch (LS) Offset */
+#define COM1_DLH (SLOT_D_COM1 | (0x9 << 1)) /* Divisor Latch (MS) Offset */
+
+#define COM2_RBR (SLOT_D_COM2 | (0x0 << 1))
+#define COM2_THR (SLOT_D_COM2 | (0x0 << 1))
+#define COM2_IER (SLOT_D_COM2 | (0x1 << 1))
+#define COM2_IIR (SLOT_D_COM2 | (0x2 << 1))
+#define COM2_LCR (SLOT_D_COM2 | (0x3 << 1))
+#define COM2_MCR (SLOT_D_COM2 | (0x4 << 1))
+#define COM2_LSR (SLOT_D_COM2 | (0x5 << 1))
+#define COM2_MSR (SLOT_D_COM2 | (0x6 << 1))
+#define COM2_SCR (SLOT_D_COM2 | (0x7 << 1))
+#define COM2_DLL (SLOT_D_COM2 | (0x8 << 1))
+#define COM2_DLH (SLOT_D_COM2 | (0x9 << 1))
+
+
+/*
+** Macro to define a port address
+*/
+#define IO_MASK 0x7FFFFFF
+
+/* NOTE ON ADDITIONAL PORT DEFINITION:
+**
+** We also need to set bit 39! Since the span between bit 39
+** and the byte enable field is more than 32, we set bit 39 in the
+** port macros.
+*/
+
+/*
+** Macro to write a byte literal to a specified port
+*/
+#define OutPortByte(port,val,tmp0,tmp1) \
+ LDLI (tmp0, port); \
+ sll tmp0, BYTE_ENABLE_SHIFT, tmp0; \
+ lda tmp1, PCI_MEM(zero); \
+ sll tmp1, 29, tmp1; \
+ bis tmp0, tmp1, tmp0; \
+ lda tmp1, (val)(zero); \
+ sll tmp1, 8*(port & 3), tmp1; \
+ stl_p tmp1, 0x00(tmp0); \
+ mb
+
+/*
+** Macro to write a byte from a register to a specified port
+*/
+#define OutPortByteReg(port,reg,tmp0,tmp1) \
+ LDLI (tmp0, port); \
+ sll tmp0, BYTE_ENABLE_SHIFT, tmp0; \
+ lda tmp1, PCI_MEM(zero); \
+ sll tmp1, 29, tmp1; \
+ bis tmp0, tmp1, tmp0; \
+ sll reg, 8*(port & 3), tmp1; \
+ stl_p tmp1, 0x00(tmp0); \
+ mb
+
+/*
+** Macro to write a longword from a register to a specified port
+*/
+#define OutPortLongReg(port,reg,tmp0,tmp1) \
+ LDLI (tmp0, port); \
+ sll tmp0, BYTE_ENABLE_SHIFT, tmp0; \
+ lda tmp1, PCI_MEM(zero); \
+ sll tmp1, 29, tmp1; \
+ bis tmp0, tmp1, tmp0; \
+ stl_p tmp1, 0x18(tmp0); \
+ mb
+
+/*
+** Macro to read a byte from a specified port
+*/
+#define InPortByte(port,tmp0,tmp1) \
+ LDLI (tmp0, port); \
+ sll tmp0, BYTE_ENABLE_SHIFT, tmp0; \
+ lda tmp1, PCI_MEM(zero); \
+ sll tmp1, 29, tmp1; \
+ bis tmp0, tmp1, tmp0; \
+ ldl_p tmp0, 0x00(tmp0); \
+ srl tmp0, (8 * (port & 3)), tmp0; \
+ zap tmp0, 0xfe, tmp0
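All of the port macros above share the same address arithmetic; a standalone C
sketch of it follows (helper names are illustrative, constants are the ones
defined in this header):

#include <stdint.h>

/* Physical address generated for a port: the port number moves into the
 * byte-enable field, and PCI_MEM (0x400) shifted by 29 supplies
 * CPU Adr[39:29], which also sets bit 39 as the note above requires. */
static uint64_t port_phys_addr(uint64_t port)
{
    const uint64_t byte_enable_shift = 5;      /* BYTE_ENABLE_SHIFT */
    const uint64_t pci_mem           = 0x400;  /* PCI_MEM           */
    return (port << byte_enable_shift) | (pci_mem << 29);
}

/* The byte of interest travels on lane (port & 3): writes shift the value
 * left by 8*(port & 3) before the longword store, and InPortByte shifts
 * right by the same amount and keeps the low byte (the zap with 0xfe). */
static unsigned byte_lane_shift(uint64_t port)
{
    return 8u * (unsigned)(port & 3);
}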
diff --git a/system/alpha/palcode/platform_srcmax.s b/system/alpha/palcode/platform_srcmax.s
new file mode 100644
index 000000000..b74573132
--- /dev/null
+++ b/system/alpha/palcode/platform_srcmax.s
@@ -0,0 +1,1825 @@
+// build_fixed_image: not sure what this means
+// real_mm to be replaced during rewrite
+// remove_save_state/remove_restore_state could be removed to save space ??
+#define egore 0
+#define acore 0
+#define beh_model 0
+#define ev5_p2 1
+#define ev5_p1 0
+#define ldvpte_bug_fix 1
+#define spe_fix 0
+#define osf_chm_fix 0
+#define build_fixed_image 0
+#define enable_p4_fixups 0
+#define osf_svmin 1
+#define enable_physical_console 0
+#define fill_err_hack 0
+#define icflush_on_tbix 0
+#define max_cpuid 1
+#define perfmon_debug 0
+#define rax_mode 0
+
+#define hw_rei_spe hw_rei
+
+#include "ev5_defs.h"
+#include "ev5_impure.h"
+#include "ev5_alpha_defs.h"
+#include "ev5_paldef.h"
+#include "ev5_osfalpha_defs.h"
+#include "fromHudsonMacros.h"
+#include "fromHudsonOsf.h"
+#include "dc21164FromGasSources.h"
+#include "cserve.h"
+
+#define ldlp ldl_p
+#define ldqp ldq_p
+#define stqp stq_p
+#define stqpc stqp
+
+#define pt_entInt pt_entint
+#define pt_entArith pt_entarith
+#define mchk_size ((mchk_cpu_base + 7 + 8) &0xfff8)
+#define mchk_flag CNS_Q_FLAG
+#define mchk_sys_base 56
+#define mchk_cpu_base (CNS_Q_LD_LOCK + 8)
+#define mchk_offsets CNS_Q_EXC_ADDR
+#define mchk_mchk_code 8
+#define mchk_ic_perr_stat CNS_Q_ICPERR_STAT
+#define mchk_dc_perr_stat CNS_Q_DCPERR_STAT
+#define mchk_sc_addr CNS_Q_SC_ADDR
+#define mchk_sc_stat CNS_Q_SC_STAT
+#define mchk_ei_addr CNS_Q_EI_ADDR
+#define mchk_bc_tag_addr CNS_Q_BC_TAG_ADDR
+#define mchk_fill_syn CNS_Q_FILL_SYN
+#define mchk_ei_stat CNS_Q_EI_STAT
+#define mchk_exc_addr CNS_Q_EXC_ADDR
+#define mchk_ld_lock CNS_Q_LD_LOCK
+#define osfpcb_q_Ksp pcb_q_ksp
+#define pal_impure_common_size ((0x200 + 7) & 0xfff8)
+
+#define RTCADD 0x160000
+#define RTCDAT 0x170000
+
+/* Serial Port (COM) Definitions: */
+#define DLA_K_BRG 12 /* Baud Rate Divisor = 9600 */
+
+#define LSR_V_THRE 5 /* Xmit Holding Register Empty Bit */
+
+#define LCR_M_WLS 3 /* Word Length Select Mask */
+#define LCR_M_STB 4 /* Number Of Stop Bits Mask */
+#define LCR_M_PEN 8 /* Parity Enable Mask */
+#define LCR_M_DLAB 128 /* Divisor Latch Access Bit Mask */
+
+#define LCR_K_INIT (LCR_M_WLS | LCR_M_STB)
+
+#define MCR_M_DTR 1 /* Data Terminal Ready Mask */
+#define MCR_M_RTS 2 /* Request To Send Mask */
+#define MCR_M_OUT1 4 /* Output 1 Control Mask */
+#define MCR_M_OUT2 8 /* UART Interrupt Mask Enable */
+
+#define MCR_K_INIT (MCR_M_DTR | \
+ MCR_M_RTS | \
+ MCR_M_OUT1 | \
+ MCR_M_OUT2)
+#define SLOT_D_COM1 (0x140000)
+#define COM1_RBR (SLOT_D_COM1 | (0x0 << 1)) /* Receive Buffer Register Offset */
+#define COM1_THR (SLOT_D_COM1 | (0x0 << 1)) /* Xmit Holding Register Offset */
+#define COM1_IER (SLOT_D_COM1 | (0x1 << 1)) /* Interrupt Enable Register Offset */
+#define COM1_IIR (SLOT_D_COM1 | (0x2 << 1)) /* Interrupt ID Register Offset */
+#define COM1_LCR (SLOT_D_COM1 | (0x3 << 1)) /* Line Control Register Offset */
+#define COM1_MCR (SLOT_D_COM1 | (0x4 << 1)) /* Modem Control Register Offset */
+#define COM1_LSR (SLOT_D_COM1 | (0x5 << 1)) /* Line Status Register Offset */
+#define COM1_MSR (SLOT_D_COM1 | (0x6 << 1)) /* Modem Status Register Offset */
+#define COM1_SCR (SLOT_D_COM1 | (0x7 << 1)) /* Scratch Register Offset */
+#define COM1_DLL (SLOT_D_COM1 | (0x8 << 1)) /* Divisor Latch (LS) Offset */
+#define COM1_DLH (SLOT_D_COM1 | (0x9 << 1)) /* Divisor Latch (MS) Offset */
+
+
+#define BYTE_ENABLE_SHIFT 5
+#define PCI_MEM 0x400
+
+#ifdef SIMOS
+#define OutPortByte(port,val,a,b)
+#define InPortByte(port,val,a)
+#else
+
+#define OutPortByte(port,val,tmp0,tmp1) \
+ LDLI (tmp0, port); \
+ sll tmp0, BYTE_ENABLE_SHIFT, tmp0; \
+ lda tmp1, PCI_MEM(zero); \
+ sll tmp1, 29, tmp1; \
+ bis tmp0, tmp1, tmp0; \
+ lda tmp1, (val)(zero); \
+ sll tmp1, 8*(port & 3), tmp1; \
+ stl_p tmp1, 0x00(tmp0); \
+ mb
+
+#define InPortByte(port,tmp0,tmp1) \
+ LDLI (tmp0, port); \
+ sll tmp0, BYTE_ENABLE_SHIFT, tmp0; \
+ lda tmp1, PCI_MEM(zero); \
+ sll tmp1, 29, tmp1; \
+ bis tmp0, tmp1, tmp0; \
+ ldl_p tmp0, 0x00(tmp0); \
+ srl tmp0, (8 * (port & 3)), tmp0; \
+ zap tmp0, 0xfe, tmp0
+#endif
+
+#define r0 $0
+#define r1 $1
+#define r2 $2
+#define r3 $3
+#define r4 $4
+#define r5 $5
+#define r6 $6
+#define r7 $7
+#define r8 $8
+#define r9 $9
+#define r10 $10
+#define r11 $11
+#define r12 $12
+#define r13 $13
+#define r14 $14
+#define r15 $15
+#define r16 $16
+#define r17 $17
+#define r18 $18
+#define r19 $19
+#define r20 $20
+#define r21 $21
+#define r22 $22
+#define r23 $23
+#define r24 $24
+#define r25 $25
+#define r26 $26
+#define r27 $27
+#define r28 $28
+#define r29 $29
+#define r30 $30
+#define r31 $31
+
+#ifdef SIMOS
+#define DEBUGSTORE(c)
+#define DEBUG_EXC_ADDR()
+#else
+#define DEBUGSTORE(c) \
+ lda r13, c(zero) ; \
+ bsr r25, debugstore
+
+#define DEBUG_EXC_ADDR() \
+ bsr r25, put_exc_addr; \
+ DEBUGSTORE(13) ; \
+ DEBUGSTORE(10)
+
+#endif /* SIMOS */
+
+#define ALIGN_BLOCK \
+ .align 5
+
+#define ALIGN_BRANCH \
+ .align 3
+
+// This module is for all the OSF system specific code.
+// This version is for the EV5 behavioral model.
+// .sbttl "Edit History"
+//+
+// Who Rev When What
+// ------------ --- ----------- --------------------------------
+//
+// [ deleted several pages of checking comments to make the file smaller - lance ]
+//
+// PALcode merge done here .... JRH ....8/30/94
+// JM 1.00 1-aug-1994 Add support for pass2 to sys_perfmon code
+// JM 1.01 2-aug-1994 Initialize cns_bc_config in reset
+// JM 1.02 5-aug-1994 Add ARITH_AND_MCHK routine.
+// Fix typo in bc_config init in rax reset flow.
+// JM 1.03 19-aug-1994 BUG: sys_perfmon not generating mux control mask properly, overwriting counters 0 & 1;
+// mode select masks messed up too.
+//
+// JH 1.03A 26-Oct-1994 Log PCI error2 register ...needed for CIA Pass 2 support
+//
+// JM 1.04 16-sep-1994 Moved perfmon code to ev5_osf_pal.m64
+// JM 1.05 9-jan-1995 Fix to EI_STAT entry in MCHK logout frame -- OR in lower 28 bits (previously wiped out)
+// JM 1.06 2-feb-1995 Change "HW_REI" to "HW_REI_SPE" everywhere that we may be changing to kernel mode from user
+// (part of super-page bug fix).
+// Initialize DC_TEST_CTL in reset flow.
+//
+// PALcode merge done here .... TLC ....02/06/95
+//
+// JM 1.07 3-mar-1995 Add init of dc_test_ctl; fix pvc_jsr statement in ret from set_sc_bc_ctl
+// ES 1.08 17-mar-1995 Add osf_chm_fix to disable dcache in reset
+//
+// PALcode merge done here .... TLC ....03/21/95
+//
+//
+// Entry points
+// SYS_CFLUSH - Cache flush
+// SYS_CSERVE - Console service
+// SYS_WRIPIR - interprocessor interrupts
+// SYS_HALT_INTERRUPT - Halt interrupt
+// SYS_PASSIVE_RELEASE - Interrupt, passive release
+// SYS_INTERRUPT - Interrupt
+// SYS_RESET - Reset
+// SYS_ENTER_CONSOLE
+// SYS_PERFMON - Performance monitoring setup
+//
+//------------------------------------------------------------------------------
+//
+// EB164 specific library files ....
+//
+//------------------------------------------------------------------------------
+#if eb164 != 0
+
+
+// .sbttl "EB164 firmware required definition files"
+_pal_def // console definition file
+_cserve_def // cserve definition file
+
+#define hlt_c_callback 33
+
+//
+// rtc based constants
+//
+#define rtc_base 0xe00 // RTC device is at offset 70 << 5
+#define rtc_ptr 0x0 // RTC index register (offset 0 from rtc base)
+#define rtc_data 0x20 // RTC data register (offset 1 from rtc base)
+#define rtc_idx_csrc 0x0c // point to CSR C in the TOY chip
+
+//
+// interrupt vectors
+//
+#define vec_eisa_base 0x800 // base for pci vectors
+#endif
+
+ ALIGN_BLOCK
+ .globl sys_wripir
+sys_wripir: // R16 has the processor number.
+ // The XXM has no interprocessor interrupts
+ hw_rei
+
+// .sbttl "CFLUSH- PALcode for CFLUSH instruction"
+//+
+// SYS_CFLUSH
+// Entry:
+//
+// R16 - contains the PFN of the page to be flushed
+//
+// Function:
+// Flush all Dstream caches of 1 entire page
+//
+//-
+ ALIGN_BLOCK
+ .globl sys_cflush
+sys_cflush:
+ // convert pfn to addr, and clean off <63:20>
+ sll r16, ((page_offset_size_bits)+(63-20)), r12
+
+ lda r13, 0x10(r31) // assume 16Mbytes of cache
+ sll r13, 20, r13 // convert to bytes
+
+ srl r12, 63-20, r12 // shift back to normal position
+ xor r12, r13, r12 // xor addr bit that aligns with cache size
+
+ or r31, 8192/(32*8), r13 // get count of loads
+ nop
+
+CFLUSH_LOOP:
+ subq r13, 1, r13 // decr counter
+ mfpr r25, ev5__intid // Fetch level of interruptor
+
+ ldqp r31, 32*0(r12) // do a load
+ ldqp r31, 32*1(r12) // do next load
+
+ ldqp r31, 32*2(r12) // do next load
+ ldqp r31, 32*3(r12) // do next load
+
+ ldqp r31, 32*4(r12) // do next load
+ ldqp r31, 32*5(r12) // do next load
+
+ ldqp r31, 32*6(r12) // do next load
+ ldqp r31, 32*7(r12) // do next load
+
+ mfpr r14, ev5__ipl // Fetch current level
+ lda r12, (32*8)(r12) // skip to next cache block addr
+
+ cmple r25, r14, r25 // R25 = 1 if intid .less than or eql ipl
+ beq r25, CFLUSH_LOOP_10_ // if any int's pending, re-queue CFLUSH
+
+ bne r13, CFLUSH_LOOP // loop till done
+ hw_rei // back to user
+
+ ALIGN_BRANCH
+CFLUSH_LOOP_10_: // Here if interrupted
+ mfpr r12, exc_addr
+ subq r12, 4, r12 // Backup PC to point to CFLUSH
+
+ mtpr r12, exc_addr
+ nop
+
+ mfpr r31, pt0 // Pad exc_addr write
+ hw_rei
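The flush above works by reading addresses that index the same cache lines as
the victim page but carry a different tag. A hedged C rendering of the idea,
assuming page_offset_size_bits is 13 and leaving out the interrupt re-queue
check that the loop performs between blocks (names are illustrative):

#include <stdint.h>

#define ASSUMED_PAGE_SHIFT 13                     /* page_offset_size_bits   */
#define ASSUMED_BC_SIZE    (0x10ULL << 20)        /* "assume 16Mbytes" above */

/* Displace one 8 KB page from the data caches: keep only the low address
 * bits of pfn << page_shift, flip the bit that aliases with the assumed
 * cache size, and read 32-byte blocks across the whole page. */
static void cflush_page_model(uint64_t pfn)
{
    uint64_t va = ((pfn << ASSUMED_PAGE_SHIFT) & ((1ULL << 21) - 1))
                ^ ASSUMED_BC_SIZE;

    for (uint64_t off = 0; off < 8192; off += 32)
        (void)*(volatile uint64_t *)(va + off);   /* ldqp r31, 32*n(r12) */
}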
+
+
+// .sbttl "CSERVE- PALcode for CSERVE instruction"
+//+
+// SYS_CSERVE
+//
+// Function:
+// Various functions for private use of console software
+//
+// option selector in r0
+// arguments in r16....
+//
+//
+// r0 = 0 unknown
+//
+// r0 = 1 ldqp
+// r0 = 2 stqp
+// args, are as for normal STQP/LDQP in VMS PAL
+//
+// r0 = 3 dump_tb's
+//      r16 = destination PA to dump the TBs to.
+//
+// r0<0> = 1, success
+// r0<0> = 0, failure, or option not supported
+// r0<63:1> = (generally 0, but may be function dependent)
+// r0 - load data on ldqp
+//
+//-
+ ALIGN_BLOCK
+ .globl sys_cserve
+sys_cserve:
+ cmpeq r18, CSERVE_K_RD_IMPURE, r0
+ bne r0, Sys_Cserve_Rd_Impure
+
+ cmpeq r18, CSERVE_K_JTOPAL, r0
+ bne r0, Sys_Cserve_Jtopal
+
+ // For now the XXM doesn't support any console callbacks
+ DEBUGSTORE(0x40)
+ bis r18, zero, r0
+ bsr r25, put_hex
+ DEBUGSTORE(13)
+ DEBUGSTORE(10)
+ or r31, r31, r0
+ hw_rei // and back we go
+
+Sys_Cserve_Rd_Impure:
+ mfpr r0, pt_impure // Get base of impure scratch area.
+ hw_rei
+
+// .sbttl "SYS_INTERRUPT - Interrupt processing code"
+//+
+// SYS_INTERRUPT
+//
+// Current state:
+// Stack is pushed
+// ps, sp and gp are updated
+// r12, r14 - available
+// r13 - INTID (new EV5 IPL)
+// r14 - exc_addr
+// r25 - ISR
+// r16, r17, r18 - available
+//
+//-
+ ALIGN_BLOCK
+ .globl sys_interrupt
+sys_interrupt:
+ extbl r12, r14, r14 // Translate new OSFIPL->EV5IPL
+ mfpr r29, pt_kgp // update gp
+
+ mtpr r14, ev5__ipl // load the new IPL into Ibox
+ cmpeq r13, 31, r12
+ bne r12, sys_int_mchk_or_crd // Check for level 31 interrupt (machine check or crd)
+
+ cmpeq r13, 30, r12
+ bne r12, sys_int_powerfail // Check for level 30 interrupt (powerfail)
+
+ cmpeq r13, 29, r12
+ bne r12, sys_int_perf_cnt // Check for level 29 interrupt (performance counters)
+
+ cmpeq r13, 23, r12
+ bne r12, sys_int_23 // Check for level 23 interrupt
+
+ cmpeq r13, 22, r12
+ bne r12, sys_int_22 // Check for level 22 interrupt (clock)
+
+ cmpeq r13, 21, r12
+ bne r12, sys_int_21 // Check for level 21 interrupt
+
+ cmpeq r13, 20, r12
+ bne r12, sys_int_20 // Check for level 20 interrupt (I/O)
+
+ mfpr r14, exc_addr // ooops, something is wrong
+ br r31, pal_pal_bug_check_from_int
+
+//+
+//sys_int_2*
+// Routines to handle device interrupts at IPL 23-20.
+// System specific method to ack/clear the interrupt, detect passive release,
+// detect interprocessor (22), interval clock (22), corrected
+// system error (20)
+//
+// Current state:
+// Stack is pushed
+// ps, sp and gp are updated
+// r12, r14 - available
+// r13 - INTID (new EV5 IPL)
+// r25 - ISR
+//
+// On exit:
+// Interrupt has been ack'd/cleared
+// a0/r16 - signals IO device interrupt
+// a1/r17 - contains interrupt vector
+// exit to ent_int address
+//
+//-
+ ALIGN_BLOCK
+sys_int_23:
+// INT23 is unused on the XXM
+ nop
+ DEBUGSTORE(0x21)
+ DEBUGSTORE(0x32)
+ DEBUGSTORE(0x33)
+ DEBUGSTORE(13)
+ DEBUGSTORE(10)
+ lda r14, 0xffff(zero)
+ br r31, sys_int_mchk // log as a machine check
+
+// INT22 is the Real Time Clock
+//
+// Dismiss the interrupt in the TOY
+// Dispatch to kernel
+//
+ ALIGN_BLOCK
+sys_int_22:
+ OutPortByte(RTCADD,0x0C,r12,r14)// Set up RTCADD to index register C.
+ InPortByte(RTCDAT,r12,r14) // Read to clear interrupt.
+ mfpr r12, pt_entInt
+ lda r17, 0x600(r31) // r17 = interrupt vector for interval timer
+ mtpr r12, exc_addr
+// DEBUGSTORE(0x2a)
+// DEBUG_EXC_ADDR()
+ lda r16, 0x1(r31) // r16 = interrupt type for interval timer
+ STALL
+ hw_rei
+
+ ALIGN_BLOCK
+sys_int_21:
+ // Not connected on the XXM
+ nop
+ DEBUGSTORE(0x21)
+ DEBUGSTORE(0x32)
+ DEBUGSTORE(0x31)
+ DEBUGSTORE(13)
+ DEBUGSTORE(10)
+ lda r14, 0xffff(zero)
+ br r31, sys_int_mchk // go log the interrupt as a machine check
+
+// INT20 are device interrupts
+// Dispatch to kernel
+//
+ ALIGN_BLOCK
+sys_int_20:
+ mfpr r12, pt_entInt // Get pointer to kernel handler.
+ lda r17, 0x800(zero) // Hardcode device interrupt for now
+ mtpr r12, exc_addr // Load kernel entry address
+ bis zero, 0x3, r16 // Signal I/O device interrupt
+ STALL
+ hw_rei
+
+
+//+
+// sys_passive_release
+// Just pretend the interrupt never occurred.
+//
+// make sure we restore the old PS before calling this ....
+//-
+ ALIGN_BRANCH
+ .globl sys_passive_release
+sys_passive_release:
+ mtpr r11, ev5__dtb_cm // Restore Mbox current mode for ps
+ nop
+
+ mfpr r31, pt0 // Pad write to dtb_cm
+
+ hw_rei_spe
+
+//+
+//sys_int_powerfail
+// The XXM doesn't generate these, right?
+
+ ALIGN_BLOCK
+sys_int_powerfail:
+ nop
+ lda r14, 0xffff(zero)
+ br r31, sys_int_mchk // go log the interrupt as a machine check
+
+//+
+// sys_halt_interrupt
+// A halt interrupt has been detected. Pass control to the console.
+//
+//
+//-
+ ALIGN_BLOCK
+ .globl sys_halt_interrupt
+sys_halt_interrupt:
+ // wait for halt to go away
+SYS_HALT_PIN: // REPEAT
+ mfpr r25, ev5__isr // : Fetch interrupt summary register
+ srl r25, isr_v_hlt, r25 // : Get HLT bit
+ blbs r25, SYS_HALT_PIN // UNTIL interrupt goes away
+ LDLI (r25, 25000000) // Loop 100msec on an XXM
+SYS_HALT_KEY_DEBOUNCE:
+ subq r25, 1, r25
+ bne r25, SYS_HALT_KEY_DEBOUNCE
+ mfpr r25, ev5__isr
+ srl r25, isr_v_hlt, r25
+ blbs r25, SYS_HALT_PIN
+
+ mtpr r11, dtb_cm // Restore Mbox current mode
+ mtpr r0, pt0
+// pvc_jsr updpcb, bsr=1
+// bsr r0, pal_update_pcb // update the pcb
+
+// lda r0, hlt_c_hw_halt(r31) // set halt code to hw halt
+ br r31, sys_enter_console // enter the console
+
+
+//+
+// sys_int_mchk_or_crd
+//
+// Current state:
+// Stack is pushed
+// ps, sp and gp are updated
+// r12
+// r13 - INTID (new EV5 IPL)
+// r14 - exc_addr
+// r25 - ISR
+// r16, r17, r18 - available
+//
+//-
+ ALIGN_BLOCK
+
+sys_int_mchk_or_crd:
+ srl r25, isr_v_mck, r12
+ lda r14, 7(zero)
+ blbs r12, sys_int_mchk
+ bsr r12, sys_int_mchk // on the XXM it's always a mchk
+
+
+
+// .sbttl "SYS_INT_MCHK - MCHK Interrupt code"
+//+
+// Machine check interrupt from the system. Setup and join the
+// regular machine check flow.
+// On exit:
+// pt0 - saved r0
+// pt1 - saved r1
+// pt4 - saved r4
+// pt5 - saved r5
+// pt6 - saved r6
+// pt10 - saved exc_addr
+// pt_misc<47:32> - mchk code
+// pt_misc<31:16> - scb vector
+// r14 - base of Cbox IPRs in IO space
+// MCES<mchk> is set
+//-
+ ALIGN_BLOCK
+sys_int_mchk:
+//
+// Common code to setup machine so we can join with
+// the code to log processor detected machine checks
+//
+// Input Registers:
+//
+// r14 - machine check code
+//
+ mfpr r12, exc_addr
+
+ mtpr r6, pt6
+ mtpr r12, pt10 // Stash exc_addr
+
+ bis r14, r31, r6 // save machine check code
+ sll r14, 32, r14 // Move mchk code to position
+
+ mfpr r12, pt_misc // Get MCES and scratch
+ mtpr r0, pt0 // Stash for scratch
+
+ zap r12, 0x3c, r12 // Clear scratch
+ blbs r12, sys_double_machine_check // MCHK halt if double machine check
+
+ or r12, r14, r12 // Combine mchk code
+ lda r14, scb_v_sysmchk(r31) // Get SCB vector
+
+ sll r14, 16, r14 // Move SCBv to position
+ or r12, r14, r14 // Combine SCBv
+
+ bis r14, 1<<mces_v_mchk, r14 // Set MCES<MCHK> bit
+ mtpr r14, pt_misc // Save mchk code!scbv!whami!mces
+
+ ldah r14, 0xfff0(r31)
+ mtpr r1, pt1 // Stash for scratch
+
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+ mtpr r4, pt4
+
+ mtpr r5, pt5
+
+ // Start to collect the IPRs. Common entry point for mchk flows.
+ //
+ // Current state:
+ // pt0 - saved r0
+ // pt1 - saved r1
+ // pt4 - saved r4
+ // pt5 - saved r5
+ // pt6 - saved r6
+ // pt10 - saved exc_addr
+ // pt_misc<47:32> - mchk code
+ // pt_misc<31:16> - scb vector
+ // r6 - saved machine check code
+ // r0, r1, r4, r5, r12, r13, r25 - available
+ // r8, r9, r10 - available as all loads are physical
+ // MCES<mchk> is set and machine check code is saved
+
+ ldah r14, 0xfff0(r31)
+ mtpr r1, pt1 // Stash for scratch - 30 instructions
+
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+
+ mb // MB before reading Scache IPRs
+ mfpr r1, icperr_stat
+
+ mfpr r8, dcperr_stat
+ mtpr r31, dc_flush // Flush the Dcache
+
+ mfpr r31, pt0 // Pad Mbox instructions from dc_flush
+ mfpr r31, pt0
+ nop
+ nop
+
+ ldqp r9, sc_addr(r14)// SC_ADDR IPR
+ bis r9, r31, r31 // Touch ld to make sure it completes before
+ // read of SC_STAT
+ ldqp r10, sc_stat(r14) // SC_STAT, also unlocks SC_ADDR
+
+ ldqp r12, ei_addr(r14)// EI_ADDR IPR
+ ldqp r13, bc_tag_addr(r14) // BC_TAG_ADDR IPR
+ ldqp r0, fill_syn(r14) // FILL_SYN IPR
+ // Touch lds to make sure they complete before reading EI_STAT
+ bis r12, r13, r31
+ bis r0, r0, r31
+ ldqp r25, ei_stat(r14) // EI_STAT, unlock EI_ADDR, BC_TAG_ADDR, FILL_SYN
+	ldqp	r31, ei_stat(r14)	// Read again to ensure it is unlocked
+ br r31, sys_mchk_write_logout_frame // Join common machine check flow
+
+
+// .sbttl "SYS_INT_PERF_CNT - Performance counter interrupt code"
+//+
+//sys_int_perf_cnt
+//
+// A performance counter interrupt has been detected. The stack has been pushed.
+// IPL and PS are updated as well.
+//
+// on exit to interrupt entry point ENTINT::
+// a0 = osfint_c_perf
+// a1 = scb_v_perfmon (650)
+// a2 = 0 if performance counter 0 fired
+// a2 = 1 if performance counter 1 fired
+// a2 = 2 if performance counter 2 fired
+// (if more than one counter overflowed, an interrupt will be
+// generated for each counter that overflows)
+//
+//
+//-
+ ALIGN_BLOCK
+sys_int_perf_cnt: // Performance counter interrupt
+ lda r17, scb_v_perfmon(r31) // a1 to interrupt vector
+ mfpr r25, pt_entInt
+
+ lda r16, osfint_c_perf(r31) // a0 to perf counter code
+ mtpr r25, exc_addr
+
+ //isolate which perf ctr fired, load code in a2, and ack
+ mfpr r25, isr
+ or r31, r31, r18 // assume interrupt was pc0
+
+ srl r25, isr_v_pc1, r25 // isolate
+ cmovlbs r25, 1, r18 // if pc1 set, load 1 into r14
+
+ srl r25, 1, r25 // get pc2
+ cmovlbs r25, 2, r18 // if pc2 set, load 2 into r14
+
+ lda r25, 1(r31) // get a one
+ sll r25, r18, r25
+
+ sll r25, hwint_clr_v_pc0c, r25 // ack only the perf counter that generated the interrupt
+ mtpr r25, hwint_clr
+
+ hw_rei_spe
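The counter selection above relies on the ISR PC1/PC2 bits being adjacent and
on cmovlbs letting the later test win. A hedged C sketch of the same decision
and of the hwint_clr ack it builds, with the field positions left symbolic
(as in the PAL source) and assuming the PC0C/PC1C/PC2C ack bits are adjacent,
which is what the shift by the counter number relies on:

#include <stdint.h>

/* Decide which counter to report in a2: 0 unless PC1 fired, 2 if PC2 fired
 * (PC2 takes precedence when more than one bit is set; a separate interrupt
 * is delivered for each counter that overflows anyway). */
static unsigned perf_counter_from_isr(uint64_t isr, unsigned isr_v_pc1)
{
    unsigned which = 0;
    uint64_t t = isr >> isr_v_pc1;
    if (t & 1)
        which = 1;
    if ((t >> 1) & 1)
        which = 2;
    return which;
}

/* Ack only the counter that generated the interrupt. */
static uint64_t hwint_clr_ack(unsigned which, unsigned hwint_clr_v_pc0c)
{
    return 1ULL << (hwint_clr_v_pc0c + which);
}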
+
+
+
+// .sbttl "System specific RESET code"
+//+
+// RESET code
+// On entry:
+// r1 = pal_base +8
+//
+// Entry state on trap:
+// r0 = whami
+// r2 = base of scratch area
+// r3 = halt code
+// and the following 3 if init_cbox is enabled:
+// r5 = sc_ctl
+// r6 = bc_ctl
+// r7 = bc_cnfg
+//
+// Entry state on switch:
+// r17 - new PC
+// r18 - new PCBB
+// r19 - new VPTB
+//
+//-
+ ALIGN_BLOCK
+ .globl sys_reset
+sys_reset:
+ bis r1, r31, r22
+ bis r2, r31, r23
+ bis r3, r31, r24
+
+ lda r0, 0(zero) // The XXM has only one CPU
+ lda r2, 0x2000(zero) // KLUDGE the impure area address
+ lda r3, 0(zero) // Machine restart
+ mtpr r31, pt_misc // Zero out whami & swppal flag
+ mtpr r31, pt0 // halt code of "reset"
+ mtpr r31, pt1 // whami is always 0 on XXM
+
+/* Check to see if the transfer from the POST (Power-on-Self Test) code
+** is following a standard protocol and that the other input parameter
+** values may be trusted. Register a3 (r19) will contain a signature if so.
+**
+** Register values:
+**
+** t0 (r1) bcCtl value, saved into pt4
+** t1 (r2) bcCfg value
+** t2 (r3) bcCfgOff value (values for bcache off)
+**
+** s6 (r15) encoded srom.s RCS revision
+** a0 (r16) processor identification (a la SRM)
+** a1 (r17) size of contiguous, good memory in bytes
+** a2 (r18) cycle count in picoseconds
+** a3 (r19) signature (0xDECB) in <31:16> and system revision ID in <15:0>
+** a4 (r20) active processor mask
+** a5 (r21) system context value
+*/
+ lda r4, CNS_Q_IPR(r2) // Point to base of IPR area.
+ lda r4, pal_impure_common_size(r4) // Bias by PAL common size
+ SAVE_SHADOW(r22,CNS_Q_BC_CTL,r4) // Save shadow of bcCtl.
+ SAVE_SHADOW(r23,CNS_Q_BC_CFG,r4) // Save shadow of bcCfg.
+#ifdef undef
+ SAVE_SHADOW(r24,CNS_Q_BC_CFG_OFF,r4) // Save shadow of bcCfg.
+#endif
+ SAVE_SHADOW(r15,CNS_Q_SROM_REV,r4) // Save srom revision.
+ SAVE_SHADOW(r16,CNS_Q_PROC_ID,r4) // Save processor id.
+ SAVE_SHADOW(r17,CNS_Q_MEM_SIZE,r4) // Save memory size.
+ SAVE_SHADOW(r18,CNS_Q_CYCLE_CNT,r4) // Save cycle count.
+ SAVE_SHADOW(r19,CNS_Q_SIGNATURE,r4) // Save signature and sys rev.
+ SAVE_SHADOW(r20,CNS_Q_PROC_MASK,r4) // Save processor mask.
+ SAVE_SHADOW(r21,CNS_Q_SYSCTX,r4) // Save system context.
+
+//;;;;;; mtpr r31, ic_flush_ctl; do not flush the icache - done by hardware before SROM load
+
+/*
+ * Initialize the serial ports
+ *
+ * Baud rate: 9600 baud
+ * Word length: 8 bit characters
+ * Stop bits: 1 stop bit
+ * Parity: No parity
+ *	Modem control:	DTR, RTS active, OUT1 low, UART interrupts enabled
+ */
+
+/* Initialize COM1*/
+ OutPortByte(COM1_LCR,LCR_M_DLAB,r12,r14)// Access clock divisor latch.
+ OutPortByte(COM1_DLL,DLA_K_BRG,r12,r14) // Set the baud rate.
+ OutPortByte(COM1_DLH,0,r12,r14)
+ OutPortByte(COM1_LCR,LCR_K_INIT,r12,r14)// Set line control register.
+ OutPortByte(COM1_MCR,MCR_K_INIT,r12,r14)// Set modem control register.
+ OutPortByte(COM1_IER,0x0f,r12,r14) // Turn on interrupts.
+
+/* Flush COM1's receive buffer*/
+#ifndef SIMOS
+Sys_ResetFlushCom1:
+ InPortByte(COM1_LSR,r12,r14) // Read the line status.
+ blbc t0, Sys_ResetCom1Done // Are we done yet?
+ InPortByte(COM1_RBR,r12,r14) // Read receive buffer reg.
+ br zero, Sys_ResetFlushCom1 // Loop till done.
+
+Sys_ResetCom1Done:
+#endif
+ mb
+
+ mtpr r31, itb_ia // clear the ITB
+ mtpr r31, dtb_ia // clear the DTB
+
+//;;;;;; lda r1, 0x1FFF(R31)
+//;;;;;; mtpr r1, dc_test_ctl ; initialize tag index to all 1's.
+
+ /* Hardwire the base address of the PALcode */
+ lda r1, 0x4000(r31) // point to start of code
+ mtpr r1, pal_base // initialize PAL_BASE
+
+ // Interrupts
+ mtpr r31, astrr // stop ASTs
+ mtpr r31, aster // stop ASTs
+ mtpr r31, sirr // clear software interrupts
+
+ // enable shadow registers, floating point, kseg addressing
+ ldah r1, ((1<<(icsr_v_sde-16)) | (1<<(icsr_v_fpe-16)) | (2<<(icsr_v_spe-16)))(r31)
+ mtpr r1, icsr
+
+ // Mbox/Dcache init
+ lda r1, (1<<(mcsr_v_sp1))(r31)
+ mtpr r1, mcsr // MCSR - Super page enabled
+ lda r1, 1<<dc_mode_v_dc_ena(r31)
+ bsr r31, 1f
+
+ ALIGN_BRANCH
+1: mtpr r1, dc_mode // turn Dcache on
+ nop
+ STALL // No Mbox instr in 1,2,3,4
+ STALL
+ STALL
+ STALL
+ mtpr r31, dc_flush // flush Dcache
+
+ // build PS (IPL=7,CM=K,VMM=0,SW=0)
+ lda r11, 0x7(r31) // Shadow copy of PS - kern mode, IPL=7
+ lda r1, 0x1e(r31)
+
+ mtpr r1, ev5__ipl // set internal <ipl>
+ mtpr r31, ips // set new ps<cm>=0, Ibox copy
+ mtpr r31, dtb_cm // set new ps<cm>=0, Mbox copy
+
+ // Create the PALtemp pt_intmask -
+ // MAP:
+ // OSF IPL EV5 internal IPL(hex) note
+ // 0 0
+ // 1 1
+ // 2 2
+ // 3 14 device
+ // 4 15 device
+ // 5 16 device
+ // 6 1e device
+ // 7 1e make sure we can take mchks at ipl 31
+
+ ldah r1, 0x1e1e(r31) // Create upper lw of int_mask
+ lda r1, 0x1615(r1)
+
+ sll r1, 32, r1
+ ldah r1, 0x1402(r1) // Create lower lw of int_mask
+
+ lda r1, 0x0100(r1)
+ mtpr r1, pt_intmask // Stash in PALtemp
+
+ // Unlock a bunch of chip internal IPRs
+	mtpr	r31, exc_sum		// clear exception summary and exc_mask
+ mfpr r31, va // unlock va, mmstat
+ lda r8, ((1<<icperr_stat_v_dpe) | (1<<icperr_stat_v_tpe) | (1<<icperr_stat_v_tmr))(r31)
+ mtpr r8, icperr_stat // Clear Icache parity error & timeout
+ lda r8, ((1<<dcperr_stat_v_lock) | (1<<dcperr_stat_v_seo))(r31)
+ mtpr r8, dcperr_stat // Clear Dcache parity error status
+
+ rc r0 // clear intr_flag
+ mtpr r31, pt_trap
+
+ mfpr r0, pt_misc
+ srl r0, pt_misc_v_switch, r1
+ blbs r1, sys_reset_switch // see if we got here from swppal
+
+ // Rest of the "real" reset flow
+ // ASN
+ mtpr r31, dtb_asn
+ mtpr r31, itb_asn
+
+ lda r1, 0x1FFF(r31)
+ mtpr r1, dc_test_ctl // initialize tag index to all 1's.
+
+ lda r1, 0x67(r31)
+ sll r1, hwint_clr_v_pc0c, r1
+ mtpr r1, hwint_clr // Clear hardware interrupt requests
+
+ lda r1, 1<<mces_v_dpc(r31) // 1 in disable processor correctable error
+ mfpr r0, pt1 // get whami
+ insbl r0, 1, r0 // isolate whami in correct pt_misc position
+ or r0, r1, r1 // combine whami and mces
+ mtpr r1, pt_misc // store whami and mces, swap bit clear
+
+ // CPU specific impure pointer
+ extbl r0, 1, r0 //get whami again
+#if eb164 != 0
+// compile error .....
+//
+// mulq r0, ((pal_impure_specific_size+mchk_size+mchk_crd_size)/8), r0 ; whami * per_node size/8
+ lda r1, ((pal_impure_specific_size+mchk_size+mchk_crd_size)/8)(r31)
+ mulq r0, r1, r0 //
+#endif
+
+ addq r0, pal_impure_common_size/8, r0// add common area
+ sll r0, 3, r0 // * 8
+ addq r2, r0, r2 // addr our impure area offset
+ mtpr r2, pt_impure
+
+ zapnot r3, 1, r0 // isolate halt code
+ mtpr r0, pt0 // save entry type
+
+ // Cycle counter
+ or r31, 1, r9 // get a one
+ sll r9, 32, r9 // shift to <32>
+ mtpr r31, cc // clear Cycle Counter
+ mtpr r9, cc_ctl // clear and enable the Cycle Counter
+ mtpr r31, pt_scc // clear System Cycle Counter
+
+
+ // Misc PALtemps
+ mtpr r31, maf_mode // no mbox instructions for 3 cycles
+ or r31, 1, r1 // get bogus scbb value
+ mtpr r1, pt_scbb // load scbb
+ mtpr r31, pt_prbr // clear out prbr
+ mfpr r1, pal_base
+ lda r1, (kludge_initial_pcbb-0x4000)(r1) // get address for temp pcbb
+
+ mtpr r1, pt_pcbb // load pcbb
+ lda r1, 2(r31) // get a two
+ sll r1, 32, r1 // gen up upper bits
+ mtpr r1, mvptbr
+ mtpr r1, ivptbr
+ mtpr r31, pt_ptbr
+ // Performance counters
+ mtpr r31, pmctr
+
+ // Clear pmctr_ctl in impure area
+ mfpr r1, pt_impure
+ stqp r31, CNS_Q_PM_CTL(r1)
+
+ ldah r14, 0xfff0(r31)
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+
+#ifndef SIMOS
+ ldqp r31, sc_stat(r14) // Clear sc_stat and sc_addr
+ ldqp r31, ei_stat(r14)
+ ldqp r31, ei_stat(r14) // Clear ei_stat, ei_addr, bc_tag_addr, fill_syn
+#endif
+ mfpr r13, pt_impure
+ stqpc r31, 0(r13) // Clear lock_flag
+
+ lda r0, 0(zero)
+ mfpr r1, pt_impure
+ bsr r3, pal_save_state
+
+ mfpr r0, pt0 // get entry type
+	br	r31, sys_enter_console	// enter the console
+
+
+ // swppal entry
+ // r0 - pt_misc
+ // r17 - new PC
+ // r18 - new PCBB
+ // r19 - new VPTB
+sys_reset_switch:
+ or r31, 1, r9
+ sll r9, pt_misc_v_switch, r9
+ bic r0, r9, r0 // clear switch bit
+ mtpr r0, pt_misc
+
+ rpcc r1 // get cyccounter
+
+ ldqp r22, osfpcb_q_fen(r18) // get new fen/pme
+ ldlp r23, osfpcb_l_cc(r18) // get cycle counter
+ ldlp r24, osfpcb_l_asn(r18) // get new asn
+
+ ldqp r25, osfpcb_q_Mmptr(r18)// get new mmptr
+ sll r25, page_offset_size_bits, r25 // convert pfn to pa
+ mtpr r25, pt_ptbr // load the new mmptr
+ mtpr r18, pt_pcbb // set new pcbb
+
+ bic r17, 3, r17 // clean use pc
+ mtpr r17, exc_addr // set new pc
+ mtpr r19, mvptbr
+ mtpr r19, ivptbr
+
+ ldqp r30, osfpcb_q_Usp(r18) // get new usp
+ mtpr r30, pt_usp // save usp
+
+ sll r24, dtb_asn_v_asn, r8
+ mtpr r8, dtb_asn
+ sll r24, itb_asn_v_asn, r24
+ mtpr r24, itb_asn
+
+ mfpr r25, icsr // get current icsr
+ lda r24, 1(r31)
+ sll r24, icsr_v_fpe, r24 // 1 in icsr<fpe> position
+ bic r25, r24, r25 // clean out old fpe
+ and r22, 1, r22 // isolate new fen bit
+ sll r22, icsr_v_fpe, r22
+ or r22, r25, r25 // or in new fpe
+ mtpr r25, icsr // update ibox ipr
+
+ subl r23, r1, r1 // gen new cc offset
+ insll r1, 4, r1 // << 32
+ mtpr r1, cc // set new offset
+
+ or r31, r31, r0 // set success
+ ldqp r30, osfpcb_q_Ksp(r18) // get new ksp
+ mfpr r31, pt0 // stall
+ hw_rei_stall
+
+// .sbttl "SYS_MACHINE_CHECK - Machine check PAL"
+ ALIGN_BLOCK
+//+
+//sys_machine_check
+// A machine_check trap has occurred. The Icache has been flushed.
+//
+//-
+ .globl sys_machine_check
+sys_machine_check:
+ DEBUGSTORE(052)
+ DEBUGSTORE(052)
+ DEBUGSTORE(052)
+ // Need to fill up the refill buffer (32 instructions) and
+ // then flush the Icache again.
+ // Also, due to possible 2nd Cbox register file write for
+ // uncorrectable errors, no register file read or write for 7 cycles.
+
+ nop
+ mtpr r0, pt0 // Stash for scratch -- OK if Cbox overwrites r0 later
+
+ nop
+ nop
+
+ nop
+ nop
+
+ nop
+ nop
+
+ nop
+ nop
+ // 10 instructions; 5 cycles
+
+ nop
+ nop
+
+ nop
+ nop
+
+ // Register file can now be written
+ lda r0, scb_v_procmchk(r31) // SCB vector
+ mfpr r13, pt_mces // Get MCES
+ sll r0, 16, r0 // Move SCBv to correct position
+ bis r13, 1<<mces_v_mchk, r14 // Set MCES<MCHK> bit
+
+ zap r14, 0x3C, r14 // Clear mchk_code word and SCBv word
+ mtpr r14, pt_mces // 20 instructions
+
+ nop
+ or r14, r0, r14 // Insert new SCB vector
+ lda r0, mchk_c_proc_hrd_error(r31) // MCHK code
+ mfpr r12, exc_addr
+
+ sll r0, 32, r0 // Move MCHK code to correct position
+ mtpr r4, pt4
+ or r14, r0, r14 // Insert new MCHK code
+ mtpr r14, pt_misc // Store updated MCES, MCHK code, and SCBv
+
+ ldah r14, 0xfff0(r31)
+ mtpr r1, pt1 // Stash for scratch - 30 instructions
+
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+ mtpr r12, pt10 // Stash exc_addr
+
+ mtpr r31, ic_flush_ctl // Second Icache flush, now it is really flushed.
+// blbs r13, sys_double_machine_check ; MCHK halt if double machine check
+ blbs r12, sys_machine_check_while_in_pal // MCHK halt if machine check in pal
+
+ mtpr r6, pt6
+ mtpr r5, pt5
+
+
+ //+
+ // Start to collect the IPRs. Common entry point for mchk flows.
+ //
+ // Current state:
+ // pt0 - saved r0
+ // pt1 - saved r1
+ // pt4 - saved r4
+ // pt5 - saved r5
+ // pt6 - saved r6
+ // pt10 - saved exc_addr
+ // pt_misc<47:32> - mchk code
+ // pt_misc<31:16> - scb vector
+ // r14 - base of Cbox IPRs in IO space
+ // r0, r1, r4, r5, r6, r12, r13, r25 - available
+ // r8, r9, r10 - available as all loads are physical
+ // MCES<mchk> is set and machine check code is saved
+ //
+ //-
+
+ ALIGN_BRANCH
+ .globl sys_mchk_collect_iprs
+sys_mchk_collect_iprs:
+
+
+ mb // MB before reading Scache IPRs
+ mfpr r1, icperr_stat
+
+ mfpr r8, dcperr_stat
+ mtpr r31, dc_flush // Flush the Dcache
+
+ mfpr r31, pt0 // Pad Mbox instructions from dc_flush
+ mfpr r31, pt0
+ nop
+ nop
+
+ ldqp r9, sc_addr(r14) // SC_ADDR IPR
+ bis r9, r31, r31 // Touch ld to make sure it completes before
+ // read of SC_STAT
+ ldqp r10, sc_stat(r14) // SC_STAT, also unlocks SC_ADDR
+
+ ldqp r12, ei_addr(r14) // EI_ADDR IPR
+ ldqp r13, bc_tag_addr(r14) // BC_TAG_ADDR IPR
+ ldqp r0, fill_syn(r14) // FILL_SYN IPR
+ // Touch lds to make sure they complete before reading EI_STAT
+ bis r12, r13, r31
+ // wait for r0 ldqp to complete
+ bis r0, r0, r31
+ ldqp r25, ei_stat(r14) // EI_STAT, unlock EI_ADDR, BC_TAG_ADDR, FILL_SYN
+	ldqp	r31, ei_stat(r14)	// Read again to ensure it is unlocked
+ mfpr r6, pt_misc
+ extwl r6, 4, r6 // Fetch mchk code
+ br r31, sys_mchk_write_logout_frame //
+
+
+
+ //+
+ // Write the logout frame
+ //
+ // Current state:
+ // r0 - fill_syn
+ // r1 - icperr_stat
+ // r4 - available
+ // r5<0> - retry flag
+ // r6 - mchk code
+ // r8 - dcperr_stat
+ // r9 - sc_addr
+ // r10 - sc_stat
+ // r12 - ei_addr
+ // r13 - bc_tag_addr
+ // r14 - available
+ // r25 - ei_stat (shifted)
+ // pt0 - saved r0
+ // pt1 - saved r1
+ // pt4 - saved r4
+ // pt5 - saved r5
+ // pt6 - saved r6
+ // pt10 - saved exc_addr
+ //
+ //-
+ ALIGN_BRANCH
+sys_mchk_write_logout_frame:
+ //------------------------------------------------------------------------------------
+ // EB164 specific code ....
+ //
+ // R14 - uncorrectable error logout frame address = 6000h + size of CRD frame
+ //
+ //------------------------------------------------------------------------------------
+#if eb164 != 0
+
+
+ lda r14,PAL_LOGOUT_BASE(r31) // get the start of logout frame location
+ lda r14,mchk_mchk_base(r14) // add in the size of the CRD frame
+#endif
+
+ // Write the first 2 quadwords of the logout area:
+
+ sll r5, 63, r5 // Move retry flag to bit 63
+ lda r4, mchk_size(r5) // Combine retry flag and frame size
+ stqp r4, mchk_flag(r14) // store flag/frame size
+ lda r4, mchk_sys_base(r31) // sys offset
+ sll r4, 32, r4
+ lda r4, mchk_cpu_base(r4) // cpu offset
+ stqp r4, mchk_offsets(r14) // store sys offset/cpu offset into logout frame
+
+ //+
+ // Write the mchk code to the logout area
+ // Write error IPRs already fetched to the logout area
+ // Restore some GPRs from PALtemps
+ //-
+
+ mfpr r5, pt5
+ stqp r6, mchk_mchk_code(r14)
+ mfpr r4, pt4
+ stqp r1, mchk_ic_perr_stat(r14)
+ mfpr r6, pt6
+ stqp r8, mchk_dc_perr_stat(r14)
+ mfpr r1, pt1
+ stqp r9, mchk_sc_addr(r14)
+ stqp r10, mchk_sc_stat(r14)
+ stqp r12, mchk_ei_addr(r14)
+ stqp r13, mchk_bc_tag_addr(r14)
+ stqp r0, mchk_fill_syn(r14)
+ mfpr r0, pt0
+#define ei_stat_v_bc_tperr 28
+ sll r25, ei_stat_v_bc_tperr, r25 // Move EI_STAT status bits back to expected position
+ //
+ // Steve Shirron ei_stat.chip_id bits fix
+ //
+ // r25 has the ei_stat<35:28> bits aligned to bit 0.
+ // Because this alignment trashed the ei_stat.chip_id<27:24> bits,
+ // Steve's fix re-reads the ei_stat to recover the chip_id bits before
+ // writing the ei_stat to the log.
+ //
+ ldah r13, 0xfff0(r31) // r13 <- pointer to cbox
+ zapnot r13, 0x1f, r13 // :
+ ldqp r13, ei_stat(r13) // r13 <- contents of ei_stat register
+	sll	r13, 64-ei_stat_v_bc_tperr, r13	// clear bits <63:28>, keeping chip_id<27:24> and below
+ srl r13, 64-ei_stat_v_bc_tperr, r13 // put ei_stat bits back
+ or r25, r13, r25 // or-in the stat bits from the original read
+ stqp r25, mchk_ei_stat(r14) // write to the log
+
+ mfpr r25, pt10
+ stqp r25, mchk_exc_addr(r14)
+
+ // complete the CPU-specific part of the logout frame
+
+#define mchk_logout(regName, regOff) \
+ mfpr r25, regName ; \
+ stqp r25, CNS_Q_/**/regOff(r14)
+
+ mchk_logout(mm_stat, MM_STAT)
+ mchk_logout(va, VA) // Unlocks VA and MM_STAT
+ mchk_logout(isr, ISR)
+ mchk_logout(icsr, ICSR)
+ mchk_logout(pal_base, PAL_BASE)
+ mchk_logout(exc_mask, EXC_MASK)
+ mchk_logout(exc_sum, EXC_SUM)
+
+ ldah r13, 0xfff0(r31)
+ zap r13, 0xE0, r13 // Get Cbox IPR base
+ ldqp r13, ld_lock(r13) // Get ld_lock IPR
+ stqp r13, mchk_ld_lock(r14) // and stash it in the frame
+
+ //+
+ // complete the PAL-specific part of the logout frame
+ //-
+#define svpt(n)\
+ mfpr r25, pt/**/n ;\
+ stqp r25, CNS_Q_PT+(8*n)(r14)
+
+ svpt(0)
+ svpt(1)
+ svpt(2)
+ svpt(3)
+ svpt(4)
+ svpt(5)
+ svpt(6)
+ svpt(7)
+ svpt(8)
+ svpt(9)
+ svpt(10)
+ svpt(11)
+ svpt(12)
+ svpt(13)
+ svpt(14)
+ svpt(15)
+ svpt(16)
+ svpt(17)
+ svpt(18)
+ svpt(19)
+ svpt(20)
+ svpt(21)
+ svpt(22)
+ svpt(23)
+
+ //+
+ // Log system specific info here
+ //-
+
+ // Unlock IPRs
+ lda r8, ((1<<dcperr_stat_v_lock) | (1<<dcperr_stat_v_seo))(r31)
+ mtpr r8, dcperr_stat // Clear Dcache parity error status
+
+ lda r8, ((1<<icperr_stat_v_dpe) | (1<<icperr_stat_v_tpe) | (1<<icperr_stat_v_tmr))(r31)
+ mtpr r8, icperr_stat // Clear Icache parity error & timeout status
+
+// pvc_jsr armc, bsr=1
+ bsr r12, sys_arith_and_mchk // go check for and deal with arith trap
+
+ mtpr r31, exc_sum // Clear Exception Summary
+
+ mfpr r25, pt10 // write exc_addr after arith_and_mchk to pickup new pc
+ stqp r25, mchk_exc_addr(r14)
+
+ //+
+ // Set up the km trap
+ //-
+
+sys_post_mchk_trap:
+ mfpr r25, pt_misc // Check for flag from mchk interrupt
+ extwl r25, 4, r25
+	blbs	r25, sys_mchk_stack_done	// Stack frame already pushed if coming from the interrupt flow
+
+ bis r14, r31, r12 // stash pointer to logout area
+ mfpr r14, pt10 // get exc_addr
+
+ sll r11, 63-3, r25 // get mode to msb
+ bge r25, sys_post_mchk_trap_30_
+
+ mtpr r31, dtb_cm
+ mtpr r31, ips
+
+ mtpr r30, pt_usp // save user stack
+ mfpr r30, pt_ksp
+
+sys_post_mchk_trap_30_:
+ lda sp, 0-osfsf_c_size(sp) // allocate stack space
+ nop
+
+ stq r18, osfsf_a2(sp) // a2
+ stq r11, osfsf_ps(sp) // save ps
+
+ stq r14, osfsf_pc(sp) // save pc
+ mfpr r25, pt_entInt // get the VA of the interrupt routine
+
+ stq r16, osfsf_a0(sp) // a0
+ lda r16, osfint_c_mchk(r31) // flag as mchk in a0
+
+ stq r17, osfsf_a1(sp) // a1
+ mfpr r17, pt_misc // get vector
+
+ stq r29, osfsf_gp(sp) // old gp
+ mtpr r25, exc_addr //
+
+ or r31, 7, r11 // get new ps (km, high ipl)
+ subq r31, 1, r18 // get a -1
+
+ extwl r17, 2, r17 // a1 <- interrupt vector
+ bis r31, ipl_machine_check, r25
+
+ mtpr r25, ev5__ipl // Set internal ipl
+ srl r18, 42, r18 // shift off low bits of kseg addr
+
+ sll r18, 42, r18 // shift back into position
+ mfpr r29, pt_kgp // get the kern r29
+
+ or r12, r18, r18 // EV4 algorithm - pass pointer to mchk frame as kseg address
+
+ hw_rei_spe // out to interrupt dispatch routine
+
+
+ //+
+ // The stack is pushed. Load up a0,a1,a2 and vector via entInt
+ //
+ //-
+ ALIGN_BRANCH
+sys_mchk_stack_done:
+ lda r16, osfint_c_mchk(r31) // flag as mchk/crd in a0
+ lda r17, scb_v_sysmchk(r31) // a1 <- interrupt vector
+
+ subq r31, 1, r18 // get a -1
+ mfpr r25, pt_entInt
+
+ srl r18, 42, r18 // shift off low bits of kseg addr
+ mtpr r25, exc_addr // load interrupt vector
+
+ sll r18, 42, r18 // shift back into position
+ or r14, r18, r18 // EV4 algorithm - pass pointer to mchk frame as kseg address
+
+ hw_rei_spe // done
+
+
+
+
+//sys_double_machine_check - a machine check was started, but MCES<MCHK> was
+// already set. We will now double machine check halt.
+//
+// pt0 - old R0
+//
+ ALIGN_BLOCK
+ .globl sys_double_machine_check
+sys_double_machine_check:
+
+// pvc_jsr updpcb, bsr=1
+// bsr r0, pal_update_pcb // update the pcb
+ lda r0, hlt_c_dbl_mchk(r31)
+ br r31, sys_enter_console
+
+//sys_machine_check_while_in_pal - a machine check was started, exc_addr points to
+// a PAL PC. We will now machine check halt.
+//
+// pt0 - old R0
+//
+sys_machine_check_while_in_pal:
+
+// pvc_jsr updpcb, bsr=1
+// bsr r0, pal_update_pcb // update the pcb
+ lda r0, hlt_c_mchk_from_pal(r31)
+ br r31, sys_enter_console
+
+//ARITH and MCHK
+// Check for arithmetic errors and build trap frame,
+// but don't post the trap.
+// on entry:
+// pt10 - exc_addr
+// r12 - return address
+// r14 - logout frame pointer
+// r13 - available
+// r8,r9,r10 - available except across stq's
+// pt0,1,6 - available
+//
+// on exit:
+// pt10 - new exc_addr
+// r17 = exc_mask
+// r16 = exc_sum
+// r14 - logout frame pointer
+//
+ ALIGN_BRANCH
+sys_arith_and_mchk:
+ mfpr r13, ev5__exc_sum
+ srl r13, exc_sum_v_swc, r13
+ bne r13, handle_arith_and_mchk
+
+// pvc_jsr armc, bsr=1, dest=1
+ ret r31, (r12) // return if no outstanding arithmetic error
+
+handle_arith_and_mchk:
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r14, pt0
+
+ mtpr r1, pt1 // get a scratch reg
+ and r11, osfps_m_mode, r1 // get mode bit
+
+ bis r11, r31, r25 // save ps
+ beq r1, handle_arith_and_mchk_10_ // if zero we are in kern now
+
+ bis r31, r31, r25 // set the new ps
+ mtpr r30, pt_usp // save user stack
+
+ mfpr r30, pt_ksp // get kern stack
+handle_arith_and_mchk_10_:
+ mfpr r14, exc_addr // get pc into r14 in case stack writes fault
+
+ lda sp, 0-osfsf_c_size(sp) // allocate stack space
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ mfpr r1, pt_entArith
+ stq r14, osfsf_pc(sp) // save pc
+
+ stq r17, osfsf_a1(sp)
+ mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
+
+ stq r29, osfsf_gp(sp)
+ stq r16, osfsf_a0(sp) // save regs
+
+ bis r13, r31, r16 // move exc_sum to r16
+ stq r18, osfsf_a2(sp)
+
+ stq r11, osfsf_ps(sp) // save ps
+ mfpr r29, pt_kgp // get the kern gp
+
+ mfpr r14, pt0 // restore logout frame pointer from pt0
+ bis r25, r31, r11 // set new ps
+
+ mtpr r1, pt10 // Set new PC
+ mfpr r1, pt1
+
+// pvc_jsr armc, bsr=1, dest=1
+	ret	r31, (r12)		// return to caller with the arithmetic trap frame built
+
+
+
+
+
+// .sbttl "SYS_ENTER_CONSOLE - Common PALcode for ENTERING console"
+
+// SYS_enter_console
+//
+// Entry:
+// Entered when PAL wants to enter the console.
+// usually as the result of a HALT instruction or button,
+// or catastrophic error.
+//
+// Regs on entry...
+//
+// R0 = halt code
+// pt0 <- r0
+//
+// Function:
+//
+// Save all readable machine state, and "call" the console
+//
+// Returns:
+//
+//
+// Notes:
+//
+// In these routines, once the save state routine has been executed,
+// the remainder of the registers become scratchable, as the only
+// "valid" copy of them is the "saved" copy.
+//
+//	Any registers or PTs that are modified before calling the save
+//	routine will have their data lost. The code below will save all
+//	state, but will lose pt0, pt4, and pt5.
+//
+//-
+#define KSEG 0xfffffc0000000000
+ ALIGN_BLOCK
+ .globl sys_enter_console
+sys_enter_console:
+
+ DEBUGSTORE(0x7a)
+ DEBUG_EXC_ADDR()
+ DEBUGSTORE(13)
+ DEBUGSTORE(10)
+ mtpr r1, pt4
+ mtpr r3, pt5
+ STALL
+ STALL
+ mfpr r1, pt_impure
+ bsr r3, pal_save_state
+
+ // build PS (IPL=7,CM=K,VMM=0,SW=0)
+ lda r11, 0x7(r31) // Shadow copy of PS - kern mode, IPL=7
+ lda r1, 0x1e(r31)
+ mtpr r1, ev5__ipl // set internal <ipl>
+
+ /* kernel sp: KSEG + 0xffe000 */
+ subq r31, 1, sp
+ sll sp, 42, sp
+ lda r3, 0xffe(zero)
+ sll r3, 12, r3
+ addq r3, sp, sp
+ mtpr sp, ptKsp // Update the saved KSP value.
+ mtpr zero, ips // Set mode to kernel, IPL = 0.
+ mtpr zero, dtb_ia // Flush the DTB
+ mtpr zero, itb_ia // Flush the ITB
+ mtpr zero, astrr // Clear all ASTs ...
+ mtpr zero, aster
+ mtpr zero, sirr // Clear all software interrupts.
+// lda r3, pal_enter_console_ptr(r31) //find stored vector
+// ldqp r1, 0(r3)
+ /* put the KSEG address in the top, then add 0x10000 to it */
+ subq r31, 1, r1
+ sll r1, 42, r1
+
+ ldah r1, 1(r1)
+ mtpr r1, exc_addr
+ mfpr r1, pt4
+ mfpr r3, pt5
+ STALL
+ STALL
+ hw_rei_stall
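The address arithmetic in sys_enter_console is compact; a hedged C sketch of
the constants it builds (KSEG base, kernel SP, console entry PC), with
illustrative helper names:

#include <stdint.h>

#define KSEG_BASE ((uint64_t)-1 << 42)   /* subq r31,1 / sll 42: 0xfffffc0000000000 */

static uint64_t console_kernel_sp(void)
{
    return KSEG_BASE + (0xffeULL << 12);   /* KSEG + 0xffe000 */
}

static uint64_t console_entry_pc(void)
{
    /* ldah r1, 1(r1) adds 0x10000, matching the default CONSOLE_ENTRY
     * value in platform.h. */
    return KSEG_BASE + 0x10000;
}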
+
+ .align 7
+kludge_initial_pcbb: // PCB is 128 bytes long
+ .quad 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+
+ .globl debugstore
+debugstore:
+ stq_p r16,0(zero)
+ stq_p r17,8(zero)
+ lda r16, 0x400(zero)
+ sll r16, 29, r16
+ ldah r16, 0x280(r16)
+9: lda r17, 0x140(r16)
+ ldl_p r17, 0(r17)
+ srl r17, 16, r17
+ and r17, 0x20, r17
+ beq r17, 9b
+ stl_p r13, 0(r16)
+ mb
+ ldq_p r17, 8(zero)
+ ldq_p r16, 0(zero)
+ jmp r31, (r25)
+
+//
+ .globl put_exc_addr
+put_exc_addr:
+ stq_p r16,0(zero)
+ stq_p r17,8(zero)
+ stq_p r18,16(zero)
+ stq_p r19,24(zero)
+
+ mfpr r16, exc_addr
+ bis r31, 64-4, r17 // shift count for quadword
+
+1: lda r18, 0x400(zero) // Wait for UART ready
+ sll r18, 29, r18
+ ldah r18, 0x280(r18)
+9: lda r19, 0x140(r18)
+ ldl_p r19, 0(r19)
+ srl r19, 16, r19
+ and r19, 0x20, r19
+ beq r19, 9b
+
+ srl r16, r17, r19 // grab a nibble
+ and r19, 0x0f, r19
+ addq r19, 0x30, r19 // make ascii
+ cmple r19, 0x39, r18
+ bne r18, 0f
+	addq	r19, 0x27, r19		// 0x27 more maps 0xa-0xf to 'a'-'f'
+
+0: lda r18, 0x400(zero) // Wait for UART ready
+ sll r18, 29, r18
+ ldah r18, 0x280(r18)
+
+ stl_p r19, 0(r18)
+ mb
+
+ subq r17, 4, r17 // do all nibbles
+ bge r17, 1b
+
+ ldq_p r16,0(zero)
+ ldq_p r17,8(zero)
+ ldq_p r18,16(zero)
+ ldq_p r19,24(zero)
+
+ ret r31, (r25)
+
+ // print out r13
+ .globl put_hex
+put_hex:
+ stq_p r16,0(zero)
+ stq_p r17,8(zero)
+ stq_p r18,16(zero)
+ stq_p r19,24(zero)
+
+ bis r13, zero, r16
+ bis r31, 64-4, r17 // shift count for quadword
+
+1: lda r18, 0x400(zero) // Wait for UART ready
+ sll r18, 29, r18
+ ldah r18, 0x280(r18)
+9: lda r19, 0x140(r18)
+ ldl_p r19, 0(r19)
+ srl r19, 16, r19
+ and r19, 0x20, r19
+ beq r19, 9b
+
+ srl r16, r17, r19 // grab a nibble
+ and r19, 0x0f, r19
+ addq r19, 0x30, r19 // make ascii
+ cmple r19, 0x39, r18
+ bne r18, 0f
+	addq	r19, 0x27, r19		// 0x27 more maps 0xa-0xf to 'a'-'f'
+
+0: lda r18, 0x400(zero) // Wait for UART ready
+ sll r18, 29, r18
+ ldah r18, 0x280(r18)
+
+ stl_p r19, 0(r18)
+ mb
+
+ subq r17, 4, r17 // do all nibbles
+ bge r17, 1b
+
+ ldq_p r16,0(zero)
+ ldq_p r17,8(zero)
+ ldq_p r18,16(zero)
+ ldq_p r19,24(zero)
+
+ ret r31, (r25)
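put_exc_addr and put_hex walk the quadword one nibble at a time from the most
significant end, converting each nibble to ASCII inline. A hedged C equivalent
of that conversion, with the UART polling omitted (the helper and its output
callback are illustrative); note that the +0x27 adjustment produces lowercase
a-f:

#include <stdint.h>

/* Emit a 64-bit value as 16 hex digits, most significant nibble first, the
 * same way the shift count above walks down from 60 to 0 in steps of 4. */
static void put_hex_model(uint64_t value, void (*emit)(char))
{
    for (int shift = 60; shift >= 0; shift -= 4) {
        unsigned nib = (unsigned)((value >> shift) & 0x0f);
        char c = (char)(nib + 0x30);        /* '0'..'9'               */
        if (c > 0x39)
            c = (char)(c + 0x27);           /* 0x3a..0x3f -> 'a'..'f' */
        emit(c);
    }
}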
+
+
+/*
+**
+** FUNCTIONAL DESCRIPTION:
+**
+** Transfer control to the specified address, passed in
+** register a0, in PAL mode.
+**
+** INPUT PARAMETERS:
+**
+** a0 (r16) - Transfer address
+**
+** OUTPUT PARAMETERS:
+**
+** DECchip 21064 specific parameters:
+**
+** t0 (r1) - bcCtl
+** t1 (r2) - bcCfg
+**
+** Firmware specific parameters:
+**
+** s6 (r15) - Encoded srom.s RCS revision
+** a0 (r16) - Processor identification (a la SRM)
+** a1 (r17) - Size of good memory in bytes
+** a2 (r18) - Cycle count in picoseconds
+** a3 (r19) - Protocol signature and system revision
+** a4 (r20) - Active processor mask
+** a5 (r21) - System Context value
+**
+** SIDE EFFECTS:
+**
+*/
+ ALIGN_BRANCH
+
+Sys_Cserve_Jtopal:
+ bic a0, 3, t8 // Clear out low 2 bits of address
+ bis t8, 1, t8 // Or in PAL mode bit
+
+ mfpr t9, ptImpure // Get base of impure scratch area.
+ lda t9, CNS_Q_IPR(t9) // Point to start of IPR area.
+
+ RESTORE_SHADOW(a3,CNS_Q_SIGNATURE,t9) // Get signature.
+ srl a3, 16, t0 // Shift signature into lower word.
+
+ LDLI(t10,0xDECB) // Load the expected valid signature.
+
+ cmpeq t0, t10, t0 // Check if saved signature was valid.
+ blbc t0, 1f // If invalid, pass nothing.
+/*
+** Load the processor specific parameters ...
+*/
+ RESTORE_SHADOW(t0,CNS_Q_BC_CTL,t9) // Get bcCtl.
+ RESTORE_SHADOW(t1,CNS_Q_BC_CFG,t9) // Get bcCfg.
+/* RESTORE_SHADOW(t2,CNS_Q_BC_CFG_OFF,t9) // Get bcCfg.*/
+/*
+** Load the firmware specific parameters ...
+*/
+ RESTORE_SHADOW(s6,CNS_Q_SROM_REV,t9) // Get srom revision.
+ RESTORE_SHADOW(a0,CNS_Q_PROC_ID,t9) // Get processor id.
+ RESTORE_SHADOW(a1,CNS_Q_MEM_SIZE,t9) // Get memory size.
+ RESTORE_SHADOW(a2,CNS_Q_CYCLE_CNT,t9) // Get cycle count.
+ RESTORE_SHADOW(a4,CNS_Q_PROC_MASK,t9) // Get processor mask.
+ RESTORE_SHADOW(a5,CNS_Q_SYSCTX,t9) // Get system context.
+
+ STALL
+ STALL
+
+1: mtpr zero, ptWhami // Clear WHAMI and swap flag.
+ mtpr t8, excAddr // Load the dispatch address.
+ br zero, 2f
+
+ ALIGN_BLOCK
+
+2: NOP
+ mtpr zero, icFlush // Flush the icache.
+ NOP
+ NOP
+
+ NOP // Required NOPs ... 1-10
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+
+ NOP // Required NOPs ... 11-20
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+
+ NOP // Required NOPs ... 21-30
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+
+ NOP // Required NOPs ... 31-40
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+
+
+
+ NOP // Required NOPs ... 41-44
+ NOP
+ NOP
+ NOP
+
+ hw_rei // Dispatch in PAL mode ...
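The handoff check in Sys_Cserve_Jtopal only restores the shadowed SROM/POST
parameters when the saved a3 carries the 0xDECB protocol signature in
bits <31:16>. A minimal C sketch of that test (helper name illustrative):

#include <stdint.h>

#define SROM_SIGNATURE 0xDECBu

/* Nonzero when the saved a3 says the POST handoff protocol was followed and
 * the other shadowed parameters can be trusted. The PAL code compares the
 * whole srl-by-16 result, so any bits set above <31> also fail the check. */
static int srom_handoff_valid(uint64_t saved_a3)
{
    return (saved_a3 >> 16) == SROM_SIGNATURE;
}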
diff --git a/system/alpha/palcode/platform_tlaser.s b/system/alpha/palcode/platform_tlaser.s
new file mode 100644
index 000000000..000ed9f38
--- /dev/null
+++ b/system/alpha/palcode/platform_tlaser.s
@@ -0,0 +1,2814 @@
+// build_fixed_image: not sure what this means
+// real_mm to be replaced during rewrite
+// remove_save_state/remove_restore_state could be removed to save space ??
+#define egore 0
+#define acore 0
+#define beh_model 0
+#define ev5_p2 1
+#define ev5_p1 0
+#define ldvpte_bug_fix 1
+#define spe_fix 0
+#define osf_chm_fix 0
+#define build_fixed_image 0
+#define enable_p4_fixups 0
+#define osf_svmin 1
+#define enable_physical_console 0
+#define fill_err_hack 0
+#define icflush_on_tbix 0
+#define max_cpuid 1
+#define perfmon_debug 0
+#define rax_mode 0
+
+#define hw_rei_spe hw_rei
+
+#include "ev5_defs.h"
+#include "ev5_impure.h"
+#include "ev5_alpha_defs.h"
+#include "ev5_paldef.h"
+#include "ev5_osfalpha_defs.h"
+#include "fromHudsonMacros.h"
+#include "fromHudsonOsf.h"
+#include "dc21164FromGasSources.h"
+#include "cserve.h"
+#include "simos.h"
+
+
+#define ldlp ldl_p
+#define ldqp ldq_p
+
+#define stlp stl_p
+#define stqp stq_p
+#define stqpc stqp
+
+#ifdef SIMOS
+#define ldqpl ldq_p
+#define sdqpl sdq_p
+#else
+<--bomb>
+#endif
+
+#define pt_entInt pt_entint
+#define pt_entArith pt_entarith
+#define mchk_size ((mchk_cpu_base + 7 + 8) &0xfff8)
+#define mchk_flag CNS_Q_FLAG
+#define mchk_sys_base 56
+#define mchk_cpu_base (CNS_Q_LD_LOCK + 8)
+#define mchk_offsets CNS_Q_EXC_ADDR
+#define mchk_mchk_code 8
+#define mchk_ic_perr_stat CNS_Q_ICPERR_STAT
+#define mchk_dc_perr_stat CNS_Q_DCPERR_STAT
+#define mchk_sc_addr CNS_Q_SC_ADDR
+#define mchk_sc_stat CNS_Q_SC_STAT
+#define mchk_ei_addr CNS_Q_EI_ADDR
+#define mchk_bc_tag_addr CNS_Q_BC_TAG_ADDR
+#define mchk_fill_syn CNS_Q_FILL_SYN
+#define mchk_ei_stat CNS_Q_EI_STAT
+#define mchk_exc_addr CNS_Q_EXC_ADDR
+#define mchk_ld_lock CNS_Q_LD_LOCK
+#define osfpcb_q_Ksp pcb_q_ksp
+#define pal_impure_common_size ((0x200 + 7) & 0xfff8)
+
+#define ALIGN_BLOCK \
+ .align 5
+
+#define ALIGN_BRANCH \
+ .align 3
+
+#define EXPORT(_x) \
+ .align 5; \
+ .globl _x; \
+_x:
+
+// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+// XXX the following is 'made up'
+// XXX bugnion
+
+// XXX bugnion not sure how to align 'quad'
+#define ALIGN_QUAD \
+ .align 3
+
+#define ALIGN_128 \
+ .align 7
+
+
+#define GET_IMPURE(_r) mfpr _r,pt_impure
+#define GET_ADDR(_r1,_off,_r2) lda _r1,_off(_r2)
+
+
+#define BIT(_x) (1<<(_x))
+
+
+// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+// XXX back to original code
+
+// .sbttl "System specific code - beh model version"
+
+//
+// Entry points
+// SYS$CFLUSH - Cache flush
+// SYS$CSERVE - Console service
+// SYS$WRIPIR - interprocessor interrupts
+// SYS$HALT_INTERRUPT - Halt interrupt
+// SYS$PASSIVE_RELEASE - Interrupt, passive release
+// SYS$INTERRUPT - Interrupt
+// SYS$RESET - Reset
+// SYS$ENTER_CONSOLE
+
+//
+// Macro to read TLINTRSUMx
+//
+// Based on the CPU_NUMBER, read either the TLINTRSUM0 or TLINTRSUM1 register
+//
+// Assumed register usage:
+// rsum TLINTRSUMx contents
+// raddr node space address
+// scratch scratch register
+
+
+// .macro Read_TLINTRSUMx rsum, raddr, scratch, ?label1, ?label2
+//
+// nop
+// mfpr 'scratch', pt_whami // Get our whami (VID)
+//
+// extbl 'scratch', #1, 'scratch' // shift down to bit 0
+// lda 'raddr', ^xff88(r31) // Get base node space address bits
+//
+// sll 'raddr', #24, 'raddr' // Shift up to proper position
+// srl 'scratch', #1, 'rsum' // Shift off the cpu number
+//
+// sll 'rsum', #22, 'rsum' // Get our node offset
+// addq 'raddr', 'rsum', 'raddr' // Get our base node space address
+//
+// blbs 'scratch', label1
+// lda 'raddr', <tlep$tlintrsum0_offset>('raddr')
+//
+// br r31, label2
+//label1: lda 'raddr', <tlep$tlintrsum1_offset>('raddr')
+//
+//label2: ldlp 'rsum', 0('raddr') // read the right tlintrsum reg
+//.endm
+
+#define Read_TLINTRSUMx(_rsum,_raddr,_scratch) \
+ nop; \
+ mfpr _scratch,pt_whami; \
+ extbl _scratch,1,_scratch; \
+ lda _raddr,0xff88(zero); \
+ sll _raddr,24,_raddr; \
+ srl _scratch,1,_rsum; \
+ sll _rsum,22,_rsum; \
+ addq _raddr,_rsum,_raddr; \
+ blbs _scratch,1f; \
+ lda _raddr,tlep_lintrsum0_offset(_raddr); \
+ br r31,2f; \
+1: \
+ lda _raddr,tlep_lintrsum1_offset(_raddr); \
+2: ldlp _rsum,0(_raddr)
+
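+// Worked example of the decode above (informal; offset names are taken from
+// the platform headers): a WHAMI VID byte of 5 leaves 5 in the scratch
+// register, so rsum = 5 >> 1 = 2 selects TLSB node 2's 4MB node space slot
+// (2 << 22), and the set low bit (the CPU number within the node) steers
+// the final ldlp to TLINTRSUM1 instead of TLINTRSUM0.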
+
+
+//
+// Macro to write TLINTRSUMx
+//
+// Based on the CPU_NUMBER, write either the TLINTRSUM0 or TLINTRSUM1 register
+//
+// Assumed register usage:
+// rsum TLINTRSUMx write data
+// raddr node space address
+// scratch scratch register
+
+// .macro Write_TLINTRSUMx rsum, raddr, whami, ?label1, ?label2
+//
+// nop
+// mfpr 'whami', pt_whami // Get our whami (VID)
+//
+// extbl 'whami', #1, 'whami' // shift down to bit 0
+// lda 'raddr', ^xff88(r31) // Get base node space address bits
+//
+// sll 'raddr', #24, 'raddr' // Shift up to proper position
+// blbs 'whami', label1
+//
+// lda 'raddr', <tlep$tlintrsum0_offset>('raddr')
+// br r31, label2
+//
+// label1: lda 'raddr', <tlep$tlintrsum1_offset>('raddr')
+// label2: srl 'whami', #1, 'whami' // Shift off the cpu number
+//
+// sll 'whami', #22, 'whami' // Get our node offset
+// addq 'raddr', 'whami', 'raddr' // Get our base node space address
+//
+// mb
+// stqp 'rsum', 0('raddr') // write the right tlintrsum reg
+// mb
+// ldqp 'rsum', 0('raddr') // dummy read to tlintrsum
+// bis 'rsum', 'rsum', 'rsum' // needed to complete the ldqp above -jpo
+// .endm
+
+
+#define Write_TLINTRSUMx(_rsum,_raddr,_whami) \
+ nop; \
+ mfpr _whami,pt_whami; \
+ extbl _whami,1,_whami; \
+ lda _raddr,0xff88(zero); \
+ sll _raddr,24,_raddr; \
+ blbs _whami,1f; \
+ lda _raddr,tlep_tlintrsum0_offset(_raddr);\
+ br zero,2f; \
+1: lda _raddr,tlep_tlintrsum1_offset(_raddr);\
+2: srl _whami,1,_whami; \
+ addq _raddr,_whami,_raddr; \
+ mb; \
+ stqp _rsum,0(_raddr); \
+ ldqp _rsum,0(_raddr); \
+ bis _rsum,_rsum,_rsum
+
+
+//
+// Macro to determine highest priority TIOP Node ID from interrupt pending mask
+//
+// Assumed register usage:
+// rmask - TLINTRSUMx contents, shifted to isolate IOx bits
+// rid - TLSB Node ID of highest TIOP
+
+//.macro Intr_Find_TIOP rmask, rid, ?l1, ?l2, ?l3, ?l4, ?l5, ?l6
+// srl 'rmask', #4, 'rid' // check IOP8
+// blbc 'rid', l1 // not IOP8
+//
+// lda 'rid', 8(r31) // IOP8
+// br r31, l6
+//
+// l1: srl 'rmask', #3, 'rid' // check IOP7
+// blbc 'rid', l2 // not IOP7
+//
+// lda 'rid', 7(r31) // IOP7
+// br r31, l6
+//
+// l2: srl 'rmask', #2, 'rid' // check IOP6
+// blbc 'rid', l3 // not IOP6
+//
+// lda 'rid', 6(r31) // IOP6
+// br r31, l6
+//
+// l3: srl 'rmask', #1, 'rid' // check IOP5
+// blbc 'rid', l4 // not IOP5
+//
+// lda 'rid', 5(r31) // IOP5
+// br r31, l6
+//
+// l4: srl 'rmask', #0, 'rid' // check IOP4
+// blbc 'rid', l5 // not IOP4
+//
+// lda r14, 4(r31) // IOP4
+// br r31, l6
+//
+// l5: lda r14, 0(r31) // passive release
+// l6:
+// .endm
+
+
+#define Intr_Find_TIOP(_rmask,_rid) \
+        srl     _rmask,4,_rid;   \
+ blbc _rid,1f; \
+ lda _rid,8(zero); \
+ br zero,6f; \
+1: srl _rmask,3,_rid; \
+ blbc _rid, 2f; \
+ lda _rid, 7(r31); \
+ br r31, 6f; \
+2: srl _rmask, 2, _rid; \
+ blbc _rid, 3f; \
+ lda _rid, 6(r31); \
+ br r31, 6f; \
+3: srl _rmask, 1, _rid; \
+ blbc _rid, 4f; \
+ lda _rid, 5(r31); \
+ br r31, 6f; \
+4: srl _rmask, 0, _rid; \
+ blbc _rid, 5f; \
+ lda r14, 4(r31); \
+ br r31, 6f; \
+5: lda r14, 0(r31); \
+6:
+
+
+
+
+//
+// Macro to calculate base node space address for given node id
+//
+// Assumed register usage:
+// rid - TLSB node id
+// raddr - base node space address
+
+//.macro Get_TLSB_Node_Address rid, raddr
+// sll 'rid', #22, 'rid' // Get offset of IOP node
+// lda 'raddr', ^xff88(r31) // Get base node space address bits
+//
+// sll 'raddr', #24, 'raddr' // Shift up to proper position
+// addq 'raddr', 'rid', 'raddr' // Get TIOP node space address
+// .iif ne turbo_pcia_intr_fix, srl 'rid', #22, 'rid' // Restore IOP node id
+//.endm
+
+
+#define turbo_pcia_intr_fix 0
+
+
+#if turbo_pcia_intr_fix != 0
+#define Get_TLSB_Node_Address(_rid,_raddr) \
+ sll _rid,22,_rid; \
+ lda _raddr,0xff88(zero); \
+ sll _raddr,24,_raddr; \
+ addq _raddr,_rid,_raddr; \
+ srl _rid,22,_rid
+#else
+#define Get_TLSB_Node_Address(_rid,_raddr) \
+ sll _rid,22,_rid; \
+ lda _raddr,0xff88(zero); \
+ sll _raddr,24,_raddr; \
+ addq _raddr,_rid,_raddr
+#endif
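+// Worked example for the macro above (a sketch; the 0xff88 displacement is
+// sign-extended by lda and is presumably resolved through the superpage
+// mapping to the TLSB node space at ff.8800.0000): for TLSB node id 4,
+// _rid << 22 = 0x0100.0000, so _raddr becomes ff.8900.0000, the base of
+// node 4's 4MB CSR window.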
+
+
+
+
+
+// .macro mchk$TLEPstore rlog, rs, rs1, nodebase, tlepreg, clr, tlsb, crd
+// .iif eq tlsb, lda 'rs1',<tlep$'tlepreg'_offset>(r31)
+// .iif ne tlsb, lda 'rs1',<tlsb$'tlepreg'_offset>(r31)
+// or 'rs1', 'nodebase', 'rs1'
+// ldlp 'rs', 0('rs1')
+// .iif eq crd, stlp 'rs', mchk$'tlepreg'('rlog') // store in frame
+// .iif ne crd, stlp 'rs', mchk$crd_'tlepreg'('rlog') // store in frame
+// .iif ne clr, stlp 'rs',0('rs1') // optional write to clear
+// .endm
+
+
+// .macro OSFmchk$TLEPstore tlepreg, clr=0, tlsb=0
+// mchk$TLEPstore r14, r8, r4, r13, <tlepreg>, <clr>, <tlsb>, crd=0
+// .endm
+
+#define CONCAT(_a,_b) _a ## _b
+
+#define OSFmchk_TLEPstore_1(_rlog,_rs,_rs1,_nodebase,_tlepreg) \
+ lda _rs1,CONCAT(tlep_,_tlepreg)(zero); \
+ or _rs1,_nodebase,_rs1; \
+ ldlp _rs1,0(_rs1); \
+ stlp _rs,CONCAT(mchk_,_tlepreg)(_rlog)
+
+
+#define OSFmchk_TLEPstore(_tlepreg) OSFmchk_TLEPstore_1(r14,r8,r4,r13,_tlepreg)
+
+
+// .macro OSFcrd$TLEPstore tlepreg, clr=0, tlsb=0
+// mchk$TLEPstore r14, r10, r1, r0, <tlepreg>, <clr>, <tlsb>, crd=1
+// .endm
+
+#define OSFcrd_TLEPstore_1(_rlog,_rs,_rs1,_nodebase,_tlepreg) \
+ lda _rs1,CONCAT(tlep_,_tlepreg)(zero); \
+ or _rs1,_nodebase,_rs1; \
+ ldlp _rs1,0(_rs1); \
+ stlp _rs,CONCAT(mchk_crd_,_tlepreg)(_rlog)
+
+#define OSFcrd_TLEPstore_tlsb_1(_rlog,_rs,_rs1,_nodebase,_tlepreg) \
+ lda _rs1,CONCAT(tlsb_,_tlepreg)(zero); \
+ or _rs1,_nodebase,_rs1; \
+ ldlp _rs1,0(_rs1); \
+ stlp _rs,CONCAT(mchk_crd_,_tlepreg)(_rlog)
+
+#define OSFcrd_TLEPstore_tlsb_clr_1(_rlog,_rs,_rs1,_nodebase,_tlepreg) \
+ lda _rs1,CONCAT(tlsb_,_tlepreg)(zero); \
+ or _rs1,_nodebase,_rs1; \
+ ldlp _rs1,0(_rs1); \
+ stlp _rs,CONCAT(mchk_crd_,_tlepreg)(_rlog); \
+ stlp _rs,0(_rs1)
+
+
+#define OSFcrd_TLEPstore(_tlepreg) OSFcrd_TLEPstore_1(r14,r8,r4,r13,_tlepreg)
+#define OSFcrd_TLEPstore_tlsb(_tlepreg) OSFcrd_TLEPstore_tlsb_1(r14,r8,r4,r13,_tlepreg)
+#define OSFcrd_TLEPstore_tlsb_clr(_tlepreg) OSFcrd_TLEPstore_tlsb_clr_1(r14,r8,r4,r13,_tlepreg)
+
+
+
+
+// .macro save_pcia_intr irq
+// and r13, #^xf, r25 // isolate low 4 bits
+// addq r14, #4, r14 // format the TIOP Node id field
+// sll r14, #4, r14 // shift the TIOP Node id
+// or r14, r25, r10 // merge Node id/hose/HPC
+// mfpr r14, pt14 // get saved value
+// extbl r14, #'irq', r25 // confirm none outstanding
+// bne r25, sys$machine_check_while_in_pal
+// insbl r10, #'irq', r10 // align new info
+// or r14, r10, r14 // merge info
+// mtpr r14, pt14 // save it
+// bic r13, #^xf, r13 // clear low 4 bits of vector
+// .endm
+
+#define save_pcia_intr(_irq) \
+ and r13, 0xf, r25; \
+ addq r14, 4, r14; \
+ sll r14, 4, r14; \
+ or r14, r25, r10; \
+ mfpr r14, pt14; \
+ extbl r14, _irq, r25; \
+ bne r25, sys_machine_check_while_in_pal; \
+ insbl r10, _irq, r10; \
+ or r14, r10, r14; \
+ mtpr r14, pt14; \
+ bic r13, 0xf, r13
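+// Informal sketch of what save_pcia_intr records (from the code above):
+// pt14 holds one byte per IRQ level; each byte packs
+// ((TIOP node id + 4) << 4) | hose/HPC<3:0> taken from the low nibble of
+// the vector in r13. A nonzero byte already present for that IRQ means an
+// interrupt is still outstanding, so control branches to
+// sys_machine_check_while_in_pal; otherwise the new byte is merged in and
+// the low four bits of the vector in r13 are cleared.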
+
+
+
+ ALIGN_BLOCK
+
+// .sbttl "wripir - PALcode for wripir instruction"
+//orig SYS$WRIPIR: // R16 has the processor number.
+
+EXPORT(sys_wripir)
+
+//++
+// Convert the processor number to a CPU mask
+//--
+
+ and r16,0xf, r14 // mask the top stuff (16 CPUs supported)
+ bis r31,0x1,r16 // get a one
+ sll r16,r14,r14 // shift the bit to the right place
+
+//++
+// Build the Broadcast Space base address
+//--
+ lda r13,0xff8e(r31) // Load the upper address bits
+ sll r13,24,r13 // shift them to the top
+
+//++
+// Send out the IP Intr
+//--
+ stqp r14, TLSB_TLIPINTR_OFFSET(r13) // Write to TLIPINTR reg
+ wmb // Push out the store
+
+ hw_rei
+
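+// Illustrative caller sketch (hypothetical, not part of this file): the
+// kernel asks for an interprocessor interrupt on CPU n by putting the
+// processor number in a0 and issuing the privileged wripir CALL_PAL, e.g.
+//      lda      a0, 3(zero)     // target CPU 3
+//      call_pal 0x000d          // wripir (OSF/1 function number assumed)
+// which enters sys_wripir above with r16 = 3 and writes the mask 1 << 3
+// into the TLIPINTR register in broadcast space.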
+
+ ALIGN_BLOCK
+// .sbttl "CFLUSH- PALcode for CFLUSH instruction"
+//+
+// SYS$CFLUSH
+// Entry:
+//
+// R16 - contains the PFN of the page to be flushed
+//
+// Function:
+// Flush all Dstream caches of 1 entire page
+//
+//-
+
+EXPORT(sys_cflush)
+
+// #convert pfn to addr, and clean off <63:20>
+// #sll r16, <page_offset_size_bits>+<63-20>>, r12
+ sll r16, page_offset_size_bits+(63-20),r12
+
+// #ldah r13,<<1@22>+32768>@-16(r31)// + xxx<31:16>
+// # stolen from srcmax code. XXX bugnion
+ lda r13, 0x10(r31) // assume 16Mbytes of cache
+ sll r13, 20, r13 // convert to bytes
+
+
+ srl r12, 63-20, r12 // shift back to normal position
+ xor r12, r13, r12 // xor addr<18>
+
+ or r31, 8192/(32*8), r13 // get count of loads
+ nop
+
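+// Loop arithmetic for the flush below (a quick check of the constants):
+// each pass issues 8 loads spaced 32 bytes apart, i.e. 256 bytes, and the
+// count loaded above is 8192/(32*8) = 32 passes, so one full 8KB page of
+// Dstream cache blocks is touched.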
+cflush_loop:
+ subq r13, 1, r13 // decr counter
+ mfpr r25, ev5__intid // Fetch level of interruptor
+
+ ldqp r31, 32*0(r12) // do a load
+ ldqp r31, 32*1(r12) // do next load
+
+ ldqp r31, 32*2(r12) // do next load
+ ldqp r31, 32*3(r12) // do next load
+
+ ldqp r31, 32*4(r12) // do next load
+ ldqp r31, 32*5(r12) // do next load
+
+ ldqp r31, 32*6(r12) // do next load
+ ldqp r31, 32*7(r12) // do next load
+
+ mfpr r14, ev5__ipl // Fetch current level
+ lda r12, (32*8)(r12) // skip to next cache block addr
+
+        cmple   r25, r14, r25           // R25 = 1 if intid is less than or equal to ipl
+ beq r25, 1f // if any int's pending, re-queue CFLUSH -- need to check for hlt interrupt???
+
+ bne r13, cflush_loop // loop till done
+ hw_rei // back to user
+
+ ALIGN_BRANCH
+1: // Here if interrupted
+ mfpr r12, exc_addr
+ subq r12, 4, r12 // Backup PC to point to CFLUSH
+
+ mtpr r12, exc_addr
+ nop
+
+ mfpr r31, pt0 // Pad exc_addr write
+ hw_rei
+
+
+ ALIGN_BLOCK
+// .sbttl "CSERVE- PALcode for CSERVE instruction"
+//+
+// SYS$CSERVE
+//
+// Function:
+// Various functions for private use of console software
+//
+// option selector in r0
+// arguments in r16....
+//
+//
+// r0 = 0 unknown
+//
+// r0 = 1 ldqp
+// r0 = 2 stqp
+//              args are as for normal STQP/LDQP in VMS PAL
+//
+// r0 = 3 dump_tb's
+//              r16 = destination PA to dump tb's to.
+//
+// r0<0> = 1, success
+// r0<0> = 0, failure, or option not supported
+// r0<63:1> = (generally 0, but may be function dependent)
+// r0 - load data on ldqp
+//
+//-
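+// Informal usage sketch (hypothetical caller, based on the SIMOS path
+// below, which takes the function code in a2/r18 rather than r0):
+//      lda      a2, CSERVE_K_RD_IMPURE(zero)   // select "read impure base"
+//      call_pal 0x0009                         // cserve (OSF/1 function number assumed)
+// returns the per-CPU impure scratch area base in v0/r0.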
+EXPORT(sys_cserve)
+
+#ifdef SIMOS
+ /* taken from scrmax */
+ cmpeq r18, CSERVE_K_RD_IMPURE, r0
+ bne r0, Sys_Cserve_Rd_Impure
+
+ cmpeq r18, CSERVE_K_JTOPAL, r0
+ bne r0, Sys_Cserve_Jtopal
+ call_pal 0
+
+ or r31, r31, r0
+ hw_rei // and back we go
+
+Sys_Cserve_Rd_Impure:
+ mfpr r0, pt_impure // Get base of impure scratch area.
+ hw_rei
+
+ ALIGN_BRANCH
+
+Sys_Cserve_Jtopal:
+ bic a0, 3, t8 // Clear out low 2 bits of address
+ bis t8, 1, t8 // Or in PAL mode bit
+ mtpr t8,exc_addr
+ hw_rei
+
+
+#else /* SIMOS */
+
+ cmpeq r16, cserve_ldlp, r12 // check for ldqp
+ bne r12, 1f // br if
+
+ cmpeq r16, cserve_stlp, r12 // check for stqp
+ bne r12, 2f // br if
+
+ cmpeq r16, cserve_callback, r12 // check for callback entry
+ bne r12, csrv_callback // br if
+
+ cmpeq r16, cserve_identify, r12 // check for callback entry
+ bne r12, csrv_identify // br if
+
+ or r31, r31, r0 // set failure
+ nop // pad palshadow write
+
+ hw_rei // and back we go
+#endif /* SIMOS */
+
+// ldqp
+ ALIGN_QUAD
+1:
+ ldqp r0,0(r17) // get the data
+ nop // pad palshadow write
+
+ hw_rei // and back we go
+
+
+// stqp
+ ALIGN_QUAD
+2:
+ stqp r18, 0(r17) // store the data
+#ifdef SIMOS
+ lda r0,17(r31) // bogus
+#else
+ lda r0, CSERVE_SUCCESS(r31) // set success
+#endif
+ hw_rei // and back we go
+
+
+ ALIGN_QUAD
+csrv_callback:
+ ldq r16, 0(r17) // restore r16
+ ldq r17, 8(r17) // restore r17
+ lda r0, hlt_c_callback(r31)
+ br r31, sys_enter_console
+
+
+csrv_identify:
+ mfpr r0, pal_base
+ ldqp r0, 8(r0)
+ hw_rei
+
+
+// dump tb's
+ ALIGN_QUAD
+0:
+ // DTB PTEs - 64 entries
+ addq r31, 64, r0 // initialize loop counter
+ nop
+
+1: mfpr r12, ev5__dtb_pte_temp // read out next pte to temp
+ mfpr r12, ev5__dtb_pte // read out next pte to reg file
+
+ subq r0, 1, r0 // decrement loop counter
+ nop // Pad - no Mbox instr in cycle after mfpr
+
+ stqp r12, 0(r16) // store out PTE
+ addq r16, 8 ,r16 // increment pointer
+
+ bne r0, 1b
+
+ ALIGN_BRANCH
+ // ITB PTEs - 48 entries
+ addq r31, 48, r0 // initialize loop counter
+ nop
+
+2: mfpr r12, ev5__itb_pte_temp // read out next pte to temp
+ mfpr r12, ev5__itb_pte // read out next pte to reg file
+
+ subq r0, 1, r0 // decrement loop counter
+ nop //
+
+ stqp r12, 0(r16) // store out PTE
+ addq r16, 8 ,r16 // increment pointer
+
+ bne r0, 2b
+ or r31, 1, r0 // set success
+
+ hw_rei // and back we go
+
+
+// .sbttl "SYS$INTERRUPT - Interrupt processing code"
+
+//+
+// SYS$INTERRUPT
+//
+// Current state:
+// Stack is pushed
+// ps, sp and gp are updated
+// r12, r14 - available
+// r13 - INTID (new EV5 IPL)
+// r25 - ISR
+// r16, r17, r18 - available
+//
+//-
+
+
+EXPORT(sys_interrupt)
+ cmpeq r13, 31, r12
+ bne r12, sys_int_mchk_or_crd // Check for level 31 interrupt (machine check or crd)
+
+ cmpeq r13, 30, r12
+ bne r12, sys_int_powerfail // Check for level 30 interrupt (powerfail)
+
+ cmpeq r13, 29, r12
+ bne r12, sys_int_perf_cnt // Check for level 29 interrupt (performance counters)
+
+ cmpeq r13, 23, r12
+ bne r12, sys_int_23 // Check for level 23 interrupt
+
+ cmpeq r13, 22, r12
+ bne r12, sys_int_22 // Check for level 22 interrupt (might be
+ // interprocessor or timer interrupt)
+
+ cmpeq r13, 21, r12
+ bne r12, sys_int_21 // Check for level 21 interrupt
+
+ cmpeq r13, 20, r12
+ bne r12, sys_int_20 // Check for level 20 interrupt (might be corrected
+ // system error interrupt)
+
+ mfpr r14, exc_addr // ooops, something is wrong
+ br r31, pal_pal_bug_check_from_int
+
+
+
+
+//+
+//sys$int_2*
+// Routines to handle device interrupts at IPL 23-20.
+// System specific method to ack/clear the interrupt, detect passive release,
+// detect interprocessor (22), interval clock (22), corrected
+// system error (20)
+//
+// Current state:
+// Stack is pushed
+// ps, sp and gp are updated
+// r12, r14 - available
+// r13 - INTID (new EV5 IPL)
+// r25 - ISR
+//
+// On exit:
+// Interrupt has been ack'd/cleared
+// a0/r16 - signals IO device interrupt
+// a1/r17 - contains interrupt vector
+// exit to ent_int address
+//
+//-
+ ALIGN_BRANCH
+sys_int_23:
+ Read_TLINTRSUMx(r13,r10,r14) // read the right TLINTRSUMx
+ srl r13, 22, r13 // shift down to examine IPL17
+
+ Intr_Find_TIOP(r13,r14)
+ beq r14, 1f
+
+ Get_TLSB_Node_Address(r14,r10)
+ lda r10, tlsb_tlilid3_offset(r10) // Get base TLILID address
+
+ ldlp r13, 0(r10) // Read the TLILID register
+ bne r13, pal_post_dev_interrupt
+
+1: lda r16, osfint_c_passrel(r31) // passive release
+ br r31, pal_post_interrupt //
+
+
+ ALIGN_BRANCH
+sys_int_22:
+ Read_TLINTRSUMx(r13,r10,r14) // read the right TLINTRSUMx
+ srl r13, 6, r14 // check the Intim bit
+
+ blbs r14, tlep_intim // go service Intim
+ srl r13, 5, r14 // check the IP Int bit
+
+ blbs r14, tlep_ipint // go service IP Int
+ srl r13, 17, r13 // shift down to examine IPL16
+
+ Intr_Find_TIOP(r13,r14)
+ beq r14, 1f
+
+ Get_TLSB_Node_Address(r14,r10)
+ lda r10, tlsb_tlilid2_offset(r10) // Get base TLILID address
+
+ ldlp r13, 0(r10) // Read the TLILID register
+#if turbo_pcia_intr_fix == 0
+// .if eq turbo_pcia_intr_fix
+ bne r13, pal_post_dev_interrupt
+//orig .iff
+ beq r13, 1f
+
+ and r13, 0x3, r10 // check for PCIA bits
+ beq r10, pal_post_dev_interrupt // done if nothing set
+ save_pcia_intr(2)
+ br r31, pal_post_dev_interrupt //
+// .endc
+#endif /* turbo_pcia_intr_fix == 0 */
+
+1: lda r16, osfint_c_passrel(r31) // passive release
+ br r31, pal_post_interrupt //
+
+
+ ALIGN_BRANCH
+sys_int_21:
+ Read_TLINTRSUMx(r13,r10,r14) // read the right TLINTRSUMx
+ srl r13, 12, r13 // shift down to examine IPL15
+
+ Intr_Find_TIOP(r13,r14)
+ beq r14, 1f
+
+ Get_TLSB_Node_Address(r14,r10)
+ lda r10, tlsb_tlilid1_offset(r10) // Get base TLILID address
+
+ ldlp r13, 0(r10) // Read the TLILID register
+#if turbo_pcia_intr_fix == 0
+//orig .if eq turbo_pcia_intr_fix
+ bne r13, pal_post_dev_interrupt
+//orig .iff
+ beq r13, 1f
+
+ and r13, 0x3, r10 // check for PCIA bits
+ beq r10, pal_post_dev_interrupt // done if nothing set
+ save_pcia_intr(1)
+ br r31, pal_post_dev_interrupt //
+// orig .endc
+#endif /* turbo_pcia_intr_fix == 0 */
+
+1: lda r16, osfint_c_passrel(r31) // passive release
+ br r31, pal_post_interrupt //
+
+
+ ALIGN_BRANCH
+sys_int_20:
+ lda r13, 1(r31) // Duart0 bit
+ Write_TLINTRSUMx(r13,r10,r14) // clear the duart0 bit
+
+ Read_TLINTRSUMx(r13,r10,r14) // read the right TLINTRSUMx
+ blbs r13, tlep_uart0 // go service UART int
+
+ srl r13, 7, r13 // shift down to examine IPL14
+ Intr_Find_TIOP(r13,r14)
+
+ beq r14, tlep_ecc // Branch if not IPL14
+ Get_TLSB_Node_Address(r14,r10)
+
+ lda r10, tlsb_tlilid0_offset(r10) // Get base TLILID address
+ ldlp r13, 0(r10) // Read the TLILID register
+
+#if turbo_pcia_intr_fix == 0
+// orig .if eq turbo_pcia_intr_fix
+ bne r13, pal_post_dev_interrupt
+// orig .iff
+ beq r13, 1f
+
+ and r13, 0x3, r10 // check for PCIA bits
+ beq r10, pal_post_dev_interrupt // done if nothing set
+ save_pcia_intr(0)
+ br r31, pal_post_dev_interrupt //
+// orig .endc
+#endif
+1: lda r16, osfint_c_passrel(r31) // passive release
+ br r31, pal_post_interrupt //
+
+
+ ALIGN_BRANCH
+tlep_intim:
+ lda r13, 0xffb(r31) // get upper GBUS address bits
+ sll r13, 28, r13 // shift up to top
+
+ lda r13, (tlep_watch_csrc_offset)(r13) // full CSRC address
+ ldqp r13, 0(r13) // read CSRC
+
+ lda r13, 0x40(r31) // load Intim bit
+ Write_TLINTRSUMx(r13,r10,r14) // clear the Intim bit
+
+        lda     r16, osfint_c_clk(r31)  // clock interrupt code in a0
+ br r31, pal_post_interrupt // Build the stack frame
+
+
+ ALIGN_BRANCH
+tlep_ipint:
+ lda r13, 0x20(r31) // load IP Int bit
+ Write_TLINTRSUMx(r13,r10,r14) // clear the IP Int bit
+
+        lda     r16, osfint_c_ip(r31)   // interprocessor interrupt code in a0
+ br r31, pal_post_interrupt // Build the stack frame
+
+
+ ALIGN_BRANCH
+tlep_uart0:
+ lda r13, 0xffa(r31) // get upper GBUS address bits
+ sll r13, 28, r13 // shift up to top
+
+ ldlp r14, 0x80(r13) // zero pointer register
+ lda r14, 3(r31) // index to RR3
+
+ stlp r14, 0x80(r13) // write pointer register
+ mb
+
+ mb
+ ldlp r14, 0x80(r13) // read RR3
+
+ srl r14, 5, r10 // is it Channel A RX?
+ blbs r10, uart0_rx
+
+ srl r14, 4, r10 // is it Channel A TX?
+ blbs r10, uart0_tx
+
+ srl r14, 2, r10 // is it Channel B RX?
+ blbs r10, uart1_rx
+
+ srl r14, 1, r10 // is it Channel B TX?
+ blbs r10, uart1_tx
+
+ lda r8, 0(r31) // passive release
+ br r31, clear_duart0_int // clear tlintrsum and post
+
+
+ ALIGN_BRANCH
+uart0_rx:
+ lda r8, 0x680(r31) // UART0 RX vector
+ br r31, clear_duart0_int // clear tlintrsum and post
+
+
+ ALIGN_BRANCH
+uart0_tx:
+ lda r14, 0x28(r31) // Reset TX Int Pending code
+ mb
+ stlp r14, 0x80(r13) // write Channel A WR0
+ mb
+
+ lda r8, 0x6c0(r31) // UART0 TX vector
+ br r31, clear_duart0_int // clear tlintrsum and post
+
+
+ ALIGN_BRANCH
+uart1_rx:
+ lda r8, 0x690(r31) // UART1 RX vector
+ br r31, clear_duart0_int // clear tlintrsum and post
+
+
+ ALIGN_BRANCH
+uart1_tx:
+ lda r14, 0x28(r31) // Reset TX Int Pending code
+ stlp r14, 0(r13) // write Channel B WR0
+
+ lda r8, 0x6d0(r31) // UART1 TX vector
+ br r31, clear_duart0_int // clear tlintrsum and post
+
+
+ ALIGN_BRANCH
+clear_duart0_int:
+ lda r13, 1(r31) // load duart0 bit
+ Write_TLINTRSUMx(r13,r10,r14) // clear the duart0 bit
+
+ beq r8, 1f
+ or r8, r31, r13 // move vector to r13
+ br r31, pal_post_dev_interrupt // Build the stack frame
+1: nop
+ nop
+ hw_rei
+// lda r16, osfint_c_passrel(r31) // passive release
+// br r31, pal_post_interrupt //
+
+
+ ALIGN_BRANCH
+tlep_ecc:
+ mfpr r14, pt_whami // get our node id
+ extbl r14, 1, r14 // shift to bit 0
+
+ srl r14, 1, r14 // shift off cpu number
+ Get_TLSB_Node_Address(r14,r10) // compute our nodespace address
+
+ ldlp r13, tlsb_tlber_offset(r10) // read our TLBER
+ srl r13, 17, r13 // shift down the CWDE/CRDE bits
+
+ and r13, 3, r13 // mask the CWDE/CRDE bits
+ beq r13, 1f
+
+ ornot r31, r31, r12 // set flag
+ lda r9, mchk_c_sys_ecc(r31) // System Correctable error MCHK code
+ br r31, sys_merge_sys_corr // jump to CRD logout frame code
+
+1:      lda     r16, osfint_c_passrel(r31)      // passive release
+        br      r31, pal_post_interrupt         //
+
+
+ ALIGN_BRANCH
+
+pal_post_dev_interrupt:
+ or r13, r31, r17 // move vector to a1
+ or r31, osfint_c_dev, r16 // a0 signals IO device interrupt
+
+pal_post_interrupt:
+ mfpr r12, pt_entint
+
+ mtpr r12, exc_addr
+
+ nop
+ nop
+
+ hw_rei_spe
+
+
+
+//+
+// sys_passive_release
+// Just pretend the interrupt never occurred.
+//-
+
+EXPORT(sys_passive_release)
+ mtpr r11, ev5__dtb_cm // Restore Mbox current mode for ps
+ nop
+
+ mfpr r31, pt0 // Pad write to dtb_cm
+ hw_rei
+
+//+
+//sys_int_powerfail
+// A powerfail interrupt has been detected. The stack has been pushed.
+// IPL and PS are updated as well.
+//
+// I'm not sure what to do here, I'm treating it as an IO device interrupt
+//
+//-
+
+ ALIGN_BLOCK
+sys_int_powerfail:
+ lda r12, 0xffc4(r31) // get GBUS_MISCR address bits
+ sll r12, 24, r12 // shift to proper position
+ ldqp r12, 0(r12) // read GBUS_MISCR
+ srl r12, 5, r12 // isolate bit <5>
+ blbc r12, 1f // if clear, no missed mchk
+
+ // Missed a CFAIL mchk
+ lda r13, 0xffc7(r31) // get GBUS$SERNUM address bits
+ sll r13, 24, r13 // shift to proper position
+ lda r14, 0x40(r31) // get bit <6> mask
+ ldqp r12, 0(r13) // read GBUS$SERNUM
+ or r12, r14, r14 // set bit <6>
+ stqp r14, 0(r13) // clear GBUS$SERNUM<6>
+ mb
+ mb
+
+1: br r31, sys_int_mchk // do a machine check
+
+ lda r17, scb_v_pwrfail(r31) // a1 to interrupt vector
+ mfpr r25, pt_entint
+
+ lda r16, osfint_c_dev(r31) // a0 to device code
+ mtpr r25, exc_addr
+
+ nop // pad exc_addr write
+ nop
+
+ hw_rei_spe
+
+//+
+// sys$halt_interrupt
+// A halt interrupt has been detected. Pass control to the console.
+//
+//
+//-
+ EXPORT(sys_halt_interrupt)
+
+ ldah r13, 0x1800(r31) // load Halt/^PHalt bits
+ Write_TLINTRSUMx(r13,r10,r14) // clear the ^PHalt bits
+
+ mtpr r11, dtb_cm // Restore Mbox current mode
+ nop
+ nop
+ mtpr r0, pt0
+#ifndef SIMOS
+ pvc_jsr updpcb, bsr=1
+ bsr r0, pal_update_pcb // update the pcb
+#endif
+ lda r0, hlt_c_hw_halt(r31) // set halt code to hw halt
+ br r31, sys_enter_console // enter the console
+
+
+
+//+
+// sys$int_mchk_or_crd
+//
+// Current state:
+// Stack is pushed
+// ps, sp and gp are updated
+// r12
+// r13 - INTID (new EV5 IPL)
+// r14 - exc_addr
+// r25 - ISR
+// r16, r17, r18 - available
+//
+//-
+ ALIGN_BLOCK
+
+sys_int_mchk_or_crd:
+ srl r25, isr_v_mck, r12
+ blbs r12, sys_int_mchk
+ //+
+ // Not a Machine check interrupt, so must be an Internal CRD interrupt
+ //-
+
+ mb //Clear out Cbox prior to reading IPRs
+ srl r25, isr_v_crd, r13 //Check for CRD
+ blbc r13, pal_pal_bug_check_from_int //If CRD not set, shouldn't be here!!!
+
+ lda r9, 1(r31)
+ sll r9, hwint_clr_v_crdc, r9 // get ack bit for crd
+ mtpr r9, ev5__hwint_clr // ack the crd interrupt
+
+ or r31, r31, r12 // clear flag
+ lda r9, mchk_c_ecc_c(r31) // Correctable error MCHK code
+
+sys_merge_sys_corr:
+ ldah r14, 0xfff0(r31)
+ mtpr r0, pt0 // save r0 for scratch
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+        mtpr    r1, pt1                 // save r1 for scratch
+
+ ldqp r0, ei_addr(r14) // EI_ADDR IPR
+ ldqp r10, fill_syn(r14) // FILL_SYN IPR
+ bis r0, r10, r31 // Touch lds to make sure they complete before doing scrub
+
+ blbs r12, 1f // no scrubbing for IRQ0 case
+// XXX bugnion pvc_jsr crd_scrub_mem, bsr=1
+ bsr r13, sys_crd_scrub_mem // and go scrub
+
+ // ld/st pair in scrub routine will have finished due
+ // to ibox stall of stx_c. Don't need another mb.
+ ldqp r8, ei_stat(r14) // EI_STAT, unlock EI_ADDR, BC_TAG_ADDR, FILL_SYN
+ or r8, r31, r12 // Must only be executed once in this flow, and must
+ br r31, 2f // be after the scrub routine.
+
+1: ldqp r8, ei_stat(r14) // EI_STAT, unlock EI_ADDR, BC_TAG_ADDR, FILL_SYN
+ // For IRQ0 CRD case only - meaningless data.
+
+2: mfpr r13, pt_mces // Get MCES
+ srl r12, ei_stat_v_ei_es, r14 // Isolate EI_STAT:EI_ES
+ blbc r14, 6f // branch if 630
+ srl r13, mces_v_dsc, r14 // check if 620 reporting disabled
+ blbc r14, 5f // branch if enabled
+ or r13, r31, r14 // don't set SCE if disabled
+ br r31, 8f // continue
+5: bis r13, BIT(mces_v_sce), r14 // Set MCES<SCE> bit
+ br r31, 8f
+
+6: srl r13, mces_v_dpc, r14 // check if 630 reporting disabled
+ blbc r14, 7f // branch if enabled
+ or r13, r31, r14 // don't set PCE if disabled
+ br r31, 8f // continue
+7: bis r13, BIT(mces_v_pce), r14 // Set MCES<PCE> bit
+
+ // Setup SCB if dpc is not set
+8: mtpr r14, pt_mces // Store updated MCES
+ srl r13, mces_v_sce, r1 // Get SCE
+ srl r13, mces_v_pce, r14 // Get PCE
+ or r1, r14, r1 // SCE OR PCE, since they share
+ // the CRD logout frame
+ // Get base of the logout area.
+ GET_IMPURE(r14) // addr of per-cpu impure area
+ GET_ADDR(r14,(pal_logout_area+mchk_crd_base),r14)
+
+ blbc r1, sys_crd_write_logout_frame // If pce/sce not set, build the frame
+
+ // Set the 2nd error flag in the logout area:
+
+ lda r1, 3(r31) // Set retry and 2nd error flags
+ sll r1, 30, r1 // Move to bits 31:30 of logout frame flag longword
+ stlp r1, mchk_crd_flag+4(r14) // store flag longword
+ br sys_crd_ack
+
+sys_crd_write_logout_frame:
+ // should only be here if neither the pce or sce bits are set
+
+ //+
+ // Write the mchk code to the logout area
+ //-
+ stqp r9, mchk_crd_mchk_code(r14)
+
+
+ //+
+ // Write the first 2 quadwords of the logout area:
+ //-
+ lda r1, 1(r31) // Set retry flag
+ sll r1, 63, r9 // Move retry flag to bit 63
+ lda r1, mchk_crd_size(r9) // Combine retry flag and frame size
+ stqp r1, mchk_crd_flag(r14) // store flag/frame size
+
+#ifndef SIMOS
+ /* needed? bugnion */
+ lda r1, mchk_crd_sys_base(r31) // sys offset
+ sll r1, 32, r1
+ lda r1, mchk_crd_cpu_base(r1) // cpu offset
+ stqp r1, mchk_crd_offsets(r14) // store sys offset/cpu offset into logout frame
+
+#endif
+ //+
+ // Write error IPRs already fetched to the logout area
+ //-
+ stqp r0, mchk_crd_ei_addr(r14)
+ stqp r10, mchk_crd_fill_syn(r14)
+ stqp r8, mchk_crd_ei_stat(r14)
+ stqp r25, mchk_crd_isr(r14)
+ //+
+ // Log system specific info here
+ //-
+crd_storeTLEP_:
+ lda r1, 0xffc4(r31) // Get GBUS$MISCR address
+ sll r1, 24, r1
+ ldqp r1, 0(r1) // Read GBUS$MISCR
+ sll r1, 16, r1 // shift up to proper field
+ mfpr r10, pt_whami // get our node id
+ extbl r10, 1, r10 // shift to bit 0
+ or r1, r10, r1 // merge MISCR and WHAMI
+ stlp r1, mchk_crd_whami(r14) // write to crd logout area
+ srl r10, 1, r10 // shift off cpu number
+
+ Get_TLSB_Node_Address(r10,r0) // compute our nodespace address
+
+ OSFcrd_TLEPstore_tlsb(tldev)
+ OSFcrd_TLEPstore_tlsb_clr(tlber)
+ OSFcrd_TLEPstore_tlsb_clr(tlesr0)
+ OSFcrd_TLEPstore_tlsb_clr(tlesr1)
+ OSFcrd_TLEPstore_tlsb_clr(tlesr2)
+ OSFcrd_TLEPstore_tlsb_clr(tlesr3)
+
+sys_crd_ack:
+ mfpr r0, pt0 // restore r0
+ mfpr r1, pt1 // restore r1
+
+ srl r12, ei_stat_v_ei_es, r12
+ blbc r12, 5f
+ srl r13, mces_v_dsc, r10 // logging enabled?
+ br r31, 6f
+5: srl r13, mces_v_dpc, r10 // logging enabled?
+6: blbc r10, sys_crd_post_interrupt // logging enabled -- report it
+
+ // logging not enabled --
+ // Get base of the logout area.
+ GET_IMPURE(r13) // addr of per-cpu impure area
+ GET_ADDR(r13,(pal_logout_area+mchk_crd_base),r13)
+ ldlp r10, mchk_crd_rsvd(r13) // bump counter
+ addl r10, 1, r10
+ stlp r10, mchk_crd_rsvd(r13)
+ mb
+ br r31, sys_crd_dismiss_interrupt // just return
+
+ //+
+ // The stack is pushed. Load up a0,a1,a2 and vector via entInt
+ //
+ //-
+
+ ALIGN_BRANCH
+sys_crd_post_interrupt:
+ lda r16, osfint_c_mchk(r31) // flag as mchk/crd in a0
+ lda r17, scb_v_proc_corr_err(r31) // a1 <- interrupt vector
+
+ blbc r12, 1f
+ lda r17, scb_v_sys_corr_err(r31) // a1 <- interrupt vector
+
+1: subq r31, 1, r18 // get a -1
+ mfpr r25, pt_entInt
+
+ srl r18, 42, r18 // shift off low bits of kseg addr
+ mtpr r25, exc_addr // load interrupt vector
+
+ sll r18, 42, r18 // shift back into position
+ or r14, r18, r18 // EV4 algorithm - pass pointer to mchk frame as kseg address
+
+ hw_rei_spe // done
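+        // The subq/srl/sll sequence above builds the constant
+        // 0xfffffc00.00000000 (all ones shifted right and then left by 42),
+        // which, or'd with the physical logout frame address in r14, yields
+        // the OSF/1 KSEG virtual address handed to the OS in a2; that is the
+        // "EV4 algorithm" the comment refers to.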
+
+
+ //+
+ // The stack is pushed. Need to back out of it all.
+ //-
+
+sys_crd_dismiss_interrupt:
+ br r31, Call_Pal_Rti
+
+
+// .sbttl sys_crd_scrub_mem
+
+ //+
+ //
+ // sys_crd_scrub_mem
+ // called
+ // jsr r13, sys$crd_scrub_mem
+ // r0 = addr of cache block
+ //
+ //-
+
+
+
+ ALIGN_BLOCK // align for branch target
+sys_crd_scrub_mem:
+ // now find error in memory, and attempt to scrub that cache block
+ // This routine just scrubs the failing octaword
+ // Only need to "touch" one quadword per octaword to accomplish the scrub
+ srl r0, 39, r8 // get high bit of bad pa
+ blbs r8, 1f // don't attempt fixup on IO space addrs
+ nop // needed to align the ldqpl to octaword boundary
+ nop // "
+
+ ldqpl r8, 0(r0) // attempt to read the bad memory
+ // location
+ // (Note bits 63:40,3:0 of ei_addr
+ // are set to 1, but as long as
+ // we are doing a phys ref, should
+ // be ok)
+ nop // Needed to keep the Ibox from swapping the ldqpl into E1
+
+ stqpc r8, 0(r0) // Store it back if it is still there.
+ // If store fails, location already
+ // scrubbed by someone else
+
+ nop // needed to align the ldqpl to octaword boundary
+
+ lda r8, 0x20(r31) // flip bit 5 to touch next hexaword
+ xor r8, r0, r0
+ nop // needed to align the ldqpl to octaword boundary
+ nop // "
+
+ ldqpl r8, 0(r0) // attempt to read the bad memory
+ // location
+ // (Note bits 63:40,3:0 of ei_addr
+ // are set to 1, but as long as
+ // we are doing a phys ref, should
+ // be ok)
+ nop // Needed to keep the Ibox from swapping the ldqpl into E1
+
+ stqpc r8, 0(r0) // Store it back if it is still there.
+ // If store fails, location already
+ // scrubbed by someone else
+
+ lda r8, 0x20(r31) // restore r0 to original address
+ xor r8, r0, r0
+
+ //at this point, ei_stat could be locked due to a new corr error on the ld,
+ //so read ei_stat to unlock AFTER this routine.
+
+// XXX bugnion pvc$jsr crd_scrub_mem, bsr=1, dest=1
+1: ret r31, (r13) // and back we go
+
+
+// .sbttl "SYS$INT_MCHK - MCHK Interrupt code"
+//+
+// Machine check interrupt from the system. Setup and join the
+// regular machine check flow.
+// On exit:
+// pt0 - saved r0
+// pt1 - saved r1
+// pt4 - saved r4
+// pt5 - saved r5
+// pt6 - saved r6
+// pt10 - saved exc_addr
+// pt_misc<47:32> - mchk code
+// pt_misc<31:16> - scb vector
+// r14 - base of Cbox IPRs in IO space
+// MCES<mchk> is set
+//-
+ ALIGN_BLOCK
+sys_int_mchk:
+ lda r14, mchk_c_sys_hrd_error(r31)
+ mfpr r12, exc_addr
+
+ addq r14, 1, r14 // Flag as interrupt
+ nop
+
+ sll r14, 32, r14 // Move mchk code to position
+ mtpr r12, pt10 // Stash exc_addr
+
+ mfpr r12, pt_misc // Get MCES and scratch
+ mtpr r0, pt0 // Stash for scratch
+
+ zap r12, 0x3c, r12 // Clear scratch
+ blbs r12, sys_double_machine_check // MCHK halt if double machine check
+
+ or r12, r14, r12 // Combine mchk code
+ lda r14, scb_v_sysmchk(r31) // Get SCB vector
+
+ sll r14, 16, r14 // Move SCBv to position
+ or r12, r14, r14 // Combine SCBv
+
+ bis r14, BIT(mces_v_mchk), r14 // Set MCES<MCHK> bit
+ mtpr r14, pt_misc // Save mchk code!scbv!whami!mces
+
+ ldah r14, 0xfff0(r31)
+ mtpr r1, pt1 // Stash for scratch
+
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+ mtpr r4, pt4
+
+ mtpr r5, pt5
+
+#if beh_model
+// .if ne beh_model
+ ldah r25, 0xC000(r31) // Get base of demon space
+ lda r25, 0x340(r25) // Add interrupt demon offset
+
+ ldqp r13, 0(r25) // Read the control register
+ nop
+
+ and r13, 0x10, r8 // For debug, check that the interrupt is expected
+ beq r8, interrupt_not_expected
+
+ bic r13, 0x10, r13
+ stqp r13, 0(r25) // Ack and clear the interrupt
+// XXX bugnion pvc$violate 379 // stqp can't trap except replay. mt ipr only problem if mf same ipr in same shadow
+// .endc
+#endif
+
+ mtpr r6, pt6
+ br r31, sys_mchk_collect_iprs // Join common machine check flow
+
+
+// .sbttl "SYS$INT_PERF_CNT - Performance counter interrupt code"
+//+
+//sys$int_perf_cnt
+//
+// A performance counter interrupt has been detected. The stack has been pushed.
+// IPL and PS are updated as well.
+//
+// on exit to interrupt entry point ENTINT::
+// a0 = osfint$c_perf
+// a1 = scb$v_perfmon (650)
+// a2 = 0 if performance counter 0 fired
+// a2 = 1 if performance counter 1 fired
+// a2 = 2 if performance counter 2 fired
+// (if more than one counter overflowed, an interrupt will be
+// generated for each counter that overflows)
+//
+//
+//-
+ ALIGN_BLOCK
+sys_int_perf_cnt: // Performance counter interrupt
+ lda r17, scb_v_perfmon(r31) // a1 to interrupt vector
+ mfpr r25, pt_entint
+
+ lda r16, osfint_c_perf(r31) // a0 to perf counter code
+ mtpr r25, exc_addr
+
+ //isolate which perf ctr fired, load code in a2, and ack
+ mfpr r25, isr
+ or r31, r31, r18 // assume interrupt was pc0
+
+ srl r25, isr_v_pc1, r25 // isolate
+        cmovlbs r25, 1, r18             // if pc1 set, load 1 into r18
+
+        srl     r25, 1, r25             // get pc2
+        cmovlbs r25, 2, r18             // if pc2 set, load 2 into r18
+
+ lda r25, 1(r31) // get a one
+ sll r25, r18, r25
+
+ sll r25, hwint_clr_v_pc0c, r25 // ack only the perf counter that generated the interrupt
+ mtpr r25, hwint_clr
+
+ hw_rei_spe
+
+
+
+ ALIGN_BLOCK
+// .sbttl "System specific RESET code"
+//+
+// RESET code
+// On entry:
+// r1 = pal_base +8
+//
+// Entry state on trap:
+// r0 = whami
+// r2 = base of scratch area
+// r3 = halt code
+// and the following 3 if init_cbox is enabled:
+// r5 = sc_ctl
+// r6 = bc_ctl
+// r7 = bc_cnfg
+//
+// Entry state on switch:
+// r17 - new PC
+// r18 - new PCBB
+// r19 - new VPTB
+//
+//-
+
+#if rax_mode==0
+ .globl sys_reset
+sys_reset:
+// mtpr r31, ic_flush_ctl // do not flush the icache - done by hardware before SROM load
+ mtpr r31, itb_ia // clear the ITB
+ mtpr r31, dtb_ia // clear the DTB
+
+ lda r1, -8(r1) // point to start of code
+ mtpr r1, pal_base // initialize PAL_BASE
+
+ // Interrupts
+ mtpr r31, astrr // stop ASTs
+ mtpr r31, aster // stop ASTs
+ mtpr r31, sirr // clear software interrupts
+
+ mtpr r0, pt1 // r0 is whami (unless we entered via swp)
+
+//orig ldah r1, <<1@<icsr$v_sde-16>> ! <1@<icsr$v_fpe-16>> ! <2@<icsr$v_spe-16>>>(r31)
+ ldah r1,(BIT(icsr_v_sde-16)|BIT(icsr_v_fpe-16)|BIT(icsr_v_spe-16+1))(zero)
+
+#if disable_crd == 0
+// .if eq disable_crd
+ bis r31, 1, r0
+ sll r0, icsr_v_crde, r0 // A 1 in iscr<corr_read_enable>
+ or r0, r1, r1 // Set the bit
+#endif
+
+ mtpr r1, icsr // ICSR - Shadows enabled, Floating point enable,
+ // super page enabled, correct read per assembly option
+
+ // Mbox/Dcache init
+//orig lda r1, <1@<mcsr$v_sp1>>(r31)
+ lda r1,BIT(mcsr_v_sp1)(zero)
+
+ mtpr r1, mcsr // MCSR - Super page enabled
+ lda r1, BIT(dc_mode_v_dc_ena)(r31)
+ ALIGN_BRANCH
+// mtpr r1, dc_mode // turn Dcache on
+ nop
+
+ mfpr r31, pt0 // No Mbox instr in 1,2,3,4
+ mfpr r31, pt0
+ mfpr r31, pt0
+ mfpr r31, pt0
+ mtpr r31, dc_flush // flush Dcache
+
+ // build PS (IPL=7,CM=K,VMM=0,SW=0)
+ lda r11, 0x7(r31) // Set shadow copy of PS - kern mode, IPL=7
+ lda r1, 0x1F(r31)
+ mtpr r1, ipl // set internal <ipl>=1F
+ mtpr r31, ps // set new ps<cm>=0, Ibox copy
+ mtpr r31, dtb_cm // set new ps<cm>=0, Mbox copy
+
+ // Create the PALtemp pt_intmask -
+ // MAP:
+ // OSF IPL EV5 internal IPL(hex) note
+ // 0 0
+ // 1 1
+ // 2 2
+ // 3 14 device
+ // 4 15 device
+ // 5 16 device
+ // 6 1E device,performance counter, powerfail
+ // 7 1F
+ //
+
+ ldah r1, 0x1f1E(r31) // Create upper lw of int_mask
+ lda r1, 0x1615(r1)
+
+ sll r1, 32, r1
+ ldah r1, 0x1402(r1) // Create lower lw of int_mask
+
+ lda r1, 0x0100(r1)
+ mtpr r1, pt_intmask // Stash in PALtemp
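+        // Sanity check of the constant built above: the quadword is
+        // 0x1f1e1615.14020100, i.e. byte n holds the EV5 IPL for OSF IPL n
+        // (0-2 map straight through, 3-5 map to hex 14-16, 6 to 1E, 7 to 1F),
+        // matching the table in the preceding comment. The common PAL
+        // presumably indexes this byte array by the requested OSF IPL
+        // (e.g. in swpipl).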
+
+ // Unlock a bunch of chip internal IPRs
+ mtpr r31, exc_sum // clear out exeception summary and exc_mask
+ mfpr r31, va // unlock va, mmstat
+//orig  lda     r8, <<1@icperr_stat$v_dpe> ! <1@icperr_stat$v_tpe> ! <1@icperr_stat$v_tmr>>(r31)
+ lda r8,(BIT(icperr_stat_v_dpe)|BIT(icperr_stat_v_tpe)|BIT(icperr_stat_v_tmr))(zero)
+
+ mtpr r8, icperr_stat // Clear Icache parity error & timeout status
+//orig lda r8, <<1@dcperr_stat$v_lock> ! <1@dcperr_stat$v_seo>>(r31)
+ lda r8,(BIT(dcperr_stat_v_lock)|BIT(dcperr_stat_v_seo))(r31)
+
+ mtpr r8, dcperr_stat // Clear Dcache parity error status
+
+ rc r0 // clear intr_flag
+ mtpr r31, pt_trap
+
+ mfpr r0, pt_misc
+ srl r0, pt_misc_v_switch, r1
+ blbs r1, sys_reset_switch // see if we got here from swppal
+
+ // Rest of the "real" reset flow
+ // ASN
+ mtpr r31, dtb_asn
+ mtpr r31, itb_asn
+
+ lda r1, 0x67(r31)
+ sll r1, hwint_clr_v_pc0c, r1
+ mtpr r1, hwint_clr // Clear hardware interrupt requests
+
+ lda r1, BIT(mces_v_dpc)(r31) // 1 in disable processor correctable error
+ mfpr r0, pt1 // get whami
+ insbl r0, 1, r0 // isolate whami in correct pt_misc position
+ or r0, r1, r1 // combine whami and mces
+ mtpr r1, pt_misc // store whami and mces, swap bit clear
+
+ zapnot r3, 1, r0 // isolate halt code
+ mtpr r0, pt0 // save entry type
+
+ // Cycle counter
+ or r31, 1, r9 // get a one
+ sll r9, 32, r9 // shift to <32>
+ mtpr r31, cc // clear Cycle Counter
+ mtpr r9, cc_ctl // clear and enable the Cycle Counter
+ mtpr r31, pt_scc // clear System Cycle Counter
+
+
+ // Misc PALtemps
+ mtpr r31, maf_mode // no mbox instructions for 3 cycles
+ or r31, 1, r1 // get bogus scbb value
+ mtpr r1, pt_scbb // load scbb
+ mtpr r31, pt_prbr // clear out prbr
+#ifdef SIMOS
+ or zero,kludge_initial_pcbb,r1
+#else
+ mfpr r1, pal_base
+//orig sget_addr r1, (kludge_initial_pcbb-pal$base), r1, verify=0// get address for temp pcbb
+ GET_ADDR(r1, (kludge_initial_pcbb-pal_base), r1)
+#endif
+ mtpr r1, pt_pcbb // load pcbb
+ lda r1, 2(r31) // get a two
+ sll r1, 32, r1 // gen up upper bits
+ mtpr r1, mvptbr
+ mtpr r1, ivptbr
+ mtpr r31, pt_ptbr
+ // Performance counters
+ mtpr r31, pmctr
+
+#if init_cbox != 0
+// .if ne init_cbox
+ // Only init the Scache and the Bcache if there have been no previous
+ // cacheable dstream loads or stores.
+ //
+ // Inputs:
+ // r5 - sc_ctl
+ // r6 - bc_ctl
+ // r7 - bc_cnfg
+
+ ldah r0, 0xfff0(r31)
+ zap r0, 0xE0, r0 // Get Cbox IPR base
+ ldqp r19, ev5__sc_ctl(r0) // read current sc_ctl
+temp = <<<1@bc_ctl$v_ei_dis_err> + <1@bc_ctl$v_ei_ecc_or_parity> + <1@bc_ctl$v_corr_fill_dat>>@-1>
+ lda r20, temp(r31) // create default bc_ctl (bc disabled, errors disabled, ecc mode)
+ sll r20, 1, r20
+temp = 0x017441 // default bc_config
+ get_addr r21, temp, r31 // create default bc_config
+ lda r23, <1@sc_ctl_v_sc_flush>(r31) //set flag to invalidate scache in set_sc_bc_ctl
+
+// XXX bugnion pvc$jsr scbcctl, bsr=1
+ bsr r10, set_sc_bc_ctl
+ update_bc_ctl_shadow r6, r23 // update bc_ctl shadow using r6 as input// r23 gets adjusted impure pointer
+ store_reg1 bc_config, r7, r23, ipr=1 // update bc_config shadow in impure area
+// .endc
+#endif
+ // Clear pmctr_ctl in impure area
+
+#ifndef SIMOS
+ // can't assemble ???
+ update_pmctr_ctl r31, r1 // clear pmctr_ctl // r1 trashed
+#endif
+
+ ldah r14, 0xfff0(r31)
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+#ifndef SIMOS
+ ldqp r31, sc_stat(r14) // Clear sc_stat and sc_addr
+ ldqp r31, ei_stat(r14)
+ ldqp r31, ei_stat(r14) // Clear ei_stat, ei_addr, bc_tag_addr, fill_syn
+#endif
+ GET_IMPURE(r13)
+ stqpc r31, 0(r13) // Clear lock_flag
+
+ mfpr r0, pt0 // get entry type
+        br      r31, sys_enter_console  // enter the console
+
+#endif /* rax_mode == 0 */
+
+
+
+
+//.if ne rax_mode
+#if rax_mode != 0
+
+ // For RAX:
+ // r0 - icsr at first, then used for cbox ipr base offset
+ // r2 - mcsr
+ // r3 - dc_mode
+ // r4 - maf_mode
+ // r5 - sc_ctl
+ // r6 - bc_ctl
+ // r7 - bc_cnfg
+ .globl sys_reset
+sys_reset:
+ mtpr r31, ev5__dtb_cm // set mbox mode to kernel
+ mtpr r31, ev5__ps // set Ibox mode to kernel - E1
+
+ mtpr r0, ev5__icsr // Load ICSR - E1
+
+ mtpr r2, ev5__mcsr
+ mfpr r8, pal_base
+
+ ldah r0, 0xfff0(r31)
+ zap r0, 0xE0, r0 // Get Cbox IPR base
+
+ mtpr r31, ev5__itb_asn // clear asn - E1
+ ldqp r19, ev5__sc_ctl(r0) // read current sc_ctl
+
+temp = <<<1@bc_ctl$v_ei_dis_err> + <1@bc_ctl$v_ei_ecc_or_parity> + <1@bc_ctl$v_corr_fill_dat>>@-1>
+ lda r20, temp(r31) // create default bc_ctl (bc disabled, errors disabled, ecc mode)
+ sll r20, 1, r20
+
+temp = 0x017441 // default bc_config
+ get_addr r21, temp, r31 // create default bc_config
+ lda r23, <1@sc_ctl_v_sc_flush>(r31) //set flag to invalidate scache in set_sc_bc_ctl
+
+// XXX bugnion pvc$jsr scbcctl, bsr=1
+ bsr r10, set_sc_bc_ctl
+ update_bc_ctl_shadow r6, r2 // initialize bc_ctl shadow// adjusted impure pointer in r2
+ store_reg1 pmctr_ctl, r31, r2, ipr=1 // clear pmctr_ctl
+ store_reg1 bc_config, r7, r2, ipr=1 // initialize bc_config shadow
+
+ mtpr r3, ev5__dc_mode // write dc_mode
+ mtpr r31, ev5__dc_flush // flush dcache
+
+ mtpr r31, ev5__exc_sum // clear exc_sum - E1
+ mtpr r31, ev5__exc_mask // clear exc_mask - E1
+
+ ldah r2, 4(r31) // For EXC_ADDR
+ mtpr r2, ev5__exc_addr // EXC_ADDR to 40000 (hex)
+
+ mtpr r31, ev5__sirr // Clear SW interrupts (for ISP)
+ mtpr r4, ev5__maf_mode // write maf_mode
+
+ mtpr r31, ev5__alt_mode // set alt_mode to kernel
+ mtpr r31, ev5__itb_ia // clear ITB - E1
+
+ lda r1, 0x1F(r31) // For IPL
+ mtpr r1, ev5__ipl // IPL to 1F
+
+ mtpr r31, ev5__hwint_clr // clear hardware interrupts
+ mtpr r31, ev5__aster // disable AST interrupts
+
+ mtpr r31, ev5__astrr // clear AST requests
+ mtpr r31, ev5__dtb_ia // clear dtb
+
+ nop
+ mtpr r31, pt_trap
+
+ srl r2, page_offset_size_bits, r9 // Start to make PTE for address 40000
+ sll r9, 32, r9
+
+ lda r9, 0x7F01(r9) // Make PTE, V set, all RE set, all but UWE set
+ nop
+
+ mtpr r9, dtb_pte // ACORE hack, load TB with 1-1 translation for address 40000
+ mtpr r2, itb_tag // ACORE hack, load TB with 1-1 translation for address 40000
+
+ mtpr r2, dtb_tag
+ mtpr r9, itb_pte
+
+ and r31, r31, r0 // clear deposited registers, note: r2 already overwritten
+ and r31, r31, r3
+
+ and r31, r31, r4
+ and r31, r31, r5
+
+ and r31, r31, r6
+ and r31, r31, r7
+
+ hw_rei //May need to be a rei_stall since
+ //we write to TB's above
+ //However, it currently works ok. (JH)
+
+
+// .endc
+#endif /*rax_mode != 0 */
+
+
+ // swppal entry
+ // r0 - pt_misc
+ // r17 - new PC
+ // r18 - new PCBB
+ // r19 - new VPTB
+sys_reset_switch:
+ or r31, 1, r9
+ sll r9, pt_misc_v_switch, r9
+ bic r0, r9, r0 // clear switch bit
+ mtpr r0, pt_misc
+
+ rpcc r1 // get cyccounter
+
+ ldqp r22, osfpcb_q_fen(r18) // get new fen/pme
+ ldlp r23, osfpcb_l_cc(r18) // get cycle counter
+ ldlp r24, osfpcb_l_asn(r18) // get new asn
+
+
+ ldqp r25, osfpcb_q_Mmptr(r18)// get new mmptr
+ sll r25, page_offset_size_bits, r25 // convert pfn to pa
+ mtpr r25, pt_ptbr // load the new mmptr
+ mtpr r18, pt_pcbb // set new pcbb
+
+ bic r17, 3, r17 // clean use pc
+ mtpr r17, exc_addr // set new pc
+ mtpr r19, mvptbr
+ mtpr r19, ivptbr
+
+ ldqp r30, osfpcb_q_Usp(r18) // get new usp
+ mtpr r30, pt_usp // save usp
+
+ sll r24, dtb_asn_v_asn, r8
+ mtpr r8, dtb_asn
+ sll r24, itb_asn_v_asn, r24
+ mtpr r24, itb_asn
+
+ mfpr r25, icsr // get current icsr
+ lda r24, 1(r31)
+ sll r24, icsr_v_fpe, r24 // 1 in icsr<fpe> position
+ bic r25, r24, r25 // clean out old fpe
+ and r22, 1, r22 // isolate new fen bit
+ sll r22, icsr_v_fpe, r22
+ or r22, r25, r25 // or in new fpe
+ mtpr r25, icsr // update ibox ipr
+
+ subl r23, r1, r1 // gen new cc offset
+ insll r1, 4, r1 // << 32
+ mtpr r1, cc // set new offset
+
+ or r31, r31, r0 // set success
+ ldqp r30, osfpcb_q_Ksp(r18) // get new ksp
+ mfpr r31, pt0 // stall
+ hw_rei_stall
+
+// .sbttl "SYS_MACHINE_CHECK - Machine check PAL"
+ ALIGN_BLOCK
+//+
+//sys$machine_check
+// A machine_check trap has occurred. The Icache has been flushed.
+//
+//-
+
+EXPORT(sys_machine_check)
+ // Need to fill up the refill buffer (32 instructions) and
+ // then flush the Icache again.
+ // Also, due to possible 2nd Cbox register file write for
+ // uncorrectable errors, no register file read or write for 7 cycles.
+
+ nop
+ mtpr r0, pt0 // Stash for scratch -- OK if Cbox overwrites r0 later
+
+ nop
+ nop
+
+ nop
+ nop
+
+ nop
+ nop
+
+ nop
+ nop
+ // 10 instructions// 5 cycles
+
+ nop
+ nop
+
+ nop
+ nop
+
+ // Register file can now be written
+ lda r0, scb_v_procmchk(r31) // SCB vector
+ mfpr r13, pt_mces // Get MCES
+ sll r0, 16, r0 // Move SCBv to correct position
+        bis     r13, BIT(mces_v_mchk), r14      // Set MCES<MCHK> bit
+
+
+ zap r14, 0x3C, r14 // Clear mchk_code word and SCBv word
+ mtpr r14, pt_mces
+ // 20 instructions
+
+ nop
+ or r14, r0, r14 // Insert new SCB vector
+ lda r0, mchk_c_proc_hrd_error(r31) // MCHK code
+ mfpr r12, exc_addr
+
+ sll r0, 32, r0 // Move MCHK code to correct position
+ mtpr r4, pt4
+ or r14, r0, r14 // Insert new MCHK code
+ mtpr r14, pt_misc // Store updated MCES, MCHK code, and SCBv
+
+ ldah r14, 0xfff0(r31)
+ mtpr r1, pt1 // Stash for scratch - 30 instructions
+
+ zap r14, 0xE0, r14 // Get Cbox IPR base
+ mtpr r12, pt10 // Stash exc_addr
+
+
+
+ mtpr r31, ic_flush_ctl // Second Icache flush, now it is really flushed.
+ blbs r13, sys_double_machine_check // MCHK halt if double machine check
+
+ mtpr r6, pt6
+ mtpr r5, pt5
+
+ // Look for the powerfail cases here....
+ mfpr r4, isr
+ srl r4, isr_v_pfl, r4
+ blbc r4, sys_mchk_collect_iprs // skip if no powerfail interrupt pending
+ lda r4, 0xffc4(r31) // get GBUS$MISCR address bits
+ sll r4, 24, r4 // shift to proper position
+ ldqp r4, 0(r4) // read GBUS$MISCR
+ srl r4, 5, r4 // isolate bit <5>
+ blbc r4, sys_mchk_collect_iprs // skip if already cleared
+ // No missed CFAIL mchk
+ lda r5, 0xffc7(r31) // get GBUS$SERNUM address bits
+ sll r5, 24, r5 // shift to proper position
+ lda r6, 0x40(r31) // get bit <6> mask
+ ldqp r4, 0(r5) // read GBUS$SERNUM
+ or r4, r6, r6 // set bit <6>
+ stqp r6, 0(r5) // clear GBUS$SERNUM<6>
+ mb
+ mb
+
+
+ //+
+ // Start to collect the IPRs. Common entry point for mchk flows.
+ //
+ // Current state:
+ // pt0 - saved r0
+ // pt1 - saved r1
+ // pt4 - saved r4
+ // pt5 - saved r5
+ // pt6 - saved r6
+ // pt10 - saved exc_addr
+ // pt_misc<47:32> - mchk code
+ // pt_misc<31:16> - scb vector
+ // r14 - base of Cbox IPRs in IO space
+ // r0, r1, r4, r5, r6, r12, r13, r25 - available
+ // r8, r9, r10 - available as all loads are physical
+ // MCES<mchk> is set
+ //
+ //-
+
+EXPORT(sys_mchk_collect_iprs)
+ mb // MB before reading Scache IPRs
+ mfpr r1, icperr_stat
+
+ mfpr r8, dcperr_stat
+ mtpr r31, dc_flush // Flush the Dcache
+
+ mfpr r31, pt0 // Pad Mbox instructions from dc_flush
+ mfpr r31, pt0
+ nop
+ nop
+
+ ldqp r9, sc_addr(r14) // SC_ADDR IPR
+ bis r9, r31, r31 // Touch ld to make sure it completes before
+ // read of SC_STAT
+ ldqp r10, sc_stat(r14) // SC_STAT, also unlocks SC_ADDR
+
+ ldqp r12, ei_addr(r14) // EI_ADDR IPR
+ ldqp r13, bc_tag_addr(r14) // BC_TAG_ADDR IPR
+ ldqp r0, fill_syn(r14) // FILL_SYN IPR
+ bis r12, r13, r31 // Touch lds to make sure they complete before reading EI_STAT
+ bis r0, r0, r31 // Touch lds to make sure they complete before reading EI_STAT
+ ldqp r25, ei_stat(r14) // EI_STAT, unlock EI_ADDR, BC_TAG_ADDR, FILL_SYN
+ ldqp r31, ei_stat(r14) // Read again to insure it is unlocked
+
+
+
+
+ //+
+ // Look for nonretryable cases
+ // In this segment:
+ // r5<0> = 1 means retryable
+ // r4, r6, and r14 are available for scratch
+ //
+ //-
+
+
+ bis r31, r31, r5 // Clear local retryable flag
+ srl r25, ei_stat_v_bc_tperr, r25 // Move EI_STAT status bits to low bits
+
+ lda r4, 1(r31)
+ sll r4, icperr_stat_v_tmr, r4
+ and r1, r4, r4 // Timeout reset
+ bne r4, sys_cpu_mchk_not_retryable
+
+ and r8, BIT(dcperr_stat_v_lock), r4 // DCache parity error locked
+ bne r4, sys_cpu_mchk_not_retryable
+
+ lda r4, 1(r31)
+ sll r4, sc_stat_v_sc_scnd_err, r4
+ and r10, r4, r4 // 2nd Scache error occurred
+ bne r4, sys_cpu_mchk_not_retryable
+
+
+ bis r31, 0xa3, r4 // EI_STAT Bcache Tag Parity Error, Bcache Tag Control
+ // Parity Error, Interface Parity Error, 2nd Error
+
+ and r25, r4, r4
+ bne r4, sys_cpu_mchk_not_retryable
+
+        bis     r31, BIT((ei_stat_v_unc_ecc_err-ei_stat_v_bc_tperr)), r4
+ and r25, r4, r4 // Isolate the Uncorrectable Error Bit
+        bis     r31, BIT((ei_stat_v_fil_ird-ei_stat_v_bc_tperr)), r6
+ cmovne r6, 0, r4 // r4 = 0 if IRD or if No Uncorrectable Error
+ bne r4, sys_cpu_mchk_not_retryable
+
+ lda r4, 7(r31)
+ and r10, r4, r4 // Isolate the Scache Tag Parity Error bits
+ bne r4, sys_cpu_mchk_not_retryable // All Scache Tag PEs are not retryable
+
+
+ lda r4, 0x7f8(r31)
+ and r10, r4, r4 // Isolate the Scache Data Parity Error bits
+ srl r10, sc_stat_v_cbox_cmd, r6
+ and r6, 0x1f, r6 // Isolate Scache Command field
+ subq r6, 1, r6 // Scache Iread command = 1
+ cmoveq r6, 0, r4 // r4 = 0 if IRD or if No Parity Error
+ bne r4, sys_cpu_mchk_not_retryable
+
+ // Look for the system unretryable cases here....
+
+ mfpr r4, isr // mchk_interrupt pin asserted
+ srl r4, isr_v_mck, r4
+ blbs r4, sys_cpu_mchk_not_retryable
+
+
+
+ //+
+ // Look for retryable cases
+ // In this segment:
+ // r5<0> = 1 means retryable
+ // r6 - holds the mchk code
+ // r4 and r14 are available for scratch
+ //
+ //-
+
+
+ // Within the chip, the retryable cases are Istream errors
+ lda r4, 3(r31)
+ sll r4, icperr_stat_v_dpe, r4
+ and r1, r4, r4
+ cmovne r4, 1, r5 // Retryable if just Icache parity error
+
+
+ lda r4, 0x7f8(r31)
+ and r10, r4, r4 // Isolate the Scache Data Parity Error bits
+ srl r10, sc_stat_v_cbox_cmd, r14
+ and r14, 0x1f, r14 // Isolate Scache Command field
+ subq r14, 1, r14 // Scache Iread command = 1
+ cmovne r4, 1, r4 // r4 = 1 if Scache data parity error bit set
+ cmovne r14, 0, r4 // r4 = 1 if Scache PE and Iread
+ bis r4, r5, r5 // Accumulate
+
+
+ bis r31, BIT((ei_stat_v_unc_ecc_err-ei_stat_v_bc_tperr)), r4
+ and r25, r4, r4 // Isolate the Uncorrectable Error Bit
+ and r25, BIT((ei_stat_v_fil_ird-ei_stat_v_bc_tperr)), r14 // Isolate the Iread bit
+ cmovne r4, 1, r4 // r4 = 1 if uncorr error
+ cmoveq r14, 0, r4 // r4 = 1 if uncorr and Iread
+ bis r4, r5, r5 // Accumulate
+
+ mfpr r6, pt_misc
+ extwl r6, 4, r6 // Fetch mchk code
+ bic r6, 1, r6 // Clear flag from interrupt flow
+ cmovne r5, mchk_c_retryable_ird, r6 // Set mchk code
+
+
+
+ // In the system, the retryable cases are ...
+ // (code here handles beh model read NXM)
+
+#if beh_model != 0
+// .if ne beh_model
+ ldah r4, 0xC000(r31) // Get base of demon space
+ lda r4, 0x550(r4) // Add NXM demon flag offset
+
+ ldqp r4, 0(r4) // Read the demon register
+ lda r14, mchk_c_read_nxm(r31)
+ cmovlbs r4, r14, r6 // Set mchk code if read NXM
+ cmovlbs r4, 1, r4
+ bis r4, r5, r5 // Accumulate retry bit
+#endif
+
+
+ //+
+ // Write the logout frame
+ //
+ // Current state:
+ // r0 - fill_syn
+ // r1 - icperr_stat
+ // r4 - available
+ // r5<0> - retry flag
+ // r6 - mchk code
+ // r8 - dcperr_stat
+ // r9 - sc_addr
+ // r10 - sc_stat
+ // r12 - ei_addr
+ // r13 - bc_tag_addr
+ // r14 - available
+ // r25 - ei_stat (shifted)
+ // pt0 - saved r0
+ // pt1 - saved r1
+ // pt4 - saved r4
+ // pt5 - saved r5
+ // pt6 - saved r6
+ // pt10 - saved exc_addr
+ //
+ //-
+
+sys_mchk_write_logout_frame:
+ // Get base of the logout area.
+ GET_IMPURE(r14) // addr of per-cpu impure area
+ GET_ADDR(r14,pal_logout_area+mchk_mchk_base,r14)
+
+ // Write the first 2 quadwords of the logout area:
+
+ sll r5, 63, r5 // Move retry flag to bit 63
+ lda r4, mchk_size(r5) // Combine retry flag and frame size
+ stqp r4, mchk_flag(r14) // store flag/frame size
+ lda r4, mchk_sys_base(r31) // sys offset
+ sll r4, 32, r4
+ lda r4, mchk_cpu_base(r4) // cpu offset
+ stqp r4, mchk_offsets(r14) // store sys offset/cpu offset into logout frame
+
+ //+
+ // Write the mchk code to the logout area
+ // Write error IPRs already fetched to the logout area
+ // Restore some GPRs from PALtemps
+ //-
+
+ mfpr r5, pt5
+ stqp r6, mchk_mchk_code(r14)
+ mfpr r4, pt4
+ stqp r1, mchk_ic_perr_stat(r14)
+ mfpr r6, pt6
+ stqp r8, mchk_dc_perr_stat(r14)
+ mfpr r1, pt1
+ stqp r9, mchk_sc_addr(r14)
+ stqp r10, mchk_sc_stat(r14)
+ stqp r12, mchk_ei_addr(r14)
+ stqp r13, mchk_bc_tag_addr(r14)
+ stqp r0, mchk_fill_syn(r14)
+ mfpr r0, pt0
+ sll r25, ei_stat_v_bc_tperr, r25 // Move EI_STAT status bits back to expected position
+ // retrieve lower 28 bits again from ei_stat and restore before storing to logout frame
+ ldah r13, 0xfff0(r31)
+ zapnot r13, 0x1f, r13
+ ldqp r13, ei_stat(r13)
+ sll r13, 64-ei_stat_v_bc_tperr, r13
+ srl r13, 64-ei_stat_v_bc_tperr, r13
+ or r25, r13, r25
+ stqp r25, mchk_ei_stat(r14)
+
+
+
+
+ //+
+ // complete the CPU-specific part of the logout frame
+ //-
+
+#ifndef SIMOS
+        // can't assemble. Where is the macro?
+ mchk_logout mm_stat
+ mchk_logout va // Unlocks VA and MM_STAT
+ mchk_logout isr
+ mchk_logout icsr
+ mchk_logout pal_base
+ mchk_logout exc_mask
+ mchk_logout exc_sum
+#endif
+
+ ldah r13, 0xfff0(r31)
+ zap r13, 0xE0, r13 // Get Cbox IPR base
+ ldqp r13, ld_lock(r13) // Get ld_lock IPR
+ stqp r13, mchk_ld_lock(r14) // and stash it in the frame
+
+ //+
+ // complete the PAL-specific part of the logout frame
+ //-
+#ifdef vms
+ t = 0
+ .repeat 24
+ pt_mchk_logout \t
+ t = t + 1
+ .endr
+#endif
+#ifndef SIMOS
+ //can't assemble ?
+ pt_mchk_logout 0
+ pt_mchk_logout 1
+ pt_mchk_logout 2
+ pt_mchk_logout 3
+ pt_mchk_logout 4
+ pt_mchk_logout 5
+ pt_mchk_logout 6
+ pt_mchk_logout 7
+ pt_mchk_logout 8
+ pt_mchk_logout 9
+ pt_mchk_logout 10
+ pt_mchk_logout 11
+ pt_mchk_logout 12
+ pt_mchk_logout 13
+ pt_mchk_logout 14
+ pt_mchk_logout 15
+ pt_mchk_logout 16
+ pt_mchk_logout 17
+ pt_mchk_logout 18
+ pt_mchk_logout 19
+ pt_mchk_logout 20
+ pt_mchk_logout 21
+ pt_mchk_logout 22
+ pt_mchk_logout 23
+#endif
+
+
+ //+
+ // Log system specific info here
+ //-
+
+#if alpha_fw != 0
+// .if ne alpha_fw
+storeTLEP_:
+ lda r13, 0xffc4(r31) // Get GBUS$MISCR address
+ sll r13, 24, r13
+ ldqp r13, 0(r13) // Read GBUS$MISCR
+ sll r13, 16, r13 // shift up to proper field
+ mfpr r8, pt_whami // get our node id
+ extbl r8, 1, r8 // shift to bit 0
+ or r13, r8, r13 // merge MISCR and WHAMI
+ stlp r13, mchk$gbus(r14) // write to logout area
+ srl r8, 1, r8 // shift off cpu number
+
+ Get_TLSB_Node_Address r8,r13 // compute our nodespace address
+
+ OSFmchk_TLEPstore tldev, tlsb=1
+ OSFmchk_TLEPstore tlber, tlsb=1, clr=1
+ OSFmchk_TLEPstore tlcnr, tlsb=1
+ OSFmchk_TLEPstore tlvid, tlsb=1
+ OSFmchk_TLEPstore tlesr0, tlsb=1, clr=1
+ OSFmchk_TLEPstore tlesr1, tlsb=1, clr=1
+ OSFmchk_TLEPstore tlesr2, tlsb=1, clr=1
+ OSFmchk_TLEPstore tlesr3, tlsb=1, clr=1
+ OSFmchk_TLEPstore tlmodconfig
+ OSFmchk_TLEPstore tlepaerr, clr=1
+ OSFmchk_TLEPstore tlepderr, clr=1
+ OSFmchk_TLEPstore tlepmerr, clr=1
+ OSFmchk_TLEPstore tlintrmask0
+ OSFmchk_TLEPstore tlintrmask1
+ OSFmchk_TLEPstore tlintrsum0
+ OSFmchk_TLEPstore tlintrsum1
+ OSFmchk_TLEPstore tlep_vmg
+// .endc
+#endif /*alpha_fw != 0 */
+ // Unlock IPRs
+ lda r8, (BIT(dcperr_stat_v_lock)|BIT(dcperr_stat_v_seo))(r31)
+ mtpr r8, dcperr_stat // Clear Dcache parity error status
+
+ lda r8, (BIT(icperr_stat_v_dpe)|BIT(icperr_stat_v_tpe)|BIT(icperr_stat_v_tmr))(r31)
+ mtpr r8, icperr_stat // Clear Icache parity error & timeout status
+
+1: ldqp r8, mchk_ic_perr_stat(r14) // get ICPERR_STAT value
+ GET_ADDR(r0,0x1800,r31) // get ICPERR_STAT error mask
+ and r0, r8, r0 // compare
+ beq r0, 2f // check next case if nothing set
+ lda r0, mchk_c_retryable_ird(r31) // set new MCHK code
+ br r31, do_670 // setup new vector
+
+2: ldqp r8, mchk_dc_perr_stat(r14) // get DCPERR_STAT value
+ GET_ADDR(r0,0x3f,r31) // get DCPERR_STAT error mask
+ and r0, r8, r0 // compare
+ beq r0, 3f // check next case if nothing set
+ lda r0, mchk_c_dcperr(r31) // set new MCHK code
+ br r31, do_670 // setup new vector
+
+3: ldqp r8, mchk_sc_stat(r14) // get SC_STAT value
+ GET_ADDR(r0,0x107ff,r31) // get SC_STAT error mask
+ and r0, r8, r0 // compare
+ beq r0, 4f // check next case if nothing set
+ lda r0, mchk_c_scperr(r31) // set new MCHK code
+ br r31, do_670 // setup new vector
+
+4: ldqp r8, mchk_ei_stat(r14) // get EI_STAT value
+ GET_ADDR(r0,0x30000000,r31) // get EI_STAT error mask
+ and r0, r8, r0 // compare
+ beq r0, 5f // check next case if nothing set
+ lda r0, mchk_c_bcperr(r31) // set new MCHK code
+ br r31, do_670 // setup new vector
+
+5: ldlp r8, mchk_tlber(r14) // get TLBER value
+ GET_ADDR(r0,0xfe01,r31) // get high TLBER mask value
+ sll r0, 16, r0 // shift into proper position
+ GET_ADDR(r1,0x03ff,r31) // get low TLBER mask value
+ or r0, r1, r0 // merge mask values
+ and r0, r8, r0 // compare
+ beq r0, 6f // check next case if nothing set
+ GET_ADDR(r0, 0xfff0, r31) // set new MCHK code
+ br r31, do_660 // setup new vector
+
+6: ldlp r8, mchk_tlepaerr(r14) // get TLEPAERR value
+ GET_ADDR(r0,0xff7f,r31) // get TLEPAERR mask value
+ and r0, r8, r0 // compare
+ beq r0, 7f // check next case if nothing set
+ GET_ADDR(r0, 0xfffa, r31) // set new MCHK code
+ br r31, do_660 // setup new vector
+
+7: ldlp r8, mchk_tlepderr(r14) // get TLEPDERR value
+ GET_ADDR(r0,0x7,r31) // get TLEPDERR mask value
+ and r0, r8, r0 // compare
+ beq r0, 8f // check next case if nothing set
+ GET_ADDR(r0, 0xfffb, r31) // set new MCHK code
+ br r31, do_660 // setup new vector
+
+8: ldlp r8, mchk_tlepmerr(r14) // get TLEPMERR value
+ GET_ADDR(r0,0x3f,r31) // get TLEPMERR mask value
+ and r0, r8, r0 // compare
+ beq r0, 9f // check next case if nothing set
+ GET_ADDR(r0, 0xfffc, r31) // set new MCHK code
+ br r31, do_660 // setup new vector
+
+9: ldqp r8, mchk_ei_stat(r14) // get EI_STAT value
+ GET_ADDR(r0,0xb,r31) // get EI_STAT mask value
+ sll r0, 32, r0 // shift to upper lw
+ and r0, r8, r0 // compare
+ beq r0, 1f // check next case if nothing set
+ GET_ADDR(r0,0xfffd,r31) // set new MCHK code
+ br r31, do_660 // setup new vector
+
+1: ldlp r8, mchk_tlepaerr(r14) // get TLEPAERR value
+ GET_ADDR(r0,0x80,r31) // get TLEPAERR mask value
+ and r0, r8, r0 // compare
+ beq r0, cont_logout_frame // check next case if nothing set
+ GET_ADDR(r0, 0xfffe, r31) // set new MCHK code
+ br r31, do_660 // setup new vector
+
+do_670: lda r8, scb_v_procmchk(r31) // SCB vector
+ br r31, do_6x0_cont
+do_660: lda r8, scb_v_sysmchk(r31) // SCB vector
+do_6x0_cont:
+ sll r8, 16, r8 // shift to proper position
+ mfpr r1, pt_misc // fetch current pt_misc
+ GET_ADDR(r4,0xffff, r31) // mask for vector field
+ sll r4, 16, r4 // shift to proper position
+ bic r1, r4, r1 // clear out old vector field
+ or r1, r8, r1 // merge in new vector
+ mtpr r1, pt_misc // save new vector field
+ stlp r0, mchk_mchk_code(r14) // save new mchk code
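+
+ //+
+ // Taken together, the chain of checks above acts as a priority encoder.
+ // A C sketch (mask constants copied from the GET_ADDR calls; "frame"
+ // stands for the logout area at r14 and is not a real symbol):
+ //
+ //   if      (frame->ic_perr_stat & 0x1800)        code = mchk_c_retryable_ird; /* 670 */
+ //   else if (frame->dc_perr_stat & 0x3f)          code = mchk_c_dcperr;        /* 670 */
+ //   else if (frame->sc_stat      & 0x107ff)       code = mchk_c_scperr;        /* 670 */
+ //   else if (frame->ei_stat      & 0x30000000)    code = mchk_c_bcperr;        /* 670 */
+ //   else if (frame->tlber        & 0xfe0103ff)    code = 0xfff0;               /* 660 */
+ //   else if (frame->tlepaerr     & 0xff7f)        code = 0xfffa;               /* 660 */
+ //   else if (frame->tlepderr     & 0x7)           code = 0xfffb;               /* 660 */
+ //   else if (frame->tlepmerr     & 0x3f)          code = 0xfffc;               /* 660 */
+ //   else if (frame->ei_stat      & (0xbUL << 32)) code = 0xfffd;               /* 660 */
+ //   else if (frame->tlepaerr     & 0x80)          code = 0xfffe;               /* 660 */
+ //
+ // The 670 cases vector through scb_v_procmchk, the 660 cases through
+ // scb_v_sysmchk; the chosen vector is merged into pt_misc<31:16> and the
+ // code is written back to mchk_mchk_code.
+ //-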
+
+cont_logout_frame:
+ // Restore some GPRs from PALtemps
+ mfpr r0, pt0
+ mfpr r1, pt1
+ mfpr r4, pt4
+
+ mfpr r12, pt10 // fetch original PC
+ blbs r12, sys_machine_check_while_in_pal // MCHK halt if machine check in pal
+
+//XXXbugnion pvc_jsr armc, bsr=1
+ bsr r12, sys_arith_and_mchk // go check for and deal with arith trap
+
+ mtpr r31, exc_sum // Clear Exception Summary
+
+ mfpr r25, pt10 // write exc_addr after arith_and_mchk to pick up new pc
+ stqp r25, mchk_exc_addr(r14)
+
+ //+
+ // Set up the km trap
+ //-
+
+
+sys_post_mchk_trap:
+ mfpr r25, pt_misc // Check for flag from mchk interrupt
+ extwl r25, 4, r25
+ blbs r25, sys_mchk_stack_done // Stack frame already pushed if we came from the interrupt flow
+
+ bis r14, r31, r12 // stash pointer to logout area
+ mfpr r14, pt10 // get exc_addr
+
+ sll r11, 63-3, r25 // get mode to msb
+ bge r25, 3f
+
+ mtpr r31, dtb_cm
+ mtpr r31, ps
+
+ mtpr r30, pt_usp // save user stack
+ mfpr r30, pt_ksp
+
+3:
+ lda sp, 0-osfsf_c_size(sp) // allocate stack space
+ nop
+
+ stq r18, osfsf_a2(sp) // a2
+ stq r11, osfsf_ps(sp) // save ps
+
+ stq r14, osfsf_pc(sp) // save pc
+ mfpr r25, pt_entint // get the VA of the interrupt routine
+
+ stq r16, osfsf_a0(sp) // a0
+ lda r16, osfint_c_mchk(r31) // flag as mchk in a0
+
+ stq r17, osfsf_a1(sp) // a1
+ mfpr r17, pt_misc // get vector
+
+ stq r29, osfsf_gp(sp) // old gp
+ mtpr r25, exc_addr //
+
+ or r31, 7, r11 // get new ps (km, high ipl)
+ subq r31, 1, r18 // get a -1
+
+ extwl r17, 2, r17 // a1 <- interrupt vector
+ bis r31, ipl_machine_check, r25
+
+ mtpr r25, ipl // Set internal ipl
+ srl r18, 42, r18 // shift off low bits of kseg addr
+
+ sll r18, 42, r18 // shift back into position
+ mfpr r29, pt_kgp // get the kern r29
+
+ or r12, r18, r18 // EV4 algorithm - pass pointer to mchk frame as kseg address
+ hw_rei_spe // out to interrupt dispatch routine
+
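+ //+
+ // In kernel terms the dispatch above amounts to the call sketched below
+ // (illustrative only; KSEG_BASE and the argument names are not real
+ // symbols in this file):
+ //
+ //   entInt(/* a0 */ osfint_c_mchk,
+ //          /* a1 */ vector,                         /* pt_misc<31:16>          */
+ //          /* a2 */ KSEG_BASE | (unsigned long)frame);
+ //                                                   /* KSEG_BASE == ~0UL << 42,
+ //                                                      the subq/srl/sll on r18 */
+ //-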
+
+ //+
+ // The stack is pushed. Load up a0,a1,a2 and vector via entInt
+ //
+ //-
+ ALIGN_BRANCH
+sys_mchk_stack_done:
+ lda r16, osfint_c_mchk(r31) // flag as mchk/crd in a0
+ lda r17, scb_v_sysmchk(r31) // a1 <- interrupt vector
+
+ subq r31, 1, r18 // get a -1
+ mfpr r25, pt_entInt
+
+ srl r18, 42, r18 // shift off low bits of kseg addr
+ mtpr r25, exc_addr // load interrupt vector
+
+ sll r18, 42, r18 // shift back into position
+ or r14, r18, r18 // EV4 algorithm - pass pointer to mchk frame as kseg address
+
+ hw_rei_spe // done
+
+
+ ALIGN_BRANCH
+sys_cpu_mchk_not_retryable:
+ mfpr r6, pt_misc
+ extwl r6, 4, r6 // Fetch mchk code
+ br r31, sys_mchk_write_logout_frame //
+
+
+
+//+
+//sys$double_machine_check - a machine check was started, but MCES<MCHK> was
+// already set. We will now double machine check halt.
+//
+// pt0 - old R0
+//
+//-
+
+EXPORT(sys_double_machine_check)
+#ifndef SIMOS
+ pvc$jsr updpcb, bsr=1
+ bsr r0, pal_update_pcb // update the pcb
+#endif
+ lda r0, hlt_c_dbl_mchk(r31)
+ br r31, sys_enter_console
+
+//+
+//sys$machine_check_while_in_pal - a machine check was started, exc_addr points to
+// a PAL PC. We will now machine check halt.
+//
+// pt0 - old R0
+//
+//-
+sys_machine_check_while_in_pal:
+ stqp r12, mchk_exc_addr(r14) // exc_addr has not yet been written
+
+#ifndef SIMOS
+ pvc$jsr updpcb, bsr=1
+ bsr r0, pal_update_pcb // update the pcb
+#endif
+ lda r0, hlt_c_mchk_from_pal(r31)
+ br r31, sys_enter_console
+
+
+//ARITH and MCHK
+// Check for arithmetic errors and build trap frame,
+// but don't post the trap.
+// on entry:
+// pt10 - exc_addr
+// r12 - return address
+// r14 - logout frame pointer
+// r13 - available
+// r8,r9,r10 - available except across stq's
+// pt0,1,6 - available
+//
+// on exit:
+// pt10 - new exc_addr
+// r17 = exc_mask
+// r16 = exc_sum
+// r14 - logout frame pointer
+//
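+//
+// In outline (C-flavoured sketch; the comments stand in for the frame
+// stores, and none of the names below are real symbols in this file):
+//
+//   unsigned long sum = exc_sum >> exc_sum_v_swc;
+//   if (sum == 0)
+//       return;                        /* no arithmetic trap pending        */
+//   /* otherwise build a kernel-mode frame on the stack:                    */
+//   /*   pc = exc_addr, ps, gp, a0 = sum, a1 = exc_mask, a2 unchanged       */
+//   /* and point pt10 at entArith so the eventual rei lands there           */
+//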
+ ALIGN_BRANCH
+sys_arith_and_mchk:
+ mfpr r13, ev5__exc_sum
+ srl r13, exc_sum_v_swc, r13
+ bne r13, handle_arith_and_mchk
+
+// XXX bugnion pvc$jsr armc, bsr=1, dest=1
+ ret r31, (r12) // return if no outstanding arithmetic error
+
+handle_arith_and_mchk:
+ mtpr r31, ev5__dtb_cm // Set Mbox current mode to kernel -
+ // no virt ref for next 2 cycles
+ mtpr r14, pt0
+
+ mtpr r1, pt1 // get a scratch reg
+ and r11, osfps_m_mode, r1 // get mode bit
+
+ bis r11, r31, r25 // save ps
+ beq r1, 1f // if zero we are in kern now
+
+ bis r31, r31, r25 // set the new ps
+ mtpr r30, pt_usp // save user stack
+
+ mfpr r30, pt_ksp // get kern stack
+1:
+ mfpr r14, exc_addr // get pc into r14 in case stack writes fault
+
+ lda sp, 0-osfsf_c_size(sp) // allocate stack space
+ mtpr r31, ev5__ps // Set Ibox current mode to kernel
+
+ mfpr r1, pt_entArith
+ stq r14, osfsf_pc(sp) // save pc
+
+ stq r17, osfsf_a1(sp)
+ mfpr r17, ev5__exc_mask // Get exception register mask IPR - no mtpr exc_sum in next cycle
+
+ stq r29, osfsf_gp(sp)
+ stq r16, osfsf_a0(sp) // save regs
+
+ bis r13, r31, r16 // move exc_sum to r16
+ stq r18, osfsf_a2(sp)
+
+ stq r11, osfsf_ps(sp) // save ps
+ mfpr r29, pt_kgp // get the kern gp
+
+ mfpr r14, pt0 // restore logout frame pointer from pt0
+ bis r25, r31, r11 // set new ps
+
+ mtpr r1, pt10 // Set new PC
+ mfpr r1, pt1
+
+// XXX bugnion pvc$jsr armc, bsr=1, dest=1
+ ret r31, (r12) // return to the machine check flow with the arith trap frame built
+
+
+
+// .sbttl "SYS$ENTER_CONSOLE - Common PALcode for ENTERING console"
+
+ ALIGN_BLOCK
+
+// SYS$enter_console
+//
+// Entry:
+// Entered when PAL wants to enter the console.
+// usually as the result of a HALT instruction or button,
+// or catastrophic error.
+//
+// Regs on entry...
+//
+// R0 = halt code
+// pt0 <- r0
+//
+// Function:
+//
+// Save all readable machine state, and "call" the console
+//
+// Returns:
+//
+//
+// Notes:
+//
+// In these routines, once the save state routine has been executed,
+// the remainder of the registers become scratchable, as the only
+// "valid" copy of them is the "saved" copy.
+//
+// Any registers or PTs that are modified before calling the save
+// routine will have their data lost. The code below will save all
+// state, but will lose pt0, pt4 and pt5.
+//
+//-
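+//
+// Note on the SIMOS path below: the console transfer address is synthesized
+// directly rather than loaded from pal_enter_console_ptr. In C terms
+// (a sketch; console_entry is not a real symbol):
+//
+//   unsigned long console_entry = (~0UL << 42)   /* superpage/kseg base          */
+//                               + (1UL << 16);   /* ldah r1, 1(r1) adds 0x10000  */
+//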
+
+EXPORT(sys_enter_console)
+ mtpr r1, pt4
+ mtpr r3, pt5
+#ifdef SIMOS
+ subq r31, 1, r1
+ sll r1, 42, r1
+ ldah r1, 1(r1)
+#else /* SIMOS */
+ lda r3, pal_enter_console_ptr(r31) //find stored vector
+ ldqp r1, 0(r3)
+#endif /* SIMOS */
+
+#ifdef SIMOS
+ /* taken from scrmax, seems like the obvious thing to do */
+ mtpr r1, exc_addr
+ mfpr r1, pt4
+ mfpr r3, pt5
+ STALL
+ STALL
+ hw_rei_stall
+#else
+ pvc$violate 1007
+ jmp r31, (r1) // off to common routine
+#endif
+
+
+// .sbttl "SYS$EXIT_CONSOLE - Common PALcode for EXITING console"
+//+
+// sys$exit_console
+//
+// Entry:
+// Entered when console wants to reenter PAL.
+// usually as the result of a CONTINUE.
+//
+//
+// Regs on entry...
+//
+//
+// Function:
+//
+// Restore all readable machine state, and return to user code.
+//
+//
+//
+//-
+ ALIGN_BLOCK
+sys_exit_console:
+ // Disable physical mode:
+#if enable_physical_console != 0
+// .if ne enable_physical_console
+ mfpr r25, pt_ptbr
+ bic r25, 1, r25 // clear physical console flag
+ mtpr r25, pt_ptbr
+#endif
+
+ GET_IMPURE(r1)
+
+ // clear lock and intr_flags prior to leaving console
+ rc r31 // clear intr_flag
+ // lock flag cleared by restore_state
+#ifndef SIMOS
+ pvc$jsr rststa, bsr=1
+ bsr r3, pal_restore_state // go restore all state
+ // note, R1 and R3 are NOT restored
+ // by restore_state.
+#endif
+ // TB's have been flushed
+
+ ldqp r3, (cns_gpr+(8*3))(r1) // restore r3
+ ldqp r1, (cns_gpr+8)(r1) // restore r1
+ hw_rei_stall // back to user
+
+#if turbo_pcia_intr_fix != 0
+// .if ne turbo_pcia_intr_fix
+check_pcia_intr:
+ mfpr r14, pt14 // fetch saved PCIA interrupt info
+ beq r14, check_done // don't bother checking if no info
+ mfpr r13, ipl // check the current IPL
+ bic r13, 3, r25 // isolate ipl<5:2>
+ cmpeq r25, 0x14, r25 // is it an I/O interrupt?
+ beq r25, check_done // no, return
+ and r13, 3, r25 // get I/O interrupt index
+ extbl r14, r25, r13 // extract info for this interrupt
+ beq r13, check_done // if no info, return
+
+ // This is an RTI from a PCIA interrupt
+ lda r12, 1(r31) // get initial bit mask
+ sll r12, r25, r25 // shift to select interrupt index
+ zap r14, r25, r14 // clear out info from this interrupt
+ mtpr r14, pt14 // and save it
+
+ and r13, 3, r25 // isolate HPC field
+ subq r25, 1, r25 // subtract 1 to get HPC number
+ srl r13, 2, r13 // generate base register address
+ sll r13, 6, r13 // get slot/hose address bits
+ lda r13, 0x38(r13) // insert other high bits
+ sll r13, 28, r13 // shift high bits into position
+
+ // Read the IPROGx register
+ sll r25, 21, r14 // HPC address bit position
+ or r13, r14, r14 // add in upper bits
+ lda r14, 0x400(r14) // add in lower bits
+ ldqp r14, 0(r14) // read IPROG
+ srl r14, 4, r12 // check the In Progress bit
+ blbc r12, 1f // skip if none in progress
+ and r14, 0xf, r14 // isolate interrupt source
+ lda r12, 1(r31) // make initial mask
+ sll r12, r14, r14 // shift to make new intr source mask
+ br r31, 2f
+ // Write the SMPLIRQx register
+1: or r31, r31, r14 // default interrupt source mask
+2: GET_ADDR(r12, 0xffff, r31) // default SMPLIRQx data
+ bic r12, r14, r12 // clear any interrupts in progress
+//orig lda r14, <0xbffc@-2>(r31) // get register address bits
+ lda r14,(0xbffc>>2)(r31)
+
+ sll r14, 10, r14 // shift into position
+ or r14, r13, r14 // add in upper bits
+ sll r25, 8, r25 // shift HPC number into position
+ or r14, r25, r14 // add in lower bits
+ stqp r12, 0(r14) // write SMPLIRQx register
+ mb
+ ldqp r12, 0(r14) // read it back
+ bis r12, r12, r12 // touch register to ensure completion
+
+check_done: // do these now and return
+ lda r25, osfsf_c_size(sp) // get updated sp
+ bis r25, r31, r14 // touch r14,r25 to stall mf exc_addr
+ br r31, pcia_check_return
+#endif
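+
+// At a high level, check_pcia_intr above does the following (C-flavoured
+// sketch based on the inline comments; ipl, pt14 and the helpers are
+// stand-ins, and the IPROG/SMPLIRQ handling is only summarized):
+//
+//   unsigned long info = pt14;
+//   if (info != 0 && (ipl & ~3UL) == 0x14) {        /* I/O interrupt level? */
+//       int idx = ipl & 3;
+//       unsigned long saved = (info >> (8 * idx)) & 0xff;
+//       if (saved != 0) {
+//           pt14 = info & ~(0xffUL << (8 * idx));   /* consume saved entry  */
+//           /* read the HPC's IPROG register, then rewrite its SMPLIRQ so   */
+//           /* that all sources except one still in progress are re-enabled */
+//       }
+//   }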
+
+
+// .sbttl KLUDGE_INITIAL_PCBB - PCB for Boot use only
+
+ ALIGN_128
+
+kludge_initial_pcbb: // PCB is 128 bytes long
+// .repeat 16
+// .quad 0
+// .endr
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+ nop
+ nop
+ nop
+ nop
+
+// .sbttl "SET_SC_BC_CTL subroutine"
+//
+// Subroutine to set the SC_CTL, BC_CONFIG, and BC_CTL registers and flush the Scache
+// There must be no outstanding memory references -- istream or dstream -- when
+// these registers are written. The EV5 prefetcher is difficult to turn off, so
+// this routine needs to be exactly 32 instructions long; the final jmp must
+// be in the last octaword of a page (the prefetcher does not cross a page boundary).
+//
+//
+// Register expectations:
+// r0 base address of CBOX iprs
+// r5 value to set sc_ctl to (flush bit is added in)
+// r6 value to set bc_ctl to
+// r7 value to set bc_config to
+// r10 return address
+// r19 old sc_ctl value
+// r20 old value of bc_ctl
+// r21 old value of bc_config
+// r23 flush scache flag
+// Register usage:
+// r17 sc_ctl with flush bit cleared
+// r22 loop address
+//
+//
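+//
+// Behaviourally, the 32-instruction loop below makes two passes (C sketch;
+// write_ipr/read_ipr and the new_* names are illustrative helpers, not
+// symbols from this file):
+//
+//   for (pass = 0; pass < 2; pass++) {
+//       write_ipr(SC_CTL,    sc_ctl);              /* pass 0: old value; pass 1: new (+flush) */
+//       write_ipr(BC_CTL,    bc_ctl);
+//       write_ipr(BC_CONFIG, bc_config);
+//       write_ipr(SC_CTL,    sc_ctl & ~SC_FLUSH);  /* clear the flush bit again               */
+//       (void)read_ipr(SC_CTL);                    /* stall until the write completes         */
+//       sc_ctl    = new_sc_ctl | (flush ? SC_FLUSH : 0);  /* stage new values for pass 1      */
+//       bc_ctl    = new_bc_ctl;
+//       bc_config = new_bc_config;
+//   }
+//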
+#ifndef SIMOS
+ align_page <32*4> // puts start of routine at next page boundary minus 32 longwords.
+#endif
+
+set_sc_bc_ctl:
+
+#ifndef SIMOS
+ br r22, sc_ctl_loop //this branch must be in the same 4 instruction block as its destination
+sc_ctl_loop:
+// XXX bugnion pvc$jsr scloop, dest=1
+ mb
+ mb
+
+ bis r5, r23, r5 //r5 <- same sc_ctl with flush bit set (if flag set in r23)
+
+ stqp r19, ev5__sc_ctl(r0) // write sc_ctl
+ stqp r20, ev5__bc_ctl(r0) // write bc_ctl
+ bis r31, r6, r20 // update r20 with new bc_ctl for 2nd time through loop
+ stqp r21, bc_config(r0) // write bc_config register
+ bis r31, r7, r21 // update r21 with new bc_config for 2nd time through loop
+
+ bic r19, BIT(sc_ctl_v_sc_flush), r17 //r17 <- same sc_ctl without flush bit set
+ //NOTE: only works because flush bit is in lower 16 bits
+
+ wmb // don't merge with other writes
+ stqp r17, ev5__sc_ctl(r0) // write sc_ctl without flush bit
+ ldqp r17, ev5__sc_ctl(r0) // read sc_ctl
+ bis r17, r17, r17 // stall until the data comes back
+ bis r31, r5, r19 // update r19 with new sc_ctl for 2nd time through loop
+
+ // fill with requisite number of nops (unops ok) to make exactly 32 instructions in loop
+ t = 0
+ .repeat 15
+ unop
+ t = t + 1
+ .endr
+ $opdef mnemonic= myjmp, -
+ format= <custom=iregister, iregister, branch_offset>, -
+ encoding= <26:31=0x1A, 21:25=%OP1,16:20=%OP2,14:15=0x00,0:13=%op3>
+
+// XXXbugnion pvc$jsr scloop
+ myjmp r22,r22,sc_ctl_loop // first time, jump to sc_ctl_loop (hint will cause prefetcher to go to loop instead
+ // of straight) // r22 gets sc_ctl_done
+ // 2nd time, code continues at sc_ctl_done (I hope)
+sc_ctl_done:
+// XXX bugnion pvc$jsr scloop, dest=1
+// XXX bugnion pvc$jsr scbcctl
+#endif /*SIMOS*/
+ ret r31, (r10) // return to where we came from
+
+
+.end
+
+
+
+
diff --git a/system/alpha/palcode/simos.h b/system/alpha/palcode/simos.h
new file mode 100644
index 000000000..be5a5a93b
--- /dev/null
+++ b/system/alpha/palcode/simos.h
@@ -0,0 +1,16 @@
+
+
+/*
+ * this contains the random stuff that is not defined
+ * this contains the miscellaneous definitions that are not provided
+ * elsewhere; most of it is probably hacked up
+
+
+#include "tlaser.h" /* from the simulation tree */
+
+/*
+ * weird: different ways of referencing ips
+ */
+
+#define ps ips
diff --git a/system/alpha/palcode/xxm.sed b/system/alpha/palcode/xxm.sed
new file mode 100644
index 000000000..24e9bf49a
--- /dev/null
+++ b/system/alpha/palcode/xxm.sed
@@ -0,0 +1,5 @@
+/^[ ]*$/d
+s/^[ ]*//
+s/ / /g
+s/ [ ]*/ /g
+s/ ,/,/