From ae51f41d14f548d494ac41e0d21137c5a4c3f59c Mon Sep 17 00:00:00 2001 From: Iru Cai Date: Wed, 30 Oct 2019 14:21:52 +0800 Subject: import the U-Boot code and make it compile --- arch/mips/include/asm/addrspace.h | 164 ++ arch/mips/include/asm/asm-offsets.h | 3 + arch/mips/include/asm/asm.h | 424 +++++ arch/mips/include/asm/atomic.h | 12 + arch/mips/include/asm/bitops.h | 903 +++++++++ arch/mips/include/asm/byteorder.h | 72 + arch/mips/include/asm/cache.h | 32 + arch/mips/include/asm/cachectl.h | 23 + arch/mips/include/asm/cacheops.h | 114 ++ arch/mips/include/asm/cm.h | 61 + arch/mips/include/asm/config.h | 12 + arch/mips/include/asm/cpu-features.h | 29 + arch/mips/include/asm/global_data.h | 36 + arch/mips/include/asm/gpio.h | 1 + arch/mips/include/asm/io.h | 585 ++++++ arch/mips/include/asm/isadep.h | 35 + arch/mips/include/asm/linkage.h | 0 .../asm/mach-generic/cpu-feature-overrides.h | 10 + arch/mips/include/asm/mach-generic/ioremap.h | 30 + arch/mips/include/asm/mach-generic/mangle-port.h | 49 + arch/mips/include/asm/mach-generic/spaces.h | 101 + arch/mips/include/asm/malta.h | 71 + arch/mips/include/asm/mipsregs.h | 2014 ++++++++++++++++++++ arch/mips/include/asm/pgtable-bits.h | 282 +++ arch/mips/include/asm/posix_types.h | 125 ++ arch/mips/include/asm/processor.h | 137 ++ arch/mips/include/asm/ptrace.h | 105 + arch/mips/include/asm/reboot.h | 11 + arch/mips/include/asm/reg.h | 123 ++ arch/mips/include/asm/regdef.h | 103 + arch/mips/include/asm/relocs.h | 23 + arch/mips/include/asm/sections.h | 18 + arch/mips/include/asm/sgidefs.h | 41 + arch/mips/include/asm/spl.h | 33 + arch/mips/include/asm/string.h | 36 + arch/mips/include/asm/system.h | 285 +++ arch/mips/include/asm/types.h | 55 + arch/mips/include/asm/u-boot-mips.h | 12 + arch/mips/include/asm/u-boot.h | 24 + arch/mips/include/asm/unaligned.h | 23 + 40 files changed, 6217 insertions(+) create mode 100644 arch/mips/include/asm/addrspace.h create mode 100644 arch/mips/include/asm/asm-offsets.h create mode 100644 arch/mips/include/asm/asm.h create mode 100644 arch/mips/include/asm/atomic.h create mode 100644 arch/mips/include/asm/bitops.h create mode 100644 arch/mips/include/asm/byteorder.h create mode 100644 arch/mips/include/asm/cache.h create mode 100644 arch/mips/include/asm/cachectl.h create mode 100644 arch/mips/include/asm/cacheops.h create mode 100644 arch/mips/include/asm/cm.h create mode 100644 arch/mips/include/asm/config.h create mode 100644 arch/mips/include/asm/cpu-features.h create mode 100644 arch/mips/include/asm/global_data.h create mode 100644 arch/mips/include/asm/gpio.h create mode 100644 arch/mips/include/asm/io.h create mode 100644 arch/mips/include/asm/isadep.h create mode 100644 arch/mips/include/asm/linkage.h create mode 100644 arch/mips/include/asm/mach-generic/cpu-feature-overrides.h create mode 100644 arch/mips/include/asm/mach-generic/ioremap.h create mode 100644 arch/mips/include/asm/mach-generic/mangle-port.h create mode 100644 arch/mips/include/asm/mach-generic/spaces.h create mode 100644 arch/mips/include/asm/malta.h create mode 100644 arch/mips/include/asm/mipsregs.h create mode 100644 arch/mips/include/asm/pgtable-bits.h create mode 100644 arch/mips/include/asm/posix_types.h create mode 100644 arch/mips/include/asm/processor.h create mode 100644 arch/mips/include/asm/ptrace.h create mode 100644 arch/mips/include/asm/reboot.h create mode 100644 arch/mips/include/asm/reg.h create mode 100644 arch/mips/include/asm/regdef.h create mode 100644 arch/mips/include/asm/relocs.h create mode 100644 
arch/mips/include/asm/sections.h create mode 100644 arch/mips/include/asm/sgidefs.h create mode 100644 arch/mips/include/asm/spl.h create mode 100644 arch/mips/include/asm/string.h create mode 100644 arch/mips/include/asm/system.h create mode 100644 arch/mips/include/asm/types.h create mode 100644 arch/mips/include/asm/u-boot-mips.h create mode 100644 arch/mips/include/asm/u-boot.h create mode 100644 arch/mips/include/asm/unaligned.h diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h new file mode 100644 index 0000000..b6d3876 --- /dev/null +++ b/arch/mips/include/asm/addrspace.h @@ -0,0 +1,164 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1996, 99 Ralf Baechle + * Copyright (C) 2000, 2002 Maciej W. Rozycki + * Copyright (C) 1990, 1999 by Silicon Graphics, Inc. + */ +#ifndef _ASM_ADDRSPACE_H +#define _ASM_ADDRSPACE_H + +#include <spaces.h> + +/* + * Configure language + */ +#ifdef __ASSEMBLY__ +#define _ATYPE_ +#define _ATYPE32_ +#define _ATYPE64_ +#define _CONST64_(x) x +#else +#define _ATYPE_ __PTRDIFF_TYPE__ +#define _ATYPE32_ int +#define _ATYPE64_ __s64 +#ifdef CONFIG_64BIT +#define _CONST64_(x) x ## L +#else +#define _CONST64_(x) x ## LL +#endif +#endif + +/* + * 32-bit MIPS address spaces + */ +#ifdef __ASSEMBLY__ +#define _ACAST32_ +#define _ACAST64_ +#else +#define _ACAST32_ (_ATYPE_)(_ATYPE32_) /* widen if necessary */ +#define _ACAST64_ (_ATYPE64_) /* do _not_ narrow */ +#endif + +/* + * Returns the kernel segment base of a given address + */ +#define KSEGX(a) ((_ACAST32_ (a)) & 0xe0000000) + +/* + * Returns the physical address of a CKSEGx / XKPHYS address + */ +#define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) +#define XPHYSADDR(a) ((_ACAST64_(a)) & \ + _CONST64_(0x0000ffffffffffff)) + +#ifdef CONFIG_64BIT + +/* + * Memory segments (64bit kernel mode addresses) + * The compatibility segments use the full 64-bit sign extended value. Note + * the R8000 doesn't have them so don't reference these in generic MIPS code. + */ +#define XKUSEG _CONST64_(0x0000000000000000) +#define XKSSEG _CONST64_(0x4000000000000000) +#define XKPHYS _CONST64_(0x8000000000000000) +#define XKSEG _CONST64_(0xc000000000000000) +#define CKSEG0 _CONST64_(0xffffffff80000000) +#define CKSEG1 _CONST64_(0xffffffffa0000000) +#define CKSSEG _CONST64_(0xffffffffc0000000) +#define CKSEG3 _CONST64_(0xffffffffe0000000) + +#define CKSEG0ADDR(a) (CPHYSADDR(a) | CKSEG0) +#define CKSEG1ADDR(a) (CPHYSADDR(a) | CKSEG1) +#define CKSEG2ADDR(a) (CPHYSADDR(a) | CKSEG2) +#define CKSEG3ADDR(a) (CPHYSADDR(a) | CKSEG3) + +#else + +#define CKSEG0ADDR(a) (CPHYSADDR(a) | KSEG0) +#define CKSEG1ADDR(a) (CPHYSADDR(a) | KSEG1) +#define CKSEG2ADDR(a) (CPHYSADDR(a) | KSEG2) +#define CKSEG3ADDR(a) (CPHYSADDR(a) | KSEG3) + +/* + * Map an address to a certain kernel segment + */ +#define KSEG0ADDR(a) (CPHYSADDR(a) | KSEG0) +#define KSEG1ADDR(a) (CPHYSADDR(a) | KSEG1) +#define KSEG2ADDR(a) (CPHYSADDR(a) | KSEG2) +#define KSEG3ADDR(a) (CPHYSADDR(a) | KSEG3) + +/* + * Memory segments (32bit kernel mode addresses) + * These are the traditional names used in the 32-bit universe.
+ */ +#define KUSEG 0x00000000 +#define KSEG0 0x80000000 +#define KSEG1 0xa0000000 +#define KSEG2 0xc0000000 +#define KSEG3 0xe0000000 + +#define CKUSEG 0x00000000 +#define CKSEG0 0x80000000 +#define CKSEG1 0xa0000000 +#define CKSEG2 0xc0000000 +#define CKSEG3 0xe0000000 + +#endif + +/* + * Cache modes for XKPHYS address conversion macros + */ +#define K_CALG_COH_EXCL1_NOL2 0 +#define K_CALG_COH_SHRL1_NOL2 1 +#define K_CALG_UNCACHED 2 +#define K_CALG_NONCOHERENT 3 +#define K_CALG_COH_EXCL 4 +#define K_CALG_COH_SHAREABLE 5 +#define K_CALG_NOTUSED 6 +#define K_CALG_UNCACHED_ACCEL 7 + +/* + * 64-bit address conversions + */ +#define PHYS_TO_XKSEG_UNCACHED(p) PHYS_TO_XKPHYS(K_CALG_UNCACHED, (p)) +#define PHYS_TO_XKSEG_CACHED(p) PHYS_TO_XKPHYS(K_CALG_COH_SHAREABLE, (p)) +#define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK) +#define PHYS_TO_XKPHYS(cm, a) (_CONST64_(0x8000000000000000) | \ + (_CONST64_(cm) << 59) | (a)) + +/* + * Returns the uncached address of an SDRAM address + */ +#ifndef __ASSEMBLY__ +#if defined(CONFIG_TB0229) +/* We use a 36 bit physical address map here and + cannot access physical memory directly from the core */ +#define UNCACHED_SDRAM(a) (((unsigned long)(a)) | 0x20000000) +#else /* !CONFIG_TB0229 */ +#define UNCACHED_SDRAM(a) CKSEG1ADDR(a) +#endif /* CONFIG_TB0229 */ +#endif /* __ASSEMBLY__ */ + +/* + * The ultimate limit of the 64-bit MIPS architecture: 2 bits for selecting + * the region, 3 bits for the CCA mode. This leaves 59 bits of which the + * R8000 implements most with its 48-bit physical address space. + */ +#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^59 - 1 */ + +#ifndef CONFIG_CPU_R8000 + +/* + * The R8000 doesn't have the 32-bit compat spaces so we don't define them + * in order to catch bugs in the source code. + */ + +#define COMPAT_K1BASE32 _CONST64_(0xffffffffa0000000) +#define PHYS_TO_COMPATK1(x) ((x) | COMPAT_K1BASE32) /* 32-bit compat k1 */ + +#endif + +#define KDM_TO_PHYS(x) (_ACAST64_ (x) & TO_PHYS_MASK) +#define PHYS_TO_K0(x) (_ACAST64_ (x) | CAC_BASE) + +#endif /* _ASM_ADDRSPACE_H */ diff --git a/arch/mips/include/asm/asm-offsets.h b/arch/mips/include/asm/asm-offsets.h new file mode 100644 index 0000000..b722499 --- /dev/null +++ b/arch/mips/include/asm/asm-offsets.h @@ -0,0 +1,3 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#include <generated/asm-offsets.h> diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h new file mode 100644 index 0000000..7abcf6d --- /dev/null +++ b/arch/mips/include/asm/asm.h @@ -0,0 +1,424 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1995, 1996, 1997, 1999, 2001 by Ralf Baechle + * Copyright (C) 1999 by Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + * Copyright (C) 2002 Maciej W. Rozycki + * + * Some useful macros for MIPS assembler code + * + * Some of the routines below contain useless nops that will be optimized + * away by gas in -O mode. These nops are however required to fill delay + * slots in noreorder mode. + */ +#ifndef __ASM_ASM_H +#define __ASM_ASM_H + +#include <asm/sgidefs.h> + +#ifndef CAT +#ifdef __STDC__ +#define __CAT(str1, str2) str1##str2 +#else +#define __CAT(str1, str2) str1/**/str2 +#endif +#define CAT(str1, str2) __CAT(str1, str2) +#endif + +/* + * PIC specific declarations + * Not used for the kernel, but this seems to be the right place.
+ */ +#ifdef __PIC__ +#define CPRESTORE(register) \ + .cprestore register +#define CPADD(register) \ + .cpadd register +#define CPLOAD(register) \ + .cpload register +#else +#define CPRESTORE(register) +#define CPADD(register) +#define CPLOAD(register) +#endif + +#define ENTRY(symbol) \ + .globl symbol; \ + .type symbol, @function; \ + .ent symbol, 0; \ +symbol: + +/* + * LEAF - declare leaf routine + */ +#define LEAF(symbol) \ + .globl symbol; \ + .align 2; \ + .type symbol, @function; \ + .ent symbol, 0; \ + .section .text.symbol, "x"; \ +symbol: .frame sp, 0, ra + +/* + * NESTED - declare nested routine entry point + */ +#define NESTED(symbol, framesize, rpc) \ + .globl symbol; \ + .align 2; \ + .type symbol, @function; \ + .ent symbol, 0; \ + .section .text.symbol, "x"; \ +symbol: .frame sp, framesize, rpc + +/* + * END - mark end of function + */ +#define END(function) \ + .end function; \ + .size function, .-function + +/* + * EXPORT - export definition of symbol + */ +#define EXPORT(symbol) \ + .globl symbol; \ +symbol: + +/* + * FEXPORT - export definition of a function symbol + */ +#define FEXPORT(symbol) \ + .globl symbol; \ + .type symbol, @function; \ +symbol: + +/* + * ABS - export absolute symbol + */ +#define ABS(symbol,value) \ + .globl symbol; \ +symbol = value + +#define PANIC(msg) \ + .set push; \ + .set reorder; \ + PTR_LA a0, 8f; \ + jal panic; \ +9: b 9b; \ + .set pop; \ + TEXT(msg) + +/* + * Print formatted string + */ +#ifdef CONFIG_PRINTK +#define PRINT(string) \ + .set push; \ + .set reorder; \ + PTR_LA a0, 8f; \ + jal printk; \ + .set pop; \ + TEXT(string) +#else +#define PRINT(string) +#endif + +#define TEXT(msg) \ + .pushsection .data; \ +8: .asciiz msg; \ + .popsection; + +/* + * Build text tables + */ +#define TTABLE(string) \ + .pushsection .text; \ + .word 1f; \ + .popsection \ + .pushsection .data; \ +1: .asciiz string; \ + .popsection + +/* + * MIPS IV pref instruction. + * Use with .set noreorder only! + * + * MIPS IV implementations are free to treat this as a nop. The R5000 + * is one of them. So we should have an option not to use this instruction. + */ +#ifdef CONFIG_CPU_HAS_PREFETCH + +#define PREF(hint, addr) \ + .set push; \ + .set arch=r5000; \ + pref hint, addr; \ + .set pop + +#define PREFE(hint, addr) \ + .set push; \ + .set mips0; \ + .set eva; \ + prefe hint, addr; \ + .set pop + +#define PREFX(hint, addr) \ + .set push; \ + .set arch=r5000; \ + prefx hint, addr; \ + .set pop + +#else /* !CONFIG_CPU_HAS_PREFETCH */ + +#define PREF(hint, addr) +#define PREFE(hint, addr) +#define PREFX(hint, addr) + +#endif /* !CONFIG_CPU_HAS_PREFETCH */ + +/* + * MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs. 
+ */ +#if (_MIPS_ISA == _MIPS_ISA_MIPS1) +#define MOVN(rd, rs, rt) \ + .set push; \ + .set reorder; \ + beqz rt, 9f; \ + move rd, rs; \ + .set pop; \ +9: +#define MOVZ(rd, rs, rt) \ + .set push; \ + .set reorder; \ + bnez rt, 9f; \ + move rd, rs; \ + .set pop; \ +9: +#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */ +#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) +#define MOVN(rd, rs, rt) \ + .set push; \ + .set noreorder; \ + bnezl rt, 9f; \ + move rd, rs; \ + .set pop; \ +9: +#define MOVZ(rd, rs, rt) \ + .set push; \ + .set noreorder; \ + beqzl rt, 9f; \ + move rd, rs; \ + .set pop; \ +9: +#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */ +#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \ + (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64) +#define MOVN(rd, rs, rt) \ + movn rd, rs, rt +#define MOVZ(rd, rs, rt) \ + movz rd, rs, rt +#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */ + +/* + * Stack alignment + */ +#if (_MIPS_SIM == _MIPS_SIM_ABI32) +#define ALSZ 7 +#define ALMASK ~7 +#endif +#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) +#define ALSZ 15 +#define ALMASK ~15 +#endif + +/* + * Macros to handle different pointer/register sizes for 32/64-bit code + */ + +/* + * Size of a register + */ +#ifdef __mips64 +#define SZREG 8 +#else +#define SZREG 4 +#endif + +/* + * Use the following macros in assembler code to load/store registers, + * pointers, etc. + */ +#if (_MIPS_SIM == _MIPS_SIM_ABI32) +#define REG_S sw +#define REG_L lw +#define REG_SUBU subu +#define REG_ADDU addu +#endif +#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) +#define REG_S sd +#define REG_L ld +#define REG_SUBU dsubu +#define REG_ADDU daddu +#endif + +/* + * How to add/sub/load/store/shift C int variables. + */ +#if (_MIPS_SZINT == 32) +#define INT_ADD add +#define INT_ADDU addu +#define INT_ADDI addi +#define INT_ADDIU addiu +#define INT_SUB sub +#define INT_SUBU subu +#define INT_L lw +#define INT_S sw +#define INT_SLL sll +#define INT_SLLV sllv +#define INT_SRL srl +#define INT_SRLV srlv +#define INT_SRA sra +#define INT_SRAV srav +#endif + +#if (_MIPS_SZINT == 64) +#define INT_ADD dadd +#define INT_ADDU daddu +#define INT_ADDI daddi +#define INT_ADDIU daddiu +#define INT_SUB dsub +#define INT_SUBU dsubu +#define INT_L ld +#define INT_S sd +#define INT_SLL dsll +#define INT_SLLV dsllv +#define INT_SRL dsrl +#define INT_SRLV dsrlv +#define INT_SRA dsra +#define INT_SRAV dsrav +#endif + +/* + * How to add/sub/load/store/shift C long variables.
+ */ +#if (_MIPS_SZLONG == 32) +#define LONG_ADD add +#define LONG_ADDU addu +#define LONG_ADDI addi +#define LONG_ADDIU addiu +#define LONG_SUB sub +#define LONG_SUBU subu +#define LONG_L lw +#define LONG_S sw +#define LONG_SP swp +#define LONG_SLL sll +#define LONG_SLLV sllv +#define LONG_SRL srl +#define LONG_SRLV srlv +#define LONG_SRA sra +#define LONG_SRAV srav + +#define LONG .word +#define LONGSIZE 4 +#define LONGMASK 3 +#define LONGLOG 2 +#endif + +#if (_MIPS_SZLONG == 64) +#define LONG_ADD dadd +#define LONG_ADDU daddu +#define LONG_ADDI daddi +#define LONG_ADDIU daddiu +#define LONG_SUB dsub +#define LONG_SUBU dsubu +#define LONG_L ld +#define LONG_S sd +#define LONG_SP sdp +#define LONG_SLL dsll +#define LONG_SLLV dsllv +#define LONG_SRL dsrl +#define LONG_SRLV dsrlv +#define LONG_SRA dsra +#define LONG_SRAV dsrav + +#define LONG .dword +#define LONGSIZE 8 +#define LONGMASK 7 +#define LONGLOG 3 +#endif + +/* + * How to add/sub/load/store/shift pointers. + */ +#if (_MIPS_SZPTR == 32) +#define PTR_ADD add +#define PTR_ADDU addu +#define PTR_ADDI addi +#define PTR_ADDIU addiu +#define PTR_SUB sub +#define PTR_SUBU subu +#define PTR_L lw +#define PTR_S sw +#define PTR_LA la +#define PTR_LI li +#define PTR_SLL sll +#define PTR_SLLV sllv +#define PTR_SRL srl +#define PTR_SRLV srlv +#define PTR_SRA sra +#define PTR_SRAV srav + +#define PTR_SCALESHIFT 2 + +#define PTR .word +#define PTRSIZE 4 +#define PTRLOG 2 +#endif + +#if (_MIPS_SZPTR == 64) +#define PTR_ADD dadd +#define PTR_ADDU daddu +#define PTR_ADDI daddi +#define PTR_ADDIU daddiu +#define PTR_SUB dsub +#define PTR_SUBU dsubu +#define PTR_L ld +#define PTR_S sd +#define PTR_LA dla +#define PTR_LI dli +#define PTR_SLL dsll +#define PTR_SLLV dsllv +#define PTR_SRL dsrl +#define PTR_SRLV dsrlv +#define PTR_SRA dsra +#define PTR_SRAV dsrav + +#define PTR_SCALESHIFT 3 + +#define PTR .dword +#define PTRSIZE 8 +#define PTRLOG 3 +#endif + +/* + * Some cp0 registers were extended to 64bit for MIPS III. + */ +#if (_MIPS_SIM == _MIPS_SIM_ABI32) +#define MFC0 mfc0 +#define MTC0 mtc0 +#endif +#if (_MIPS_SIM == _MIPS_SIM_NABI32) || (_MIPS_SIM == _MIPS_SIM_ABI64) +#define MFC0 dmfc0 +#define MTC0 dmtc0 +#endif + +#define SSNOP sll zero, zero, 1 + +#ifdef CONFIG_SGI_IP28 +/* Inhibit speculative stores to volatile (e.g. DMA) or invalid addresses. */ +#include <asm/cacheops.h> +#define R10KCBARRIER(addr) cache CACHE_BARRIER, addr; +#else +#define R10KCBARRIER(addr) +#endif + +#endif /* __ASM_ASM_H */ diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h new file mode 100644 index 0000000..c4f08b7 --- /dev/null +++ b/arch/mips/include/asm/atomic.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2016 Cadence Design Systems Inc. + */ + +#ifndef _MIPS_ATOMIC_H +#define _MIPS_ATOMIC_H + +#include <asm/system.h> +#include <asm-generic/atomic.h> + +#endif diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h new file mode 100644 index 0000000..321d322 --- /dev/null +++ b/arch/mips/include/asm/bitops.h @@ -0,0 +1,903 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 1994 - 1997, 1999, 2000 Ralf Baechle (ralf@gnu.org) + * Copyright (c) 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_BITOPS_H +#define _ASM_BITOPS_H + +#include <linux/types.h> +#include <asm/byteorder.h> /* sigh ... */ + +#ifdef __KERNEL__ + +#include <asm/sgidefs.h> +#include <asm/system.h> + +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/__ffs.h> + +/* + * clear_bit() doesn't provide any barrier for the compiler.
+ */ +#define smp_mb__before_clear_bit() barrier() +#define smp_mb__after_clear_bit() barrier() + +/* + * Only disable interrupt for kernel mode stuff to keep usermode stuff + * that dares to use kernel include files alive. + */ +#define __bi_flags unsigned long flags +#define __bi_cli() __cli() +#define __bi_save_flags(x) __save_flags(x) +#define __bi_save_and_cli(x) __save_and_cli(x) +#define __bi_restore_flags(x) __restore_flags(x) +#else +#define __bi_flags +#define __bi_cli() +#define __bi_save_flags(x) +#define __bi_save_and_cli(x) +#define __bi_restore_flags(x) +#endif /* __KERNEL__ */ + +#ifdef CONFIG_CPU_HAS_LLSC + +#include <asm/mipsregs.h> + +/* + * These functions for MIPS ISA > 1 are interrupt and SMP proof, and + * interrupt friendly + */ + +/* + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static __inline__ void +set_bit(int nr, volatile void *addr) +{ + unsigned long *m = ((unsigned long *) addr) + (nr >> 5); + unsigned long temp; + + __asm__ __volatile__( + "1:\tll\t%0, %1\t\t# set_bit\n\t" + "or\t%0, %2\n\t" + "sc\t%0, %1\n\t" + "beqz\t%0, 1b" + : "=&r" (temp), "=m" (*m) + : "ir" (1UL << (nr & 0x1f)), "m" (*m)); +} + +/* + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __inline__ void __set_bit(int nr, volatile void * addr) +{ + unsigned long * m = ((unsigned long *) addr) + (nr >> 5); + + *m |= 1UL << (nr & 31); +} +#define PLATFORM__SET_BIT + +/* + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. + */ +static __inline__ void +clear_bit(int nr, volatile void *addr) +{ + unsigned long *m = ((unsigned long *) addr) + (nr >> 5); + unsigned long temp; + + __asm__ __volatile__( + "1:\tll\t%0, %1\t\t# clear_bit\n\t" + "and\t%0, %2\n\t" + "sc\t%0, %1\n\t" + "beqz\t%0, 1b\n\t" + : "=&r" (temp), "=m" (*m) + : "ir" (~(1UL << (nr & 0x1f))), "m" (*m)); +} + +/* + * change_bit - Toggle a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static __inline__ void +change_bit(int nr, volatile void *addr) +{ + unsigned long *m = ((unsigned long *) addr) + (nr >> 5); + unsigned long temp; + + __asm__ __volatile__( + "1:\tll\t%0, %1\t\t# change_bit\n\t" + "xor\t%0, %2\n\t" + "sc\t%0, %1\n\t" + "beqz\t%0, 1b" + : "=&r" (temp), "=m" (*m) + : "ir" (1UL << (nr & 0x1f)), "m" (*m)); +} + +/* + * __change_bit - Toggle a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __inline__ void __change_bit(int nr, volatile void * addr) +{ + unsigned long * m = ((unsigned long *) addr) + (nr >> 5); + + *m ^= 1UL << (nr & 31); +} + +/* + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static __inline__ int +test_and_set_bit(int nr, volatile void *addr) +{ + unsigned long *m = ((unsigned long *) addr) + (nr >> 5); + unsigned long temp, res; + + __asm__ __volatile__( + ".set\tnoreorder\t\t# test_and_set_bit\n" + "1:\tll\t%0, %1\n\t" + "or\t%2, %0, %3\n\t" + "sc\t%2, %1\n\t" + "beqz\t%2, 1b\n\t" + " and\t%2, %0, %3\n\t" + ".set\treorder" + : "=&r" (temp), "=m" (*m), "=&r" (res) + : "r" (1UL << (nr & 0x1f)), "m" (*m) + : "memory"); + + return res != 0; +} + +/* + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static __inline__ int __test_and_set_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile int *a = addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + retval = (mask & *a) != 0; + *a |= mask; + + return retval; +} + +/* + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static __inline__ int +test_and_clear_bit(int nr, volatile void *addr) +{ + unsigned long *m = ((unsigned long *) addr) + (nr >> 5); + unsigned long temp, res; + + __asm__ __volatile__( + ".set\tnoreorder\t\t# test_and_clear_bit\n" + "1:\tll\t%0, %1\n\t" + "or\t%2, %0, %3\n\t" + "xor\t%2, %3\n\t" + "sc\t%2, %1\n\t" + "beqz\t%2, 1b\n\t" + " and\t%2, %0, %3\n\t" + ".set\treorder" + : "=&r" (temp), "=m" (*m), "=&r" (res) + : "r" (1UL << (nr & 0x1f)), "m" (*m) + : "memory"); + + return res != 0; +} + +/* + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile int *a = addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + retval = (mask & *a) != 0; + *a &= ~mask; + + return retval; +} + +/* + * test_and_change_bit - Change a bit and return its new value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. 
+ */ +static __inline__ int +test_and_change_bit(int nr, volatile void *addr) +{ + unsigned long *m = ((unsigned long *) addr) + (nr >> 5); + unsigned long temp, res; + + __asm__ __volatile__( + ".set\tnoreorder\t\t# test_and_change_bit\n" + "1:\tll\t%0, %1\n\t" + "xor\t%2, %0, %3\n\t" + "sc\t%2, %1\n\t" + "beqz\t%2, 1b\n\t" + " and\t%2, %0, %3\n\t" + ".set\treorder" + : "=&r" (temp), "=m" (*m), "=&r" (res) + : "r" (1UL << (nr & 0x1f)), "m" (*m) + : "memory"); + + return res != 0; +} + +/* + * __test_and_change_bit - Change a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static __inline__ int __test_and_change_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile int *a = addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + retval = (mask & *a) != 0; + *a ^= mask; + + return retval; +} + +#else /* MIPS I */ + +/* + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * This function is atomic and may not be reordered. See __set_bit() + * if you do not require the atomic guarantees. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static __inline__ void set_bit(int nr, volatile void * addr) +{ + int mask; + volatile int *a = addr; + __bi_flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + __bi_save_and_cli(flags); + *a |= mask; + __bi_restore_flags(flags); +} + +/* + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __inline__ void __set_bit(int nr, volatile void * addr) +{ + int mask; + volatile int *a = addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + *a |= mask; +} + +/* + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * clear_bit() is atomic and may not be reordered. However, it does + * not contain a memory barrier, so if it is used for locking purposes, + * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() + * in order to ensure changes are visible on other processors. + */ +static __inline__ void clear_bit(int nr, volatile void * addr) +{ + int mask; + volatile int *a = addr; + __bi_flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + __bi_save_and_cli(flags); + *a &= ~mask; + __bi_restore_flags(flags); +} + +/* + * change_bit - Toggle a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * change_bit() is atomic and may not be reordered. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static __inline__ void change_bit(int nr, volatile void * addr) +{ + int mask; + volatile int *a = addr; + __bi_flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + __bi_save_and_cli(flags); + *a ^= mask; + __bi_restore_flags(flags); +} + +/* + * __change_bit - Toggle a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike change_bit(), this function is non-atomic and may be reordered. 
+ * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. + */ +static __inline__ void __change_bit(int nr, volatile void * addr) +{ + unsigned long * m = ((unsigned long *) addr) + (nr >> 5); + + *m ^= 1UL << (nr & 31); +} + +/* + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static __inline__ int test_and_set_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile int *a = addr; + __bi_flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + __bi_save_and_cli(flags); + retval = (mask & *a) != 0; + *a |= mask; + __bi_restore_flags(flags); + + return retval; +} + +/* + * __test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static __inline__ int __test_and_set_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile int *a = addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + retval = (mask & *a) != 0; + *a |= mask; + + return retval; +} + +/* + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static __inline__ int test_and_clear_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile int *a = addr; + __bi_flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + __bi_save_and_cli(flags); + retval = (mask & *a) != 0; + *a &= ~mask; + __bi_restore_flags(flags); + + return retval; +} + +/* + * __test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. + */ +static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile int *a = addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + retval = (mask & *a) != 0; + *a &= ~mask; + + return retval; +} + +/* + * test_and_change_bit - Change a bit and return its new value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static __inline__ int test_and_change_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile int *a = addr; + __bi_flags; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + __bi_save_and_cli(flags); + retval = (mask & *a) != 0; + *a ^= mask; + __bi_restore_flags(flags); + + return retval; +} + +/* + * __test_and_change_bit - Change a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is non-atomic and can be reordered. + * If two examples of this operation race, one can appear to succeed + * but actually fail. You must protect multiple accesses with a lock. 
+ */ +static __inline__ int __test_and_change_bit(int nr, volatile void * addr) +{ + int mask, retval; + volatile int *a = addr; + + a += nr >> 5; + mask = 1 << (nr & 0x1f); + retval = (mask & *a) != 0; + *a ^= mask; + + return retval; +} + +#undef __bi_flags +#undef __bi_cli +#undef __bi_save_flags +#undef __bi_restore_flags + +#endif /* MIPS I */ + +/* + * test_bit - Determine whether a bit is set + * @nr: bit number to test + * @addr: Address to start counting from + */ +static __inline__ int test_bit(int nr, const volatile void *addr) +{ + return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0; +} + +#ifndef __MIPSEB__ + +/* Little endian versions. */ + +/* + * find_first_zero_bit - find the first zero bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first zero bit, not the number of the byte + * containing a bit. + */ +static __inline__ int find_first_zero_bit (void *addr, unsigned size) +{ + unsigned long dummy; + int res; + + if (!size) + return 0; + + __asm__ (".set\tnoreorder\n\t" + ".set\tnoat\n" + "1:\tsubu\t$1,%6,%0\n\t" + "blez\t$1,2f\n\t" + "lw\t$1,(%5)\n\t" + "addiu\t%5,4\n\t" +#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \ + (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \ + (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64) + "beql\t%1,$1,1b\n\t" + "addiu\t%0,32\n\t" +#else + "addiu\t%0,32\n\t" + "beq\t%1,$1,1b\n\t" + "nop\n\t" + "subu\t%0,32\n\t" +#endif +#ifdef __MIPSEB__ +#error "Fix this for big endian" +#endif /* __MIPSEB__ */ + "li\t%1,1\n" + "1:\tand\t%2,$1,%1\n\t" + "beqz\t%2,2f\n\t" + "sll\t%1,%1,1\n\t" + "bnez\t%1,1b\n\t" + "add\t%0,%0,1\n\t" + ".set\tat\n\t" + ".set\treorder\n" + "2:" + : "=r" (res), "=r" (dummy), "=r" (addr) + : "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff), + "2" (addr), "r" (size) + : "$1"); + + return res; +} + +/* + * find_next_zero_bit - find the first zero bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +static __inline__ int find_next_zero_bit (void * addr, int size, int offset) +{ + unsigned int *p = ((unsigned int *) addr) + (offset >> 5); + int set = 0, bit = offset & 31, res; + unsigned long dummy; + + if (bit) { + /* + * Look for zero in first byte + */ +#ifdef __MIPSEB__ +#error "Fix this for big endian byte order" +#endif + __asm__(".set\tnoreorder\n\t" + ".set\tnoat\n" + "1:\tand\t$1,%4,%1\n\t" + "beqz\t$1,1f\n\t" + "sll\t%1,%1,1\n\t" + "bnez\t%1,1b\n\t" + "addiu\t%0,1\n\t" + ".set\tat\n\t" + ".set\treorder\n" + "1:" + : "=r" (set), "=r" (dummy) + : "0" (0), "1" (1 << bit), "r" (*p) + : "$1"); + if (set < (32 - bit)) + return set + offset; + set = 32 - bit; + p++; + } + /* + * No zero yet, search remaining full bytes for a zero + */ + res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr)); + return offset + set + res; +} + +#endif /* !(__MIPSEB__) */ + +/* + * ffz - find first zero in word. + * @word: The word to search + * + * Undefined if no zero exists, so code should check against ~0UL first. 
+ */ +static __inline__ unsigned long ffz(unsigned long word) +{ + unsigned int __res; + unsigned int mask = 1; + + __asm__ ( + ".set\tnoreorder\n\t" + ".set\tnoat\n\t" + "move\t%0,$0\n" + "1:\tand\t$1,%2,%1\n\t" + "beqz\t$1,2f\n\t" + "sll\t%1,1\n\t" + "bnez\t%1,1b\n\t" + "addiu\t%0,1\n\t" + ".set\tat\n\t" + ".set\treorder\n" + "2:\n\t" + : "=&r" (__res), "=r" (mask) + : "r" (word), "1" (mask) + : "$1"); + + return __res; +} + +#ifdef __KERNEL__ + +/* + * hweightN - returns the hamming weight of a N-bit word + * @x: the word to weigh + * + * The Hamming Weight of a number is the total number of bits set in it. + */ + +#define hweight32(x) generic_hweight32(x) +#define hweight16(x) generic_hweight16(x) +#define hweight8(x) generic_hweight8(x) + +#endif /* __KERNEL__ */ + +#ifdef __MIPSEB__ +/* + * find_next_zero_bit - find the first zero bit in a memory region + * @addr: The address to base the search on + * @offset: The bitnumber to start searching at + * @size: The maximum size to search + */ +static __inline__ int find_next_zero_bit(void *addr, int size, int offset) +{ + unsigned long *p = ((unsigned long *) addr) + (offset >> 5); + unsigned long result = offset & ~31UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 31UL; + if (offset) { + tmp = *(p++); + tmp |= ~0UL >> (32-offset); + if (size < 32) + goto found_first; + if (~tmp) + goto found_middle; + size -= 32; + result += 32; + } + while (size & ~31UL) { + if (~(tmp = *(p++))) + goto found_middle; + result += 32; + size -= 32; + } + if (!size) + return result; + tmp = *p; + +found_first: + tmp |= ~0UL << size; +found_middle: + return result + ffz(tmp); +} + +/* Linus sez that gcc can optimize the following correctly, we'll see if this + * holds on the Sparc as it does for the ALPHA. + */ + +#if 0 /* Fool kernel-doc since it doesn't do macros yet */ +/* + * find_first_zero_bit - find the first zero bit in a memory region + * @addr: The address to start the search at + * @size: The maximum size to search + * + * Returns the bit-number of the first zero bit, not the number of the byte + * containing a bit. + */ +static int find_first_zero_bit (void *addr, unsigned size); +#endif + +#define find_first_zero_bit(addr, size) \ + find_next_zero_bit((addr), (size), 0) + +#endif /* (__MIPSEB__) */ + +/* Now for the ext2 filesystem bit operations and helper routines. 
*/ + +#ifdef __MIPSEB__ +static __inline__ int ext2_set_bit(int nr, void * addr) +{ + int mask, retval, flags; + unsigned char *ADDR = (unsigned char *) addr; + + ADDR += nr >> 3; + mask = 1 << (nr & 0x07); + save_and_cli(flags); + retval = (mask & *ADDR) != 0; + *ADDR |= mask; + restore_flags(flags); + return retval; +} + +static __inline__ int ext2_clear_bit(int nr, void * addr) +{ + int mask, retval, flags; + unsigned char *ADDR = (unsigned char *) addr; + + ADDR += nr >> 3; + mask = 1 << (nr & 0x07); + save_and_cli(flags); + retval = (mask & *ADDR) != 0; + *ADDR &= ~mask; + restore_flags(flags); + return retval; +} + +static __inline__ int ext2_test_bit(int nr, const void * addr) +{ + int mask; + const unsigned char *ADDR = (const unsigned char *) addr; + + ADDR += nr >> 3; + mask = 1 << (nr & 0x07); + return ((mask & *ADDR) != 0); +} + +#define ext2_find_first_zero_bit(addr, size) \ + ext2_find_next_zero_bit((addr), (size), 0) + +static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) +{ + unsigned long *p = ((unsigned long *) addr) + (offset >> 5); + unsigned long result = offset & ~31UL; + unsigned long tmp; + + if (offset >= size) + return size; + size -= result; + offset &= 31UL; + if(offset) { + /* We hold the little endian value in tmp, but then the + * shift is illegal. So we could keep a big endian value + * in tmp, like this: + * + * tmp = __swab32(*(p++)); + * tmp |= ~0UL >> (32-offset); + * + * but this would decrease performance, so we change the + * shift: + */ + tmp = *(p++); + tmp |= __swab32(~0UL >> (32-offset)); + if(size < 32) + goto found_first; + if(~tmp) + goto found_middle; + size -= 32; + result += 32; + } + while(size & ~31UL) { + if(~(tmp = *(p++))) + goto found_middle; + result += 32; + size -= 32; + } + if(!size) + return result; + tmp = *p; + +found_first: + /* tmp is little endian, so we would have to swab the shift, + * see above. But then we have to swab tmp below for ffz, so + * we might as well do this here. + */ + return result + ffz(__swab32(tmp) | (~0UL << size)); +found_middle: + return result + ffz(__swab32(tmp)); +} +#else /* !(__MIPSEB__) */ + +/* Native ext2 byte ordering, just collapse using defines. */ +#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr)) +#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr)) +#define ext2_test_bit(nr, addr) test_bit((nr), (addr)) +#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size)) +#define ext2_find_next_zero_bit(addr, size, offset) \ + find_next_zero_bit((addr), (size), (offset)) + +#endif /* !(__MIPSEB__) */ + +/* + * Bitmap functions for the minix filesystem. + * FIXME: These assume that Minix uses the native byte/bitorder. + * This limits the Minix filesystem's value for data exchange very much.
+ */ +#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) +#define minix_set_bit(nr,addr) set_bit(nr,addr) +#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) +#define minix_test_bit(nr,addr) test_bit(nr,addr) +#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) + +#endif /* _ASM_BITOPS_H */ diff --git a/arch/mips/include/asm/byteorder.h b/arch/mips/include/asm/byteorder.h new file mode 100644 index 0000000..eef17b8 --- /dev/null +++ b/arch/mips/include/asm/byteorder.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1996, 99, 2003 by Ralf Baechle + */ +#ifndef _ASM_BYTEORDER_H +#define _ASM_BYTEORDER_H + +#include <asm/types.h> + +#ifdef __GNUC__ + +#ifdef CONFIG_CPU_MIPSR2 + +static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 x) +{ + __asm__( + " wsbh %0, %1 \n" + : "=r" (x) + : "r" (x)); + + return x; +} +#define __arch__swab16(x) ___arch__swab16(x) + +static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 x) +{ + __asm__( + " wsbh %0, %1 \n" + " rotr %0, %0, 16 \n" + : "=r" (x) + : "r" (x)); + + return x; +} +#define __arch__swab32(x) ___arch__swab32(x) + +#ifdef CONFIG_CPU_MIPS64_R2 + +static __inline__ __attribute_const__ __u64 ___arch__swab64(__u64 x) +{ + __asm__( + " dsbh %0, %1 \n" + " dshd %0, %0 \n" + " drotr %0, %0, 32 \n" + : "=r" (x) + : "r" (x)); + + return x; +} + +#define __arch__swab64(x) ___arch__swab64(x) + +#endif /* CONFIG_CPU_MIPS64_R2 */ + +#endif /* CONFIG_CPU_MIPSR2 */ + +#if !defined(__STRICT_ANSI__) || defined(__KERNEL__) +# define __BYTEORDER_HAS_U64__ +# define __SWAB_64_THRU_32__ +#endif + +#endif /* __GNUC__ */ + +#if defined(__MIPSEB__) +# include <linux/byteorder/big_endian.h> +#elif defined(__MIPSEL__) +# include <linux/byteorder/little_endian.h> +#else +# error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???" +#endif + +#endif /* _ASM_BYTEORDER_H */ diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h new file mode 100644 index 0000000..00696e6 --- /dev/null +++ b/arch/mips/include/asm/cache.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2011 The Chromium OS Authors. + */ + +#ifndef __MIPS_CACHE_H__ +#define __MIPS_CACHE_H__ + +#define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#define ARCH_DMA_MINALIGN (L1_CACHE_BYTES) + +/* + * CONFIG_SYS_CACHELINE_SIZE is still used in various drivers primarily for + * DMA buffer alignment. Satisfy those drivers by providing it as a synonym + * of ARCH_DMA_MINALIGN for now. + */ +#define CONFIG_SYS_CACHELINE_SIZE ARCH_DMA_MINALIGN + +#ifndef __ASSEMBLY__ +/** + * mips_cache_probe() - Probe the properties of the caches + * + * Call this to probe the properties such as line sizes of the caches + * present in the system, if any. This must be done before cache maintenance + * functions such as flush_cache may be called.
+ */ +void mips_cache_probe(void); +#endif + +#endif /* __MIPS_CACHE_H__ */ diff --git a/arch/mips/include/asm/cachectl.h b/arch/mips/include/asm/cachectl.h new file mode 100644 index 0000000..805da6f --- /dev/null +++ b/arch/mips/include/asm/cachectl.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1994, 1995, 1996 by Ralf Baechle + */ +#ifndef _ASM_CACHECTL +#define _ASM_CACHECTL + +/* + * Options for cacheflush system call + */ +#define ICACHE (1<<0) /* flush instruction cache */ +#define DCACHE (1<<1) /* writeback and flush data cache */ +#define BCACHE (ICACHE|DCACHE) /* flush both caches */ + +/* + * Caching modes for the cachectl(2) call + * + * cachectl(2) is currently not supported and returns ENOSYS. + */ +#define CACHEABLE 0 /* make pages cacheable */ +#define UNCACHEABLE 1 /* make pages uncacheable */ + +#endif /* _ASM_CACHECTL */ diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h new file mode 100644 index 0000000..98b67cc --- /dev/null +++ b/arch/mips/include/asm/cacheops.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Cache operations for the cache instruction. + * + * (C) Copyright 1996, 97, 99, 2002, 03 Ralf Baechle + * (C) Copyright 1999 Silicon Graphics, Inc. + */ +#ifndef __ASM_CACHEOPS_H +#define __ASM_CACHEOPS_H + +#ifndef __ASSEMBLY__ + +static inline void mips_cache(int op, const volatile void *addr) +{ +#ifdef __GCC_HAVE_BUILTIN_MIPS_CACHE + __builtin_mips_cache(op, addr); +#else + __asm__ __volatile__("cache %0, 0(%1)" : : "i"(op), "r"(addr)); +#endif +} + +#define MIPS32_WHICH_ICACHE 0x0 +#define MIPS32_FETCH_AND_LOCK 0x7 + +#define ICACHE_LOAD_LOCK (MIPS32_WHICH_ICACHE | (MIPS32_FETCH_AND_LOCK << 2)) + +/* Prefetch and lock instructions into cache */ +static inline void icache_lock(void *func, size_t len) +{ + int i, lines = ((len - 1) / ARCH_DMA_MINALIGN) + 1; + + for (i = 0; i < lines; i++) { + asm volatile (" cache %0, %1(%2)" + : /* No Output */ + : "I" (ICACHE_LOAD_LOCK), + "n" (i * ARCH_DMA_MINALIGN), + "r" (func) + : /* No Clobbers */); + } +} +#endif /* !__ASSEMBLY__ */ + +/* + * Cache Operations available on all MIPS processors with R4000-style caches + */ +#define INDEX_INVALIDATE_I 0x00 +#define INDEX_WRITEBACK_INV_D 0x01 +#define INDEX_LOAD_TAG_I 0x04 +#define INDEX_LOAD_TAG_D 0x05 +#define INDEX_STORE_TAG_I 0x08 +#define INDEX_STORE_TAG_D 0x09 +#if defined(CONFIG_CPU_LOONGSON2) +#define HIT_INVALIDATE_I 0x00 +#else +#define HIT_INVALIDATE_I 0x10 +#endif +#define HIT_INVALIDATE_D 0x11 +#define HIT_WRITEBACK_INV_D 0x15 + +/* + * R4000-specific cacheops + */ +#define CREATE_DIRTY_EXCL_D 0x0d +#define FILL 0x14 +#define HIT_WRITEBACK_I 0x18 +#define HIT_WRITEBACK_D 0x19 + +/* + * R4000SC and R4400SC-specific cacheops + */ +#define INDEX_INVALIDATE_SI 0x02 +#define INDEX_WRITEBACK_INV_SD 0x03 +#define INDEX_LOAD_TAG_SI 0x06 +#define INDEX_LOAD_TAG_SD 0x07 +#define INDEX_STORE_TAG_SI 0x0A +#define INDEX_STORE_TAG_SD 0x0B +#define CREATE_DIRTY_EXCL_SD 0x0f +#define HIT_INVALIDATE_SI 0x12 +#define HIT_INVALIDATE_SD 0x13 +#define HIT_WRITEBACK_INV_SD 0x17 +#define HIT_WRITEBACK_SD 0x1b +#define HIT_SET_VIRTUAL_SI 0x1e +#define HIT_SET_VIRTUAL_SD 0x1f + +/* + * R5000-specific cacheops + */ +#define R5K_PAGE_INVALIDATE_S 0x17 + +/* + * RM7000-specific cacheops + */ +#define PAGE_INVALIDATE_T 0x16 + +/* + * R10000-specific cacheops + * + * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused. + * Most of the _S cacheops are identical to the R4000SC _SD cacheops.
+ */ +#define INDEX_WRITEBACK_INV_S 0x03 +#define INDEX_LOAD_TAG_S 0x07 +#define INDEX_STORE_TAG_S 0x0B +#define HIT_INVALIDATE_S 0x13 +#define CACHE_BARRIER 0x14 +#define HIT_WRITEBACK_INV_S 0x17 +#define INDEX_LOAD_DATA_I 0x18 +#define INDEX_LOAD_DATA_D 0x19 +#define INDEX_LOAD_DATA_S 0x1b +#define INDEX_STORE_DATA_I 0x1c +#define INDEX_STORE_DATA_D 0x1d +#define INDEX_STORE_DATA_S 0x1f + +#endif /* __ASM_CACHEOPS_H */ diff --git a/arch/mips/include/asm/cm.h b/arch/mips/include/asm/cm.h new file mode 100644 index 0000000..8f37471 --- /dev/null +++ b/arch/mips/include/asm/cm.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * MIPS Coherence Manager (CM) Register Definitions + * + * Copyright (c) 2016 Imagination Technologies Ltd. + */ +#ifndef __MIPS_ASM_CM_H__ +#define __MIPS_ASM_CM_H__ + +/* Global Control Register (GCR) offsets */ +#define GCR_BASE 0x0008 +#define GCR_BASE_UPPER 0x000c +#define GCR_REV 0x0030 +#define GCR_L2_CONFIG 0x0130 +#define GCR_L2_TAG_ADDR 0x0600 +#define GCR_L2_TAG_ADDR_UPPER 0x0604 +#define GCR_L2_TAG_STATE 0x0608 +#define GCR_L2_TAG_STATE_UPPER 0x060c +#define GCR_L2_DATA 0x0610 +#define GCR_L2_DATA_UPPER 0x0614 +#define GCR_Cx_COHERENCE 0x2008 + +/* GCR_REV CM versions */ +#define GCR_REV_CM3 0x0800 + +/* GCR_L2_CONFIG fields */ +#define GCR_L2_CONFIG_ASSOC_SHIFT 0 +#define GCR_L2_CONFIG_ASSOC_BITS 8 +#define GCR_L2_CONFIG_LINESZ_SHIFT 8 +#define GCR_L2_CONFIG_LINESZ_BITS 4 +#define GCR_L2_CONFIG_SETSZ_SHIFT 12 +#define GCR_L2_CONFIG_SETSZ_BITS 4 +#define GCR_L2_CONFIG_BYPASS (1 << 20) + +/* GCR_Cx_COHERENCE */ +#define GCR_Cx_COHERENCE_DOM_EN (0xff << 0) +#define GCR_Cx_COHERENCE_EN (0x1 << 0) + +#ifndef __ASSEMBLY__ + +#include <asm/io.h> + +static inline void *mips_cm_base(void) +{ + return (void *)CKSEG1ADDR(CONFIG_MIPS_CM_BASE); +} + +static inline unsigned long mips_cm_l2_line_size(void) +{ + unsigned long l2conf, line_sz; + + l2conf = __raw_readl(mips_cm_base() + GCR_L2_CONFIG); + + line_sz = l2conf >> GCR_L2_CONFIG_LINESZ_SHIFT; + line_sz &= GENMASK(GCR_L2_CONFIG_LINESZ_BITS - 1, 0); + return line_sz ? (2 << line_sz) : 0; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* __MIPS_ASM_CM_H__ */ diff --git a/arch/mips/include/asm/config.h b/arch/mips/include/asm/config.h new file mode 100644 index 0000000..7ea4436 --- /dev/null +++ b/arch/mips/include/asm/config.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2009 Freescale Semiconductor, Inc. + */ + +#ifndef _ASM_CONFIG_H_ +#define _ASM_CONFIG_H_ + +#define CONFIG_LMB +#define CONFIG_SYS_BOOT_RAMDISK_HIGH + +#endif diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h new file mode 100644 index 0000000..11a577c --- /dev/null +++ b/arch/mips/include/asm/cpu-features.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W.
Rozycki + */ +#ifndef __ASM_CPU_FEATURES_H +#define __ASM_CPU_FEATURES_H + +#include <cpu-feature-overrides.h> + +#ifdef CONFIG_32BIT +# ifndef cpu_has_64bits +# define cpu_has_64bits 0 +# endif +# ifndef cpu_has_64bit_addresses +# define cpu_has_64bit_addresses 0 +# endif +#endif + +#ifdef CONFIG_64BIT +# ifndef cpu_has_64bits +# define cpu_has_64bits 1 +# endif +# ifndef cpu_has_64bit_addresses +# define cpu_has_64bit_addresses 1 +# endif +#endif + +#endif /* __ASM_CPU_FEATURES_H */ diff --git a/arch/mips/include/asm/global_data.h b/arch/mips/include/asm/global_data.h new file mode 100644 index 0000000..7b4ad08 --- /dev/null +++ b/arch/mips/include/asm/global_data.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * (C) Copyright 2002-2010 + * Wolfgang Denk, DENX Software Engineering, wd@denx.de. + */ + +#ifndef __ASM_GBL_DATA_H +#define __ASM_GBL_DATA_H + +#include <asm/regdef.h> + +/* Architecture-specific global data */ +struct arch_global_data { +#ifdef CONFIG_DYNAMIC_IO_PORT_BASE + unsigned long io_port_base; +#endif +#ifdef CONFIG_ARCH_ATH79 + unsigned long id; + unsigned long soc; + unsigned long rev; + unsigned long ver; +#endif +#ifdef CONFIG_SYS_CACHE_SIZE_AUTO + unsigned short l1i_line_size; + unsigned short l1d_line_size; +#endif +#ifdef CONFIG_MIPS_L2_CACHE + unsigned short l2_line_size; +#endif +}; + +#include <asm-generic/global_data.h> + +#define DECLARE_GLOBAL_DATA_PTR register volatile gd_t *gd asm ("k0") + +#endif /* __ASM_GBL_DATA_H */ diff --git a/arch/mips/include/asm/gpio.h b/arch/mips/include/asm/gpio.h new file mode 100644 index 0000000..306ab4c --- /dev/null +++ b/arch/mips/include/asm/gpio.h @@ -0,0 +1 @@ +#include <asm-generic/gpio.h> diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h new file mode 100644 index 0000000..7c40e41 --- /dev/null +++ b/arch/mips/include/asm/io.h @@ -0,0 +1,585 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1994, 1995 Waldorf GmbH + * Copyright (C) 1994 - 2000, 06 Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. + * Author: Maciej W. Rozycki + */ +#ifndef _ASM_IO_H +#define _ASM_IO_H + +#include <linux/bug.h> +#include <linux/compiler.h> +#include <linux/types.h> + +#include <asm/addrspace.h> +#include <asm/byteorder.h> +#include <asm/cpu-features.h> +#include <asm/pgtable-bits.h> +#include <asm/processor.h> +#include <asm/string.h> + +#include <ioremap.h> +#include <mangle-port.h> +#include <spaces.h> + +/* + * Raw operations are never swapped in software. OTOH values that raw + * operations are working on may or may not have been swapped by the bus + * hardware. An example use would be for flash memory that's used for + * execute in place. + */ +# define __raw_ioswabb(a, x) (x) +# define __raw_ioswabw(a, x) (x) +# define __raw_ioswabl(a, x) (x) +# define __raw_ioswabq(a, x) (x) +# define ____raw_ioswabq(a, x) (x) + +/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */ + +#define IO_SPACE_LIMIT 0xffff + +#ifdef CONFIG_DYNAMIC_IO_PORT_BASE + +static inline ulong mips_io_port_base(void) +{ + DECLARE_GLOBAL_DATA_PTR; + + return gd->arch.io_port_base; +} + +static inline void set_io_port_base(unsigned long base) +{ + DECLARE_GLOBAL_DATA_PTR; + + gd->arch.io_port_base = base; + barrier(); +} + +#else /* !CONFIG_DYNAMIC_IO_PORT_BASE */ + +static inline ulong mips_io_port_base(void) +{ + return 0; +} + +static inline void set_io_port_base(unsigned long base) +{ + BUG_ON(base); +} + +#endif /* !CONFIG_DYNAMIC_IO_PORT_BASE */ + +/* + * virt_to_phys - map virtual addresses to physical + * @address: address to remap + * + * The returned physical address is the physical (CPU) mapping for + * the memory address given.
It is only valid to use this function on + * addresses directly mapped or allocated via kmalloc. + * + * This function does not give bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ +static inline unsigned long virt_to_phys(volatile const void *address) +{ + unsigned long addr = (unsigned long)address; + + /* this corresponds to kernel implementation of __pa() */ +#ifdef CONFIG_64BIT + if (addr < CKSEG0) + return XPHYSADDR(addr); +#endif + return CPHYSADDR(addr); +} +#define virt_to_phys virt_to_phys + +/* + * phys_to_virt - map physical address to virtual + * @address: address to remap + * + * The returned virtual address is a current CPU mapping for + * the memory address given. It is only valid to use this function on + * addresses that have a kernel mapping + * + * This function does not handle bus mappings for DMA transfers. In + * almost all conceivable cases a device driver should not be using + * this function + */ +static inline void *phys_to_virt(unsigned long address) +{ + return (void *)(address + PAGE_OFFSET - PHYS_OFFSET); +} +#define phys_to_virt phys_to_virt + +/* + * ISA I/O bus memory addresses are 1:1 with the physical address. + */ +static inline unsigned long isa_virt_to_bus(volatile void *address) +{ + return (unsigned long)address - PAGE_OFFSET; +} + +static inline void *isa_bus_to_virt(unsigned long address) +{ + return (void *)(address + PAGE_OFFSET); +} + +#define isa_page_to_bus page_to_phys + +/* + * However PCI ones are not necessarily 1:1 and therefore these interfaces + * are forbidden in portable PCI drivers. + * + * Allow them for x86 for legacy drivers, though. + */ +#define virt_to_bus virt_to_phys +#define bus_to_virt phys_to_virt + +static inline void __iomem *__ioremap_mode(phys_addr_t offset, unsigned long size, + unsigned long flags) +{ + void __iomem *addr; + phys_addr_t phys_addr; + + addr = plat_ioremap(offset, size, flags); + if (addr) + return addr; + + phys_addr = fixup_bigphys_addr(offset, size); + return (void __iomem *)(unsigned long)CKSEG1ADDR(phys_addr); +} + +/* + * ioremap - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + */ +#define ioremap(offset, size) \ + __ioremap_mode((offset), (size), _CACHE_UNCACHED) + +/* + * ioremap_nocache - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap_nocache performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + * + * This version of ioremap ensures that the memory is marked uncachable + * on the CPU as well as honouring existing caching rules from things like + * the PCI bus. Note that there are other caches and buffers on many + * busses. 
In particular driver authors should read up on PCI writes + * + * It's useful if some control registers are in such an area and + * write combining or read caching is not desirable: + */ +#define ioremap_nocache(offset, size) \ + __ioremap_mode((offset), (size), _CACHE_UNCACHED) +#define ioremap_uc ioremap_nocache + +/* + * ioremap_cachable - map bus memory into CPU space + * @offset: bus address of the memory + * @size: size of the resource to map + * + * ioremap_nocache performs a platform specific sequence of operations to + * make bus memory CPU accessible via the readb/readw/readl/writeb/ + * writew/writel functions and the other mmio helpers. The returned + * address is not guaranteed to be usable directly as a virtual + * address. + * + * This version of ioremap ensures that the memory is marked cachable by + * the CPU. Also enables full write-combining. Useful for some + * memory-like regions on I/O busses. + */ +#define ioremap_cachable(offset, size) \ + __ioremap_mode((offset), (size), _page_cachable_default) + +/* + * These two are MIPS specific ioremap variant. ioremap_cacheable_cow + * requests a cachable mapping, ioremap_uncached_accelerated requests a + * mapping using the uncached accelerated mode which isn't supported on + * all processors. + */ +#define ioremap_cacheable_cow(offset, size) \ + __ioremap_mode((offset), (size), _CACHE_CACHABLE_COW) +#define ioremap_uncached_accelerated(offset, size) \ + __ioremap_mode((offset), (size), _CACHE_UNCACHED_ACCELERATED) + +static inline void iounmap(const volatile void __iomem *addr) +{ + plat_iounmap(addr); +} + +#ifdef CONFIG_CPU_CAVIUM_OCTEON +#define war_octeon_io_reorder_wmb() wmb() +#else +#define war_octeon_io_reorder_wmb() do { } while (0) +#endif + +#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \ + \ +static inline void pfx##write##bwlq(type val, \ + volatile void __iomem *mem) \ +{ \ + volatile type *__mem; \ + type __val; \ + \ + war_octeon_io_reorder_wmb(); \ + \ + __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ + \ + __val = pfx##ioswab##bwlq(__mem, val); \ + \ + if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ + *__mem = __val; \ + else if (cpu_has_64bits) { \ + type __tmp; \ + \ + __asm__ __volatile__( \ + ".set arch=r4000" "\t\t# __writeq""\n\t" \ + "dsll32 %L0, %L0, 0" "\n\t" \ + "dsrl32 %L0, %L0, 0" "\n\t" \ + "dsll32 %M0, %M0, 0" "\n\t" \ + "or %L0, %L0, %M0" "\n\t" \ + "sd %L0, %2" "\n\t" \ + ".set mips0" "\n" \ + : "=r" (__tmp) \ + : "0" (__val), "m" (*__mem)); \ + } else \ + BUG(); \ +} \ + \ +static inline type pfx##read##bwlq(const volatile void __iomem *mem) \ +{ \ + volatile type *__mem; \ + type __val; \ + \ + __mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \ + \ + if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \ + __val = *__mem; \ + else if (cpu_has_64bits) { \ + __asm__ __volatile__( \ + ".set arch=r4000" "\t\t# __readq" "\n\t" \ + "ld %L0, %1" "\n\t" \ + "dsra32 %M0, %L0, 0" "\n\t" \ + "sll %L0, %L0, 0" "\n\t" \ + ".set mips0" "\n" \ + : "=r" (__val) \ + : "m" (*__mem)); \ + } else { \ + __val = 0; \ + BUG(); \ + } \ + \ + return pfx##ioswab##bwlq(__mem, __val); \ +} + +#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p) \ + \ +static inline void pfx##out##bwlq##p(type val, unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + war_octeon_io_reorder_wmb(); \ + \ + __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \ + \ + __val = pfx##ioswab##bwlq(__addr, val); \ + \ + /* Really, we want this to be 
atomic */ \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + *__addr = __val; \ +} \ + \ +static inline type pfx##in##bwlq##p(unsigned long port) \ +{ \ + volatile type *__addr; \ + type __val; \ + \ + __addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base() + port); \ + \ + BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long)); \ + \ + __val = *__addr; \ + \ + return pfx##ioswab##bwlq(__addr, __val); \ +} + +#define __BUILD_MEMORY_PFX(bus, bwlq, type) \ + \ +__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1) + +#define BUILDIO_MEM(bwlq, type) \ + \ +__BUILD_MEMORY_PFX(__raw_, bwlq, type) \ +__BUILD_MEMORY_PFX(, bwlq, type) \ +__BUILD_MEMORY_PFX(__mem_, bwlq, type) \ + +BUILDIO_MEM(b, u8) +BUILDIO_MEM(w, u16) +BUILDIO_MEM(l, u32) +BUILDIO_MEM(q, u64) + +#define __BUILD_IOPORT_PFX(bus, bwlq, type) \ + __BUILD_IOPORT_SINGLE(bus, bwlq, type, ) \ + __BUILD_IOPORT_SINGLE(bus, bwlq, type, _p) + +#define BUILDIO_IOPORT(bwlq, type) \ + __BUILD_IOPORT_PFX(, bwlq, type) \ + __BUILD_IOPORT_PFX(__mem_, bwlq, type) + +BUILDIO_IOPORT(b, u8) +BUILDIO_IOPORT(w, u16) +BUILDIO_IOPORT(l, u32) +#ifdef CONFIG_64BIT +BUILDIO_IOPORT(q, u64) +#endif + +#define __BUILDIO(bwlq, type) \ + \ +__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0) + +__BUILDIO(q, u64) + +#define readb_relaxed readb +#define readw_relaxed readw +#define readl_relaxed readl +#define readq_relaxed readq + +#define writeb_relaxed writeb +#define writew_relaxed writew +#define writel_relaxed writel +#define writeq_relaxed writeq + +#define readb_be(addr) \ + __raw_readb((__force unsigned *)(addr)) +#define readw_be(addr) \ + be16_to_cpu(__raw_readw((__force unsigned *)(addr))) +#define readl_be(addr) \ + be32_to_cpu(__raw_readl((__force unsigned *)(addr))) +#define readq_be(addr) \ + be64_to_cpu(__raw_readq((__force unsigned *)(addr))) + +#define writeb_be(val, addr) \ + __raw_writeb((val), (__force unsigned *)(addr)) +#define writew_be(val, addr) \ + __raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr)) +#define writel_be(val, addr) \ + __raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr)) +#define writeq_be(val, addr) \ + __raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr)) + +/* + * Some code tests for these symbols + */ +#define readq readq +#define writeq writeq + +#define __BUILD_MEMORY_STRING(bwlq, type) \ + \ +static inline void writes##bwlq(volatile void __iomem *mem, \ + const void *addr, unsigned int count) \ +{ \ + const volatile type *__addr = addr; \ + \ + while (count--) { \ + __mem_write##bwlq(*__addr, mem); \ + __addr++; \ + } \ +} \ + \ +static inline void reads##bwlq(volatile void __iomem *mem, void *addr, \ + unsigned int count) \ +{ \ + volatile type *__addr = addr; \ + \ + while (count--) { \ + *__addr = __mem_read##bwlq(mem); \ + __addr++; \ + } \ +} + +#define __BUILD_IOPORT_STRING(bwlq, type) \ + \ +static inline void outs##bwlq(unsigned long port, const void *addr, \ + unsigned int count) \ +{ \ + const volatile type *__addr = addr; \ + \ + while (count--) { \ + __mem_out##bwlq(*__addr, port); \ + __addr++; \ + } \ +} \ + \ +static inline void ins##bwlq(unsigned long port, void *addr, \ + unsigned int count) \ +{ \ + volatile type *__addr = addr; \ + \ + while (count--) { \ + *__addr = __mem_in##bwlq(port); \ + __addr++; \ + } \ +} + +#define BUILDSTRING(bwlq, type) \ + \ +__BUILD_MEMORY_STRING(bwlq, type) \ +__BUILD_IOPORT_STRING(bwlq, type) + +BUILDSTRING(b, u8) +BUILDSTRING(w, u16) +BUILDSTRING(l, u32) +#ifdef CONFIG_64BIT +BUILDSTRING(q, u64) +#endif + + +#ifdef CONFIG_CPU_CAVIUM_OCTEON 
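+/*
+ * A kernel-style sketch (hypothetical; dev_lock and dev_regs are
+ * placeholders, not part of this header) of what mmiowb() is for:
+ * ordering a posted MMIO write ahead of a subsequent lock release:
+ *
+ *	spin_lock(&dev_lock);
+ *	writel(val, dev_regs);
+ *	mmiowb();
+ *	spin_unlock(&dev_lock);
+ */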
+#define mmiowb() wmb() +#else +/* Depends on MIPS II instruction set */ +#define mmiowb() asm volatile ("sync" ::: "memory") +#endif + +static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count) +{ + memset((void __force *)addr, val, count); +} +static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count) +{ + memcpy(dst, (void __force *)src, count); +} +static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count) +{ + memcpy((void __force *)dst, src, count); +} + +/* + * Read a 32-bit register that requires a 64-bit read cycle on the bus. + * Avoid interrupt mucking, just adjust the address for 4-byte access. + * Assume the addresses are 8-byte aligned. + */ +#ifdef __MIPSEB__ +#define __CSR_32_ADJUST 4 +#else +#define __CSR_32_ADJUST 0 +#endif + +#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v)) +#define csr_in32(a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST)) + +/* + * U-Boot specific + */ +#define sync() mmiowb() + +#define MAP_NOCACHE 1 + +static inline void * +map_physmem(phys_addr_t paddr, unsigned long len, unsigned long flags) +{ + if (flags == MAP_NOCACHE) + return ioremap(paddr, len); + + return (void *)CKSEG0ADDR(paddr); +} +#define map_physmem map_physmem + +#define __BUILD_CLRBITS(bwlq, sfx, end, type) \ + \ +static inline void clrbits_##sfx(volatile void __iomem *mem, type clr) \ +{ \ + type __val = __raw_read##bwlq(mem); \ + __val = end##_to_cpu(__val); \ + __val &= ~clr; \ + __val = cpu_to_##end(__val); \ + __raw_write##bwlq(__val, mem); \ +} + +#define __BUILD_SETBITS(bwlq, sfx, end, type) \ + \ +static inline void setbits_##sfx(volatile void __iomem *mem, type set) \ +{ \ + type __val = __raw_read##bwlq(mem); \ + __val = end##_to_cpu(__val); \ + __val |= set; \ + __val = cpu_to_##end(__val); \ + __raw_write##bwlq(__val, mem); \ +} + +#define __BUILD_CLRSETBITS(bwlq, sfx, end, type) \ + \ +static inline void clrsetbits_##sfx(volatile void __iomem *mem, \ + type clr, type set) \ +{ \ + type __val = __raw_read##bwlq(mem); \ + __val = end##_to_cpu(__val); \ + __val &= ~clr; \ + __val |= set; \ + __val = cpu_to_##end(__val); \ + __raw_write##bwlq(__val, mem); \ +} + +#define BUILD_CLRSETBITS(bwlq, sfx, end, type) \ + \ +__BUILD_CLRBITS(bwlq, sfx, end, type) \ +__BUILD_SETBITS(bwlq, sfx, end, type) \ +__BUILD_CLRSETBITS(bwlq, sfx, end, type) + +#define __to_cpu(v) (v) +#define cpu_to__(v) (v) + +#define out_arch(type, endian, a, v) __raw_write##type(cpu_to_##endian(v),a) +#define in_arch(type, endian, a) endian##_to_cpu(__raw_read##type(a)) + +#define out_le64(a, v) out_arch(q, le64, a, v) +#define out_le32(a, v) out_arch(l, le32, a, v) +#define out_le16(a, v) out_arch(w, le16, a, v) + +#define in_le64(a) in_arch(q, le64, a) +#define in_le32(a) in_arch(l, le32, a) +#define in_le16(a) in_arch(w, le16, a) + +#define out_be64(a, v) out_arch(q, be64, a, v) +#define out_be32(a, v) out_arch(l, be32, a, v) +#define out_be16(a, v) out_arch(w, be16, a, v) + +#define in_be64(a) in_arch(q, be64, a) +#define in_be32(a) in_arch(l, be32, a) +#define in_be16(a) in_arch(w, be16, a) + +#define out_8(a, v) __raw_writeb(v, a) +#define in_8(a) __raw_readb(a) + +BUILD_CLRSETBITS(b, 8, _, u8) +BUILD_CLRSETBITS(w, le16, le16, u16) +BUILD_CLRSETBITS(w, be16, be16, u16) +BUILD_CLRSETBITS(w, 16, _, u16) +BUILD_CLRSETBITS(l, le32, le32, u32) +BUILD_CLRSETBITS(l, be32, be32, u32) +BUILD_CLRSETBITS(l, 32, _, u32) +BUILD_CLRSETBITS(q, le64, le64, u64) +BUILD_CLRSETBITS(q, 
be64, be64, u64) +BUILD_CLRSETBITS(q, 64, _, u64) + +#include + +#endif /* _ASM_IO_H */ diff --git a/arch/mips/include/asm/isadep.h b/arch/mips/include/asm/isadep.h new file mode 100644 index 0000000..d168320 --- /dev/null +++ b/arch/mips/include/asm/isadep.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Various ISA level dependent constants. + * Most of the following constants reflect the different layout + * of Coprocessor 0 registers. + * + * Copyright (c) 1998 Harald Koerfgen + */ + +#ifndef __ASM_ISADEP_H +#define __ASM_ISADEP_H + +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) +/* + * R2000 or R3000 + */ + +/* + * kernel or user mode? (CP0_STATUS) + */ +#define KU_MASK 0x08 +#define KU_USER 0x08 +#define KU_KERN 0x00 + +#else +/* + * kernel or user mode? + */ +#define KU_MASK 0x18 +#define KU_USER 0x10 +#define KU_KERN 0x00 + +#endif + +#endif /* __ASM_ISADEP_H */ diff --git a/arch/mips/include/asm/linkage.h b/arch/mips/include/asm/linkage.h new file mode 100644 index 0000000..e69de29 diff --git a/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h new file mode 100644 index 0000000..38f2302 --- /dev/null +++ b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2003 Ralf Baechle + */ +#ifndef __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H +#define __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H + +/* Intentionally empty file ... */ + +#endif /* __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H */ diff --git a/arch/mips/include/asm/mach-generic/ioremap.h b/arch/mips/include/asm/mach-generic/ioremap.h new file mode 100644 index 0000000..d6258f5 --- /dev/null +++ b/arch/mips/include/asm/mach-generic/ioremap.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_MACH_GENERIC_IOREMAP_H +#define __ASM_MACH_GENERIC_IOREMAP_H + +#include + +/* + * Allow physical addresses to be fixed up to help peripherals located + * outside the low 32-bit range -- generic pass-through version. + */ +static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, + phys_addr_t size) +{ + return phys_addr; +} + +static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size, + unsigned long flags) +{ + return NULL; +} + +static inline int plat_iounmap(const volatile void __iomem *addr) +{ + return 0; +} + +#define _page_cachable_default _CACHE_CACHABLE_NONCOHERENT + +#endif /* __ASM_MACH_GENERIC_IOREMAP_H */ diff --git a/arch/mips/include/asm/mach-generic/mangle-port.h b/arch/mips/include/asm/mach-generic/mangle-port.h new file mode 100644 index 0000000..3f95bbc --- /dev/null +++ b/arch/mips/include/asm/mach-generic/mangle-port.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2003, 2004 Ralf Baechle + */ +#ifndef __ASM_MACH_GENERIC_MANGLE_PORT_H +#define __ASM_MACH_GENERIC_MANGLE_PORT_H + +#define __swizzle_addr_b(port) (port) +#define __swizzle_addr_w(port) (port) +#define __swizzle_addr_l(port) (port) +#define __swizzle_addr_q(port) (port) + +/* + * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware; + * less sane hardware forces software to fiddle with this... + * + * Regardless, if the host bus endianness mismatches that of PCI/ISA, then + * you can't have the numerical value of data and byte addresses within + * multibyte quantities both preserved at the same time. 
Hence two
+ * variations of functions: non-prefixed ones that preserve the value
+ * and prefixed ones that preserve byte addresses. The latter are
+ * typically used for moving raw data between a peripheral and memory (cf.
+ * string I/O functions), hence the "__mem_" prefix.
+ */
+#if defined(CONFIG_SWAP_IO_SPACE)
+
+# define ioswabb(a, x)		(x)
+# define __mem_ioswabb(a, x)	(x)
+# define ioswabw(a, x)		le16_to_cpu(x)
+# define __mem_ioswabw(a, x)	(x)
+# define ioswabl(a, x)		le32_to_cpu(x)
+# define __mem_ioswabl(a, x)	(x)
+# define ioswabq(a, x)		le64_to_cpu(x)
+# define __mem_ioswabq(a, x)	(x)
+
+#else
+
+# define ioswabb(a, x)		(x)
+# define __mem_ioswabb(a, x)	(x)
+# define ioswabw(a, x)		(x)
+# define __mem_ioswabw(a, x)	cpu_to_le16(x)
+# define ioswabl(a, x)		(x)
+# define __mem_ioswabl(a, x)	cpu_to_le32(x)
+# define ioswabq(a, x)		(x)
+# define __mem_ioswabq(a, x)	cpu_to_le64(x)
+
+#endif
+
+#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
new file mode 100644
index 0000000..539d0a5
--- /dev/null
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
+ * Copyright (C) 2000, 2002 Maciej W. Rozycki
+ * Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_MACH_GENERIC_SPACES_H
+#define _ASM_MACH_GENERIC_SPACES_H
+
+#include
+
+/*
+ * This gives the physical RAM offset.
+ */
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET	_AC(0, UL)
+#endif
+
+#ifdef CONFIG_32BIT
+#ifdef CONFIG_KVM_GUEST
+#define CAC_BASE	_AC(0x40000000, UL)
+#else
+#define CAC_BASE	_AC(0x80000000, UL)
+#endif
+#ifndef IO_BASE
+#define IO_BASE		_AC(0xa0000000, UL)
+#endif
+#ifndef UNCAC_BASE
+#define UNCAC_BASE	_AC(0xa0000000, UL)
+#endif
+
+#ifndef MAP_BASE
+#ifdef CONFIG_KVM_GUEST
+#define MAP_BASE	_AC(0x60000000, UL)
+#else
+#define MAP_BASE	_AC(0xc0000000, UL)
+#endif
+#endif
+
+/*
+ * Memory above this physical address will be considered highmem.
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START	_AC(0x20000000, UL)
+#endif
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+
+#ifndef CAC_BASE
+#ifdef CONFIG_DMA_NONCOHERENT
+#define CAC_BASE	_AC(0x9800000000000000, UL)
+#else
+#define CAC_BASE	_AC(0xa800000000000000, UL)
+#endif
+#endif
+
+#ifndef IO_BASE
+#define IO_BASE		_AC(0x9000000000000000, UL)
+#endif
+
+#ifndef UNCAC_BASE
+#define UNCAC_BASE	_AC(0x9000000000000000, UL)
+#endif
+
+#ifndef MAP_BASE
+#define MAP_BASE	_AC(0xc000000000000000, UL)
+#endif
+
+/*
+ * Memory above this physical address will be considered highmem.
+ * Fixme: 59 bits is a fictive number and makes assumptions about processors
+ * in the distant future. Nobody will care for a few years :-)
+ */
+#ifndef HIGHMEM_START
+#define HIGHMEM_START	(_AC(1, UL) << _AC(59, UL))
+#endif
+
+#define TO_PHYS(x)	(		((x) & TO_PHYS_MASK))
+#define TO_CAC(x)	(CAC_BASE   |	((x) & TO_PHYS_MASK))
+#define TO_UNCAC(x)	(UNCAC_BASE |	((x) & TO_PHYS_MASK))
+
+#endif /* CONFIG_64BIT */
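+
+/*
+ * As a worked example (hypothetical CONFIG_64BIT/CONFIG_DMA_NONCOHERENT
+ * configuration, with TO_PHYS_MASK assumed wide enough to pass the
+ * address through):
+ *
+ *	TO_CAC(0x1fc00000UL)   == 0x980000001fc00000	(cached XKPHYS)
+ *	TO_UNCAC(0x1fc00000UL) == 0x900000001fc00000	(uncached XKPHYS)
+ */
+
+/*
+ * This handles the memory map.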
+ */ +#ifndef PAGE_OFFSET +#define PAGE_OFFSET (CAC_BASE + PHYS_OFFSET) +#endif + +#ifndef FIXADDR_TOP +#ifdef CONFIG_KVM_GUEST +#define FIXADDR_TOP ((unsigned long)(long)(int)0x7ffe0000) +#else +#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000) +#endif +#endif + +#endif /* __ASM_MACH_GENERIC_SPACES_H */ diff --git a/arch/mips/include/asm/malta.h b/arch/mips/include/asm/malta.h new file mode 100644 index 0000000..5c56539 --- /dev/null +++ b/arch/mips/include/asm/malta.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2013 Gabor Juhos + * Copyright (C) 2013 Imagination Technologies + */ + +#ifndef _MIPS_ASM_MALTA_H +#define _MIPS_ASM_MALTA_H + +#define MALTA_GT_BASE 0x1be00000 +#define MALTA_GT_PCIIO_BASE 0x18000000 +#define MALTA_GT_UART0_BASE (MALTA_GT_PCIIO_BASE + 0x3f8) + +#define MALTA_MSC01_BIU_BASE 0x1bc80000 +#define MALTA_MSC01_PCI_BASE 0x1bd00000 +#define MALTA_MSC01_PBC_BASE 0x1bd40000 +#define MALTA_MSC01_IP1_BASE 0x1bc00000 +#define MALTA_MSC01_IP1_SIZE 0x00400000 +#define MALTA_MSC01_IP2_BASE1 0x10000000 +#define MALTA_MSC01_IP2_SIZE1 0x08000000 +#define MALTA_MSC01_IP2_BASE2 0x18000000 +#define MALTA_MSC01_IP2_SIZE2 0x04000000 +#define MALTA_MSC01_IP3_BASE 0x1c000000 +#define MALTA_MSC01_IP3_SIZE 0x04000000 +#define MALTA_MSC01_PCIMEM_BASE 0x10000000 +#define MALTA_MSC01_PCIMEM_SIZE 0x10000000 +#define MALTA_MSC01_PCIMEM_MAP 0x10000000 +#define MALTA_MSC01_PCIIO_BASE 0x1b000000 +#define MALTA_MSC01_PCIIO_SIZE 0x00800000 +#define MALTA_MSC01_PCIIO_MAP 0x00000000 +#define MALTA_MSC01_UART0_BASE (MALTA_MSC01_PCIIO_BASE + 0x3f8) + +#define MALTA_ASCIIWORD 0x1f000410 +#define MALTA_ASCIIPOS0 0x1f000418 +#define MALTA_ASCIIPOS1 0x1f000420 +#define MALTA_ASCIIPOS2 0x1f000428 +#define MALTA_ASCIIPOS3 0x1f000430 +#define MALTA_ASCIIPOS4 0x1f000438 +#define MALTA_ASCIIPOS5 0x1f000440 +#define MALTA_ASCIIPOS6 0x1f000448 +#define MALTA_ASCIIPOS7 0x1f000450 + +#define MALTA_RESET_BASE 0x1f000500 +#define GORESET 0x42 + +#define MALTA_FLASH_BASE 0x1e000000 + +#define MALTA_REVISION 0x1fc00010 +#define MALTA_REVISION_CORID_SHF 10 +#define MALTA_REVISION_CORID_MSK (0x3f << MALTA_REVISION_CORID_SHF) +#define MALTA_REVISION_CORID_CORE_LV 1 +#define MALTA_REVISION_CORID_CORE_FPGA6 14 + +#define PCI_CFG_PIIX4_PIRQRCA 0x60 +#define PCI_CFG_PIIX4_PIRQRCB 0x61 +#define PCI_CFG_PIIX4_PIRQRCC 0x62 +#define PCI_CFG_PIIX4_PIRQRCD 0x63 +#define PCI_CFG_PIIX4_SERIRQC 0x64 +#define PCI_CFG_PIIX4_GENCFG 0xb0 + +#define PCI_CFG_PIIX4_SERIRQC_EN (1 << 7) +#define PCI_CFG_PIIX4_SERIRQC_CONT (1 << 6) + +#define PCI_CFG_PIIX4_GENCFG_SERIRQ (1 << 16) + +#define PCI_CFG_PIIX4_IDETIM_PRI 0x40 +#define PCI_CFG_PIIX4_IDETIM_SEC 0x42 + +#define PCI_CFG_PIIX4_IDETIM_IDE (1 << 15) + +#endif /* _MIPS_ASM_MALTA_H */ diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h new file mode 100644 index 0000000..f80311e --- /dev/null +++ b/arch/mips/include/asm/mipsregs.h @@ -0,0 +1,2014 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1994, 1995, 1996, 1997, 2000, 2001 by Ralf Baechle + * Copyright (C) 2000 Silicon Graphics, Inc. + * Modified for further R[236]000 support by Paul M. Antoine, 1996. + * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000, 07 MIPS Technologies, Inc. + * Copyright (C) 2003, 2004 Maciej W. Rozycki + */ +#ifndef _ASM_MIPSREGS_H +#define _ASM_MIPSREGS_H + +/* + * The following macros are especially useful for __asm__ + * inline assembler. 
+ */
+#ifndef __STR
+#define __STR(x) #x
+#endif
+#ifndef STR
+#define STR(x) __STR(x)
+#endif
+
+/*
+ * Configure language
+ */
+#ifdef __ASSEMBLY__
+#define _ULCAST_
+#else
+#define _ULCAST_ (unsigned long)
+#endif
+
+/*
+ * Coprocessor 0 register names
+ */
+#define CP0_INDEX $0
+#define CP0_RANDOM $1
+#define CP0_ENTRYLO0 $2
+#define CP0_ENTRYLO1 $3
+#define CP0_CONF $3
+#define CP0_GLOBALNUMBER $3, 1
+#define CP0_CONTEXT $4
+#define CP0_PAGEMASK $5
+#define CP0_WIRED $6
+#define CP0_INFO $7
+#define CP0_HWRENA $7, 0
+#define CP0_BADVADDR $8
+#define CP0_BADINSTR $8, 1
+#define CP0_COUNT $9
+#define CP0_ENTRYHI $10
+#define CP0_COMPARE $11
+#define CP0_STATUS $12
+#define CP0_CAUSE $13
+#define CP0_EPC $14
+#define CP0_PRID $15
+#define CP0_EBASE $15, 1
+#define CP0_CMGCRBASE $15, 3
+#define CP0_CONFIG $16
+#define CP0_CONFIG3 $16, 3
+#define CP0_CONFIG5 $16, 5
+#define CP0_LLADDR $17
+#define CP0_WATCHLO $18
+#define CP0_WATCHHI $19
+#define CP0_XCONTEXT $20
+#define CP0_FRAMEMASK $21
+#define CP0_DIAGNOSTIC $22
+#define CP0_DEBUG $23
+#define CP0_DEPC $24
+#define CP0_PERFORMANCE $25
+#define CP0_ECC $26
+#define CP0_CACHEERR $27
+#define CP0_TAGLO $28
+#define CP0_TAGHI $29
+#define CP0_ERROREPC $30
+#define CP0_DESAVE $31
+
+/*
+ * R4640/R4650 cp0 register names. These registers are listed
+ * here only for completeness; without MMU these CPUs are not usable
+ * by Linux. A future ELKS port might make Linux run on them,
+ * though ...
+ */
+#define CP0_IBASE $0
+#define CP0_IBOUND $1
+#define CP0_DBASE $2
+#define CP0_DBOUND $3
+#define CP0_CALG $17
+#define CP0_IWATCH $18
+#define CP0_DWATCH $19
+
+/*
+ * Coprocessor 0 Set 1 register names
+ */
+#define CP0_S1_DERRADDR0 $26
+#define CP0_S1_DERRADDR1 $27
+#define CP0_S1_INTCONTROL $20
+
+/*
+ * Coprocessor 0 Set 2 register names
+ */
+#define CP0_S2_SRSCTL $12	/* MIPSR2 */
+
+/*
+ * Coprocessor 0 Set 3 register names
+ */
+#define CP0_S3_SRSMAP $12	/* MIPSR2 */
+
+/*
+ * TX39 Series
+ */
+#define CP0_TX39_CACHE $7
+
+
+/* Generic EntryLo bit definitions */
+#define ENTRYLO_G (_ULCAST_(1) << 0)
+#define ENTRYLO_V (_ULCAST_(1) << 1)
+#define ENTRYLO_D (_ULCAST_(1) << 2)
+#define ENTRYLO_C_SHIFT 3
+#define ENTRYLO_C (_ULCAST_(7) << ENTRYLO_C_SHIFT)
+
+/* R3000 EntryLo bit definitions */
+#define R3K_ENTRYLO_G (_ULCAST_(1) << 8)
+#define R3K_ENTRYLO_V (_ULCAST_(1) << 9)
+#define R3K_ENTRYLO_D (_ULCAST_(1) << 10)
+#define R3K_ENTRYLO_N (_ULCAST_(1) << 11)
+
+/* MIPS32/64 EntryLo bit definitions */
+#define MIPS_ENTRYLO_PFN_SHIFT 6
+#define MIPS_ENTRYLO_XI (_ULCAST_(1) << (BITS_PER_LONG - 2))
+#define MIPS_ENTRYLO_RI (_ULCAST_(1) << (BITS_PER_LONG - 1))
+
+/*
+ * Values for PageMask register
+ */
+#ifdef CONFIG_CPU_VR41XX
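+
+/*
+ * In both variants below, each PageMask value sets the address bits
+ * between the minimum page size and the selected one: masks start at
+ * bit 13 with a 4KB base page, or at bit 11 on VR41xx, whose base page
+ * is 1KB. A hypothetical probe for 16KB page support, using the
+ * accessors defined further down in this file:
+ *
+ *	write_c0_pagemask(PM_16K);
+ *	supported = (read_c0_pagemask() == PM_16K);
+ */
+
+/* Why doesn't stupidity hurt ...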
*/ + +#define PM_1K 0x00000000 +#define PM_4K 0x00001800 +#define PM_16K 0x00007800 +#define PM_64K 0x0001f800 +#define PM_256K 0x0007f800 + +#else + +#define PM_4K 0x00000000 +#define PM_8K 0x00002000 +#define PM_16K 0x00006000 +#define PM_32K 0x0000e000 +#define PM_64K 0x0001e000 +#define PM_128K 0x0003e000 +#define PM_256K 0x0007e000 +#define PM_512K 0x000fe000 +#define PM_1M 0x001fe000 +#define PM_2M 0x003fe000 +#define PM_4M 0x007fe000 +#define PM_8M 0x00ffe000 +#define PM_16M 0x01ffe000 +#define PM_32M 0x03ffe000 +#define PM_64M 0x07ffe000 +#define PM_256M 0x1fffe000 +#define PM_1G 0x7fffe000 + +#endif + +/* + * Values used for computation of new tlb entries + */ +#define PL_4K 12 +#define PL_16K 14 +#define PL_64K 16 +#define PL_256K 18 +#define PL_1M 20 +#define PL_4M 22 +#define PL_16M 24 +#define PL_64M 26 +#define PL_256M 28 + +/* + * PageGrain bits + */ +#define PG_RIE (_ULCAST_(1) << 31) +#define PG_XIE (_ULCAST_(1) << 30) +#define PG_ELPA (_ULCAST_(1) << 29) +#define PG_ESP (_ULCAST_(1) << 28) +#define PG_IEC (_ULCAST_(1) << 27) + +/* MIPS32/64 EntryHI bit definitions */ +#define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10) + +/* + * R4x00 interrupt enable / cause bits + */ +#define IE_SW0 (_ULCAST_(1) << 8) +#define IE_SW1 (_ULCAST_(1) << 9) +#define IE_IRQ0 (_ULCAST_(1) << 10) +#define IE_IRQ1 (_ULCAST_(1) << 11) +#define IE_IRQ2 (_ULCAST_(1) << 12) +#define IE_IRQ3 (_ULCAST_(1) << 13) +#define IE_IRQ4 (_ULCAST_(1) << 14) +#define IE_IRQ5 (_ULCAST_(1) << 15) + +/* + * R4x00 interrupt cause bits + */ +#define C_SW0 (_ULCAST_(1) << 8) +#define C_SW1 (_ULCAST_(1) << 9) +#define C_IRQ0 (_ULCAST_(1) << 10) +#define C_IRQ1 (_ULCAST_(1) << 11) +#define C_IRQ2 (_ULCAST_(1) << 12) +#define C_IRQ3 (_ULCAST_(1) << 13) +#define C_IRQ4 (_ULCAST_(1) << 14) +#define C_IRQ5 (_ULCAST_(1) << 15) + +/* + * Bitfields in the R4xx0 cp0 status register + */ +#define ST0_IE 0x00000001 +#define ST0_EXL 0x00000002 +#define ST0_ERL 0x00000004 +#define ST0_KSU 0x00000018 +# define KSU_USER 0x00000010 +# define KSU_SUPERVISOR 0x00000008 +# define KSU_KERNEL 0x00000000 +#define ST0_UX 0x00000020 +#define ST0_SX 0x00000040 +#define ST0_KX 0x00000080 +#define ST0_DE 0x00010000 +#define ST0_CE 0x00020000 + +/* + * Setting c0_status.co enables Hit_Writeback and Hit_Writeback_Invalidate + * cacheops in userspace. This bit exists only on RM7000 and RM9000 + * processors. + */ +#define ST0_CO 0x08000000 + +/* + * Bitfields in the R[23]000 cp0 status register. + */ +#define ST0_IEC 0x00000001 +#define ST0_KUC 0x00000002 +#define ST0_IEP 0x00000004 +#define ST0_KUP 0x00000008 +#define ST0_IEO 0x00000010 +#define ST0_KUO 0x00000020 +/* bits 6 & 7 are reserved on R[23]000 */ +#define ST0_ISC 0x00010000 +#define ST0_SWC 0x00020000 +#define ST0_CM 0x00080000 + +/* + * Bits specific to the R4640/R4650 + */ +#define ST0_UM (_ULCAST_(1) << 4) +#define ST0_IL (_ULCAST_(1) << 23) +#define ST0_DL (_ULCAST_(1) << 24) + +/* + * Enable the MIPS MDMX and DSP ASEs + */ +#define ST0_MX 0x01000000 + +/* + * Status register bits available in all MIPS CPUs. 
+ */ +#define ST0_IM 0x0000ff00 +#define STATUSB_IP0 8 +#define STATUSF_IP0 (_ULCAST_(1) << 8) +#define STATUSB_IP1 9 +#define STATUSF_IP1 (_ULCAST_(1) << 9) +#define STATUSB_IP2 10 +#define STATUSF_IP2 (_ULCAST_(1) << 10) +#define STATUSB_IP3 11 +#define STATUSF_IP3 (_ULCAST_(1) << 11) +#define STATUSB_IP4 12 +#define STATUSF_IP4 (_ULCAST_(1) << 12) +#define STATUSB_IP5 13 +#define STATUSF_IP5 (_ULCAST_(1) << 13) +#define STATUSB_IP6 14 +#define STATUSF_IP6 (_ULCAST_(1) << 14) +#define STATUSB_IP7 15 +#define STATUSF_IP7 (_ULCAST_(1) << 15) +#define STATUSB_IP8 0 +#define STATUSF_IP8 (_ULCAST_(1) << 0) +#define STATUSB_IP9 1 +#define STATUSF_IP9 (_ULCAST_(1) << 1) +#define STATUSB_IP10 2 +#define STATUSF_IP10 (_ULCAST_(1) << 2) +#define STATUSB_IP11 3 +#define STATUSF_IP11 (_ULCAST_(1) << 3) +#define STATUSB_IP12 4 +#define STATUSF_IP12 (_ULCAST_(1) << 4) +#define STATUSB_IP13 5 +#define STATUSF_IP13 (_ULCAST_(1) << 5) +#define STATUSB_IP14 6 +#define STATUSF_IP14 (_ULCAST_(1) << 6) +#define STATUSB_IP15 7 +#define STATUSF_IP15 (_ULCAST_(1) << 7) +#define ST0_IMPL (_ULCAST_(3) << 16) +#define ST0_CH 0x00040000 +#define ST0_NMI 0x00080000 +#define ST0_SR 0x00100000 +#define ST0_TS 0x00200000 +#define ST0_BEV 0x00400000 +#define ST0_RE 0x02000000 +#define ST0_FR 0x04000000 +#define ST0_CU 0xf0000000 +#define ST0_CU0 0x10000000 +#define ST0_CU1 0x20000000 +#define ST0_CU2 0x40000000 +#define ST0_CU3 0x80000000 +#define ST0_XX 0x80000000 /* MIPS IV naming */ + +/* + * Bitfields and bit numbers in the coprocessor 0 IntCtl register. (MIPSR2) + */ +#define INTCTLB_IPFDC 23 +#define INTCTLF_IPFDC (_ULCAST_(7) << INTCTLB_IPFDC) +#define INTCTLB_IPPCI 26 +#define INTCTLF_IPPCI (_ULCAST_(7) << INTCTLB_IPPCI) +#define INTCTLB_IPTI 29 +#define INTCTLF_IPTI (_ULCAST_(7) << INTCTLB_IPTI) + +/* + * Bitfields and bit numbers in the coprocessor 0 cause register. + * + * Refer to your MIPS R4xx0 manual, chapter 5 for explanation. + */ +#define CAUSEB_EXCCODE 2 +#define CAUSEF_EXCCODE (_ULCAST_(31) << 2) +#define CAUSEB_IP 8 +#define CAUSEF_IP (_ULCAST_(255) << 8) +#define CAUSEB_IP0 8 +#define CAUSEF_IP0 (_ULCAST_(1) << 8) +#define CAUSEB_IP1 9 +#define CAUSEF_IP1 (_ULCAST_(1) << 9) +#define CAUSEB_IP2 10 +#define CAUSEF_IP2 (_ULCAST_(1) << 10) +#define CAUSEB_IP3 11 +#define CAUSEF_IP3 (_ULCAST_(1) << 11) +#define CAUSEB_IP4 12 +#define CAUSEF_IP4 (_ULCAST_(1) << 12) +#define CAUSEB_IP5 13 +#define CAUSEF_IP5 (_ULCAST_(1) << 13) +#define CAUSEB_IP6 14 +#define CAUSEF_IP6 (_ULCAST_(1) << 14) +#define CAUSEB_IP7 15 +#define CAUSEF_IP7 (_ULCAST_(1) << 15) +#define CAUSEB_FDCI 21 +#define CAUSEF_FDCI (_ULCAST_(1) << 21) +#define CAUSEB_IV 23 +#define CAUSEF_IV (_ULCAST_(1) << 23) +#define CAUSEB_PCI 26 +#define CAUSEF_PCI (_ULCAST_(1) << 26) +#define CAUSEB_CE 28 +#define CAUSEF_CE (_ULCAST_(3) << 28) +#define CAUSEB_TI 30 +#define CAUSEF_TI (_ULCAST_(1) << 30) +#define CAUSEB_BD 31 +#define CAUSEF_BD (_ULCAST_(1) << 31) + +/* + * Bits in the coprocessor 0 EBase register. + */ +#define EBASE_CPUNUM 0x3ff + +/* + * Bits in the coprocessor 0 config register. + */ +/* Generic bits. */ +#define CONF_CM_CACHABLE_NO_WA 0 +#define CONF_CM_CACHABLE_WA 1 +#define CONF_CM_UNCACHED 2 +#define CONF_CM_CACHABLE_NONCOHERENT 3 +#define CONF_CM_CACHABLE_CE 4 +#define CONF_CM_CACHABLE_COW 5 +#define CONF_CM_CACHABLE_CUW 6 +#define CONF_CM_CACHABLE_ACCELERATED 7 +#define CONF_CM_CMASK 7 +#define CONF_BE (_ULCAST_(1) << 15) + +/* Bits common to various processors. 
*/ +#define CONF_CU (_ULCAST_(1) << 3) +#define CONF_DB (_ULCAST_(1) << 4) +#define CONF_IB (_ULCAST_(1) << 5) +#define CONF_DC (_ULCAST_(7) << 6) +#define CONF_IC (_ULCAST_(7) << 9) +#define CONF_EB (_ULCAST_(1) << 13) +#define CONF_EM (_ULCAST_(1) << 14) +#define CONF_SM (_ULCAST_(1) << 16) +#define CONF_SC (_ULCAST_(1) << 17) +#define CONF_EW (_ULCAST_(3) << 18) +#define CONF_EP (_ULCAST_(15) << 24) +#define CONF_EC (_ULCAST_(7) << 28) +#define CONF_CM (_ULCAST_(1) << 31) + +/* Bits specific to the R4xx0. */ +#define R4K_CONF_SW (_ULCAST_(1) << 20) +#define R4K_CONF_SS (_ULCAST_(1) << 21) +#define R4K_CONF_SB (_ULCAST_(3) << 22) + +/* Bits specific to the R5000. */ +#define R5K_CONF_SE (_ULCAST_(1) << 12) +#define R5K_CONF_SS (_ULCAST_(3) << 20) + +/* Bits specific to the RM7000. */ +#define RM7K_CONF_SE (_ULCAST_(1) << 3) +#define RM7K_CONF_TE (_ULCAST_(1) << 12) +#define RM7K_CONF_CLK (_ULCAST_(1) << 16) +#define RM7K_CONF_TC (_ULCAST_(1) << 17) +#define RM7K_CONF_SI (_ULCAST_(3) << 20) +#define RM7K_CONF_SC (_ULCAST_(1) << 31) + +/* Bits specific to the R10000. */ +#define R10K_CONF_DN (_ULCAST_(3) << 3) +#define R10K_CONF_CT (_ULCAST_(1) << 5) +#define R10K_CONF_PE (_ULCAST_(1) << 6) +#define R10K_CONF_PM (_ULCAST_(3) << 7) +#define R10K_CONF_EC (_ULCAST_(15) << 9) +#define R10K_CONF_SB (_ULCAST_(1) << 13) +#define R10K_CONF_SK (_ULCAST_(1) << 14) +#define R10K_CONF_SS (_ULCAST_(7) << 16) +#define R10K_CONF_SC (_ULCAST_(7) << 19) +#define R10K_CONF_DC (_ULCAST_(7) << 26) +#define R10K_CONF_IC (_ULCAST_(7) << 29) + +/* Bits specific to the VR41xx. */ +#define VR41_CONF_CS (_ULCAST_(1) << 12) +#define VR41_CONF_P4K (_ULCAST_(1) << 13) +#define VR41_CONF_BP (_ULCAST_(1) << 16) +#define VR41_CONF_M16 (_ULCAST_(1) << 20) +#define VR41_CONF_AD (_ULCAST_(1) << 23) + +/* Bits specific to the R30xx. */ +#define R30XX_CONF_FDM (_ULCAST_(1) << 19) +#define R30XX_CONF_REV (_ULCAST_(1) << 22) +#define R30XX_CONF_AC (_ULCAST_(1) << 23) +#define R30XX_CONF_RF (_ULCAST_(1) << 24) +#define R30XX_CONF_HALT (_ULCAST_(1) << 25) +#define R30XX_CONF_FPINT (_ULCAST_(7) << 26) +#define R30XX_CONF_DBR (_ULCAST_(1) << 29) +#define R30XX_CONF_SB (_ULCAST_(1) << 30) +#define R30XX_CONF_LOCK (_ULCAST_(1) << 31) + +/* Bits specific to the TX49. */ +#define TX49_CONF_DC (_ULCAST_(1) << 16) +#define TX49_CONF_IC (_ULCAST_(1) << 17) /* conflict with CONF_SC */ +#define TX49_CONF_HALT (_ULCAST_(1) << 18) +#define TX49_CONF_CWFON (_ULCAST_(1) << 27) + +/* Bits specific to the MIPS32/64 PRA. */ +#define MIPS_CONF_MT (_ULCAST_(7) << 7) +#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7) +#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7) +#define MIPS_CONF_AR (_ULCAST_(7) << 10) +#define MIPS_CONF_AT (_ULCAST_(3) << 13) +#define MIPS_CONF_IMPL (_ULCAST_(0x1ff) << 16) +#define MIPS_CONF_M (_ULCAST_(1) << 31) + +/* + * Bits in the MIPS32/64 PRA coprocessor 0 config registers 1 and above. 
+ */ +#define MIPS_CONF1_FP (_ULCAST_(1) << 0) +#define MIPS_CONF1_EP (_ULCAST_(1) << 1) +#define MIPS_CONF1_CA (_ULCAST_(1) << 2) +#define MIPS_CONF1_WR (_ULCAST_(1) << 3) +#define MIPS_CONF1_PC (_ULCAST_(1) << 4) +#define MIPS_CONF1_MD (_ULCAST_(1) << 5) +#define MIPS_CONF1_C2 (_ULCAST_(1) << 6) +#define MIPS_CONF1_DA_SHF 7 +#define MIPS_CONF1_DA_SZ 3 +#define MIPS_CONF1_DA (_ULCAST_(7) << 7) +#define MIPS_CONF1_DL_SHF 10 +#define MIPS_CONF1_DL_SZ 3 +#define MIPS_CONF1_DL (_ULCAST_(7) << 10) +#define MIPS_CONF1_DS_SHF 13 +#define MIPS_CONF1_DS_SZ 3 +#define MIPS_CONF1_DS (_ULCAST_(7) << 13) +#define MIPS_CONF1_IA_SHF 16 +#define MIPS_CONF1_IA_SZ 3 +#define MIPS_CONF1_IA (_ULCAST_(7) << 16) +#define MIPS_CONF1_IL_SHF 19 +#define MIPS_CONF1_IL_SZ 3 +#define MIPS_CONF1_IL (_ULCAST_(7) << 19) +#define MIPS_CONF1_IS_SHF 22 +#define MIPS_CONF1_IS_SZ 3 +#define MIPS_CONF1_IS (_ULCAST_(7) << 22) +#define MIPS_CONF1_TLBS_SHIFT (25) +#define MIPS_CONF1_TLBS_SIZE (6) +#define MIPS_CONF1_TLBS (_ULCAST_(63) << MIPS_CONF1_TLBS_SHIFT) + +#define MIPS_CONF2_SA_SHF 0 +#define MIPS_CONF2_SA (_ULCAST_(15) << 0) +#define MIPS_CONF2_SL_SHF 4 +#define MIPS_CONF2_SL (_ULCAST_(15) << 4) +#define MIPS_CONF2_SS_SHF 8 +#define MIPS_CONF2_SS (_ULCAST_(15) << 8) +#define MIPS_CONF2_L2B (_ULCAST_(1) << 12) +#define MIPS_CONF2_SU (_ULCAST_(15) << 12) +#define MIPS_CONF2_TA (_ULCAST_(15) << 16) +#define MIPS_CONF2_TL (_ULCAST_(15) << 20) +#define MIPS_CONF2_TS (_ULCAST_(15) << 24) +#define MIPS_CONF2_TU (_ULCAST_(7) << 28) + +#define MIPS_CONF3_TL (_ULCAST_(1) << 0) +#define MIPS_CONF3_SM (_ULCAST_(1) << 1) +#define MIPS_CONF3_MT (_ULCAST_(1) << 2) +#define MIPS_CONF3_CDMM (_ULCAST_(1) << 3) +#define MIPS_CONF3_SP (_ULCAST_(1) << 4) +#define MIPS_CONF3_VINT (_ULCAST_(1) << 5) +#define MIPS_CONF3_VEIC (_ULCAST_(1) << 6) +#define MIPS_CONF3_LPA (_ULCAST_(1) << 7) +#define MIPS_CONF3_ITL (_ULCAST_(1) << 8) +#define MIPS_CONF3_CTXTC (_ULCAST_(1) << 9) +#define MIPS_CONF3_DSP (_ULCAST_(1) << 10) +#define MIPS_CONF3_DSP2P (_ULCAST_(1) << 11) +#define MIPS_CONF3_RXI (_ULCAST_(1) << 12) +#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13) +#define MIPS_CONF3_ISA (_ULCAST_(3) << 14) +#define MIPS_CONF3_ISA_OE (_ULCAST_(1) << 16) +#define MIPS_CONF3_MCU (_ULCAST_(1) << 17) +#define MIPS_CONF3_MMAR (_ULCAST_(7) << 18) +#define MIPS_CONF3_IPLW (_ULCAST_(3) << 21) +#define MIPS_CONF3_VZ (_ULCAST_(1) << 23) +#define MIPS_CONF3_PW (_ULCAST_(1) << 24) +#define MIPS_CONF3_SC (_ULCAST_(1) << 25) +#define MIPS_CONF3_BI (_ULCAST_(1) << 26) +#define MIPS_CONF3_BP (_ULCAST_(1) << 27) +#define MIPS_CONF3_MSA (_ULCAST_(1) << 28) +#define MIPS_CONF3_CMGCR (_ULCAST_(1) << 29) +#define MIPS_CONF3_BPG (_ULCAST_(1) << 30) + +#define MIPS_CONF4_MMUSIZEEXT_SHIFT (0) +#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) +#define MIPS_CONF4_FTLBSETS_SHIFT (0) +#define MIPS_CONF4_FTLBSETS (_ULCAST_(15) << MIPS_CONF4_FTLBSETS_SHIFT) +#define MIPS_CONF4_FTLBWAYS_SHIFT (4) +#define MIPS_CONF4_FTLBWAYS (_ULCAST_(15) << MIPS_CONF4_FTLBWAYS_SHIFT) +#define MIPS_CONF4_FTLBPAGESIZE_SHIFT (8) +/* bits 10:8 in FTLB-only configurations */ +#define MIPS_CONF4_FTLBPAGESIZE (_ULCAST_(7) << MIPS_CONF4_FTLBPAGESIZE_SHIFT) +/* bits 12:8 in VTLB-FTLB only configurations */ +#define MIPS_CONF4_VFTLBPAGESIZE (_ULCAST_(31) << MIPS_CONF4_FTLBPAGESIZE_SHIFT) +#define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14) +#define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14) +#define MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT (_ULCAST_(2) << 14) +#define MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT (_ULCAST_(3) << 
14) +#define MIPS_CONF4_KSCREXIST (_ULCAST_(255) << 16) +#define MIPS_CONF4_VTLBSIZEEXT_SHIFT (24) +#define MIPS_CONF4_VTLBSIZEEXT (_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT) +#define MIPS_CONF4_AE (_ULCAST_(1) << 28) +#define MIPS_CONF4_IE (_ULCAST_(3) << 29) +#define MIPS_CONF4_TLBINV (_ULCAST_(2) << 29) + +#define MIPS_CONF5_NF (_ULCAST_(1) << 0) +#define MIPS_CONF5_UFR (_ULCAST_(1) << 2) +#define MIPS_CONF5_MRP (_ULCAST_(1) << 3) +#define MIPS_CONF5_LLB (_ULCAST_(1) << 4) +#define MIPS_CONF5_MVH (_ULCAST_(1) << 5) +#define MIPS_CONF5_VP (_ULCAST_(1) << 7) +#define MIPS_CONF5_FRE (_ULCAST_(1) << 8) +#define MIPS_CONF5_UFE (_ULCAST_(1) << 9) +#define MIPS_CONF5_L2C (_ULCAST_(1) << 10) +#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27) +#define MIPS_CONF5_EVA (_ULCAST_(1) << 28) +#define MIPS_CONF5_CV (_ULCAST_(1) << 29) +#define MIPS_CONF5_K (_ULCAST_(1) << 30) + +#define MIPS_CONF6_SYND (_ULCAST_(1) << 13) +/* proAptiv FTLB on/off bit */ +#define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15) +/* FTLB probability bits */ +#define MIPS_CONF6_FTLBP_SHIFT (16) + +#define MIPS_CONF7_WII (_ULCAST_(1) << 31) + +#define MIPS_CONF7_RPS (_ULCAST_(1) << 2) + +#define MIPS_CONF7_IAR (_ULCAST_(1) << 10) +#define MIPS_CONF7_AR (_ULCAST_(1) << 16) +/* FTLB probability bits for R6 */ +#define MIPS_CONF7_FTLBP_SHIFT (18) + +/* MAAR bit definitions */ +#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12) +#define MIPS_MAAR_ADDR_SHIFT 12 +#define MIPS_MAAR_S (_ULCAST_(1) << 1) +#define MIPS_MAAR_V (_ULCAST_(1) << 0) + +/* CMGCRBase bit definitions */ +#define MIPS_CMGCRB_BASE 11 +#define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1)) + +/* + * Bits in the MIPS32 Memory Segmentation registers. + */ +#define MIPS_SEGCFG_PA_SHIFT 9 +#define MIPS_SEGCFG_PA (_ULCAST_(127) << MIPS_SEGCFG_PA_SHIFT) +#define MIPS_SEGCFG_AM_SHIFT 4 +#define MIPS_SEGCFG_AM (_ULCAST_(7) << MIPS_SEGCFG_AM_SHIFT) +#define MIPS_SEGCFG_EU_SHIFT 3 +#define MIPS_SEGCFG_EU (_ULCAST_(1) << MIPS_SEGCFG_EU_SHIFT) +#define MIPS_SEGCFG_C_SHIFT 0 +#define MIPS_SEGCFG_C (_ULCAST_(7) << MIPS_SEGCFG_C_SHIFT) + +#define MIPS_SEGCFG_UUSK _ULCAST_(7) +#define MIPS_SEGCFG_USK _ULCAST_(5) +#define MIPS_SEGCFG_MUSUK _ULCAST_(4) +#define MIPS_SEGCFG_MUSK _ULCAST_(3) +#define MIPS_SEGCFG_MSK _ULCAST_(2) +#define MIPS_SEGCFG_MK _ULCAST_(1) +#define MIPS_SEGCFG_UK _ULCAST_(0) + +#define MIPS_PWFIELD_GDI_SHIFT 24 +#define MIPS_PWFIELD_GDI_MASK 0x3f000000 +#define MIPS_PWFIELD_UDI_SHIFT 18 +#define MIPS_PWFIELD_UDI_MASK 0x00fc0000 +#define MIPS_PWFIELD_MDI_SHIFT 12 +#define MIPS_PWFIELD_MDI_MASK 0x0003f000 +#define MIPS_PWFIELD_PTI_SHIFT 6 +#define MIPS_PWFIELD_PTI_MASK 0x00000fc0 +#define MIPS_PWFIELD_PTEI_SHIFT 0 +#define MIPS_PWFIELD_PTEI_MASK 0x0000003f + +#define MIPS_PWSIZE_GDW_SHIFT 24 +#define MIPS_PWSIZE_GDW_MASK 0x3f000000 +#define MIPS_PWSIZE_UDW_SHIFT 18 +#define MIPS_PWSIZE_UDW_MASK 0x00fc0000 +#define MIPS_PWSIZE_MDW_SHIFT 12 +#define MIPS_PWSIZE_MDW_MASK 0x0003f000 +#define MIPS_PWSIZE_PTW_SHIFT 6 +#define MIPS_PWSIZE_PTW_MASK 0x00000fc0 +#define MIPS_PWSIZE_PTEW_SHIFT 0 +#define MIPS_PWSIZE_PTEW_MASK 0x0000003f + +#define MIPS_PWCTL_PWEN_SHIFT 31 +#define MIPS_PWCTL_PWEN_MASK 0x80000000 +#define MIPS_PWCTL_DPH_SHIFT 7 +#define MIPS_PWCTL_DPH_MASK 0x00000080 +#define MIPS_PWCTL_HUGEPG_SHIFT 6 +#define MIPS_PWCTL_HUGEPG_MASK 0x00000060 +#define MIPS_PWCTL_PSN_SHIFT 0 +#define MIPS_PWCTL_PSN_MASK 0x0000003f + +/* CDMMBase register bit definitions */ +#define MIPS_CDMMBASE_SIZE_SHIFT 0 +#define MIPS_CDMMBASE_SIZE (_ULCAST_(511) << 
MIPS_CDMMBASE_SIZE_SHIFT) +#define MIPS_CDMMBASE_CI (_ULCAST_(1) << 9) +#define MIPS_CDMMBASE_EN (_ULCAST_(1) << 10) +#define MIPS_CDMMBASE_ADDR_SHIFT 11 +#define MIPS_CDMMBASE_ADDR_START 15 + +/* + * Bitfields in the TX39 family CP0 Configuration Register 3 + */ +#define TX39_CONF_ICS_SHIFT 19 +#define TX39_CONF_ICS_MASK 0x00380000 +#define TX39_CONF_ICS_1KB 0x00000000 +#define TX39_CONF_ICS_2KB 0x00080000 +#define TX39_CONF_ICS_4KB 0x00100000 +#define TX39_CONF_ICS_8KB 0x00180000 +#define TX39_CONF_ICS_16KB 0x00200000 + +#define TX39_CONF_DCS_SHIFT 16 +#define TX39_CONF_DCS_MASK 0x00070000 +#define TX39_CONF_DCS_1KB 0x00000000 +#define TX39_CONF_DCS_2KB 0x00010000 +#define TX39_CONF_DCS_4KB 0x00020000 +#define TX39_CONF_DCS_8KB 0x00030000 +#define TX39_CONF_DCS_16KB 0x00040000 + +#define TX39_CONF_CWFON 0x00004000 +#define TX39_CONF_WBON 0x00002000 +#define TX39_CONF_RF_SHIFT 10 +#define TX39_CONF_RF_MASK 0x00000c00 +#define TX39_CONF_DOZE 0x00000200 +#define TX39_CONF_HALT 0x00000100 +#define TX39_CONF_LOCK 0x00000080 +#define TX39_CONF_ICE 0x00000020 +#define TX39_CONF_DCE 0x00000010 +#define TX39_CONF_IRSIZE_SHIFT 2 +#define TX39_CONF_IRSIZE_MASK 0x0000000c +#define TX39_CONF_DRSIZE_SHIFT 0 +#define TX39_CONF_DRSIZE_MASK 0x00000003 + +/* + * Interesting Bits in the R10K CP0 Branch Diagnostic Register + */ +/* Disable Branch Target Address Cache */ +#define R10K_DIAG_D_BTAC (_ULCAST_(1) << 27) +/* Enable Branch Prediction Global History */ +#define R10K_DIAG_E_GHIST (_ULCAST_(1) << 26) +/* Disable Branch Return Cache */ +#define R10K_DIAG_D_BRC (_ULCAST_(1) << 22) + +/* + * Coprocessor 1 (FPU) register names + */ +#define CP1_REVISION $0 +#define CP1_UFR $1 +#define CP1_UNFR $4 +#define CP1_FCCR $25 +#define CP1_FEXR $26 +#define CP1_FENR $28 +#define CP1_STATUS $31 + + +/* + * Bits in the MIPS32/64 coprocessor 1 (FPU) revision register. + */ +#define MIPS_FPIR_S (_ULCAST_(1) << 16) +#define MIPS_FPIR_D (_ULCAST_(1) << 17) +#define MIPS_FPIR_PS (_ULCAST_(1) << 18) +#define MIPS_FPIR_3D (_ULCAST_(1) << 19) +#define MIPS_FPIR_W (_ULCAST_(1) << 20) +#define MIPS_FPIR_L (_ULCAST_(1) << 21) +#define MIPS_FPIR_F64 (_ULCAST_(1) << 22) +#define MIPS_FPIR_HAS2008 (_ULCAST_(1) << 23) +#define MIPS_FPIR_UFRP (_ULCAST_(1) << 28) +#define MIPS_FPIR_FREP (_ULCAST_(1) << 29) + +/* + * Bits in the MIPS32/64 coprocessor 1 (FPU) condition codes register. + */ +#define MIPS_FCCR_CONDX_S 0 +#define MIPS_FCCR_CONDX (_ULCAST_(255) << MIPS_FCCR_CONDX_S) +#define MIPS_FCCR_COND0_S 0 +#define MIPS_FCCR_COND0 (_ULCAST_(1) << MIPS_FCCR_COND0_S) +#define MIPS_FCCR_COND1_S 1 +#define MIPS_FCCR_COND1 (_ULCAST_(1) << MIPS_FCCR_COND1_S) +#define MIPS_FCCR_COND2_S 2 +#define MIPS_FCCR_COND2 (_ULCAST_(1) << MIPS_FCCR_COND2_S) +#define MIPS_FCCR_COND3_S 3 +#define MIPS_FCCR_COND3 (_ULCAST_(1) << MIPS_FCCR_COND3_S) +#define MIPS_FCCR_COND4_S 4 +#define MIPS_FCCR_COND4 (_ULCAST_(1) << MIPS_FCCR_COND4_S) +#define MIPS_FCCR_COND5_S 5 +#define MIPS_FCCR_COND5 (_ULCAST_(1) << MIPS_FCCR_COND5_S) +#define MIPS_FCCR_COND6_S 6 +#define MIPS_FCCR_COND6 (_ULCAST_(1) << MIPS_FCCR_COND6_S) +#define MIPS_FCCR_COND7_S 7 +#define MIPS_FCCR_COND7 (_ULCAST_(1) << MIPS_FCCR_COND7_S) + +/* + * Bits in the MIPS32/64 coprocessor 1 (FPU) enables register. 
+ */ +#define MIPS_FENR_FS_S 2 +#define MIPS_FENR_FS (_ULCAST_(1) << MIPS_FENR_FS_S) + +/* + * FPU Status Register Values + */ +#define FPU_CSR_COND_S 23 /* $fcc0 */ +#define FPU_CSR_COND (_ULCAST_(1) << FPU_CSR_COND_S) + +#define FPU_CSR_FS_S 24 /* flush denormalised results to 0 */ +#define FPU_CSR_FS (_ULCAST_(1) << FPU_CSR_FS_S) + +#define FPU_CSR_CONDX_S 25 /* $fcc[7:1] */ +#define FPU_CSR_CONDX (_ULCAST_(127) << FPU_CSR_CONDX_S) +#define FPU_CSR_COND1_S 25 /* $fcc1 */ +#define FPU_CSR_COND1 (_ULCAST_(1) << FPU_CSR_COND1_S) +#define FPU_CSR_COND2_S 26 /* $fcc2 */ +#define FPU_CSR_COND2 (_ULCAST_(1) << FPU_CSR_COND2_S) +#define FPU_CSR_COND3_S 27 /* $fcc3 */ +#define FPU_CSR_COND3 (_ULCAST_(1) << FPU_CSR_COND3_S) +#define FPU_CSR_COND4_S 28 /* $fcc4 */ +#define FPU_CSR_COND4 (_ULCAST_(1) << FPU_CSR_COND4_S) +#define FPU_CSR_COND5_S 29 /* $fcc5 */ +#define FPU_CSR_COND5 (_ULCAST_(1) << FPU_CSR_COND5_S) +#define FPU_CSR_COND6_S 30 /* $fcc6 */ +#define FPU_CSR_COND6 (_ULCAST_(1) << FPU_CSR_COND6_S) +#define FPU_CSR_COND7_S 31 /* $fcc7 */ +#define FPU_CSR_COND7 (_ULCAST_(1) << FPU_CSR_COND7_S) + +/* + * Bits 22:20 of the FPU Status Register will be read as 0, + * and should be written as zero. + */ +#define FPU_CSR_RSVD (_ULCAST_(7) << 20) + +#define FPU_CSR_ABS2008 (_ULCAST_(1) << 19) +#define FPU_CSR_NAN2008 (_ULCAST_(1) << 18) + +/* + * X the exception cause indicator + * E the exception enable + * S the sticky/flag bit +*/ +#define FPU_CSR_ALL_X 0x0003f000 +#define FPU_CSR_UNI_X 0x00020000 +#define FPU_CSR_INV_X 0x00010000 +#define FPU_CSR_DIV_X 0x00008000 +#define FPU_CSR_OVF_X 0x00004000 +#define FPU_CSR_UDF_X 0x00002000 +#define FPU_CSR_INE_X 0x00001000 + +#define FPU_CSR_ALL_E 0x00000f80 +#define FPU_CSR_INV_E 0x00000800 +#define FPU_CSR_DIV_E 0x00000400 +#define FPU_CSR_OVF_E 0x00000200 +#define FPU_CSR_UDF_E 0x00000100 +#define FPU_CSR_INE_E 0x00000080 + +#define FPU_CSR_ALL_S 0x0000007c +#define FPU_CSR_INV_S 0x00000040 +#define FPU_CSR_DIV_S 0x00000020 +#define FPU_CSR_OVF_S 0x00000010 +#define FPU_CSR_UDF_S 0x00000008 +#define FPU_CSR_INE_S 0x00000004 + +/* Bits 0 and 1 of FPU Status Register specify the rounding mode */ +#define FPU_CSR_RM 0x00000003 +#define FPU_CSR_RN 0x0 /* nearest */ +#define FPU_CSR_RZ 0x1 /* towards zero */ +#define FPU_CSR_RU 0x2 /* towards +Infinity */ +#define FPU_CSR_RD 0x3 /* towards -Infinity */ + + +#ifndef __ASSEMBLY__ + +/* + * Macros for handling the ISA mode bit for MIPS16 and microMIPS. + */ +#if defined(CONFIG_SYS_SUPPORTS_MIPS16) || \ + defined(CONFIG_SYS_SUPPORTS_MICROMIPS) +#define get_isa16_mode(x) ((x) & 0x1) +#define msk_isa16_mode(x) ((x) & ~0x1) +#define set_isa16_mode(x) do { (x) |= 0x1; } while (0) +#else +#define get_isa16_mode(x) 0 +#define msk_isa16_mode(x) (x) +#define set_isa16_mode(x) do { } while (0) +#endif + +/* + * microMIPS instructions can be 16-bit or 32-bit in length. This + * returns a 1 if the instruction is 16-bit and a 0 if 32-bit. + */ +static inline int mm_insn_16bit(u16 insn) +{ + u16 opcode = (insn >> 10) & 0x7; + + return (opcode >= 1 && opcode <= 3) ? 1 : 0; +} + +/* + * TLB Invalidate Flush + */ +static inline void tlbinvf(void) +{ + __asm__ __volatile__( + ".set push\n\t" + ".set noreorder\n\t" + ".word 0x42000004\n\t" /* tlbinvf */ + ".set pop"); +} + + +/* + * Functions to access the R10000 performance counters. These are basically + * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit + * performance counter number encoded into bits 1 ... 5 of the instruction. 
+ * Only performance counters 0 to 1 actually exist, so for a non-R10000 aware
+ * disassembler these will look like an access to sel 0 or 1.
+ */
+#define read_r10k_perf_cntr(counter)			\
+({							\
+	unsigned int __res;				\
+	__asm__ __volatile__(				\
+	"mfpc\t%0, %1"					\
+	: "=r" (__res)					\
+	: "i" (counter));				\
+							\
+	__res;						\
+})
+
+#define write_r10k_perf_cntr(counter,val)		\
+do {							\
+	__asm__ __volatile__(				\
+	"mtpc\t%0, %1"					\
+	:						\
+	: "r" (val), "i" (counter));			\
+} while (0)
+
+#define read_r10k_perf_event(counter)			\
+({							\
+	unsigned int __res;				\
+	__asm__ __volatile__(				\
+	"mfps\t%0, %1"					\
+	: "=r" (__res)					\
+	: "i" (counter));				\
+							\
+	__res;						\
+})
+
+#define write_r10k_perf_cntl(counter,val)		\
+do {							\
+	__asm__ __volatile__(				\
+	"mtps\t%0, %1"					\
+	:						\
+	: "r" (val), "i" (counter));			\
+} while (0)
+
+
+/*
+ * Macros to access the system control coprocessor
+ */
+
+#define __read_32bit_c0_register(source, sel)			\
+({ unsigned int __res;						\
+	if (sel == 0)						\
+		__asm__ __volatile__(				\
+			"mfc0\t%0, " #source "\n\t"		\
+			: "=r" (__res));			\
+	else							\
+		__asm__ __volatile__(				\
+			".set\tmips32\n\t"			\
+			"mfc0\t%0, " #source ", " #sel "\n\t"	\
+			".set\tmips0\n\t"			\
+			: "=r" (__res));			\
+	__res;							\
+})
+
+#define __read_64bit_c0_register(source, sel)			\
+({ unsigned long long __res;					\
+	if (sizeof(unsigned long) == 4)				\
+		__res = __read_64bit_c0_split(source, sel);	\
+	else if (sel == 0)					\
+		__asm__ __volatile__(				\
+			".set\tmips3\n\t"			\
+			"dmfc0\t%0, " #source "\n\t"		\
+			".set\tmips0"				\
+			: "=r" (__res));			\
+	else							\
+		__asm__ __volatile__(				\
+			".set\tmips64\n\t"			\
+			"dmfc0\t%0, " #source ", " #sel "\n\t"	\
+			".set\tmips0"				\
+			: "=r" (__res));			\
+	__res;							\
+})
+
+#define __write_32bit_c0_register(register, sel, value)		\
+do {								\
+	if (sel == 0)						\
+		__asm__ __volatile__(				\
+			"mtc0\t%z0, " #register "\n\t"		\
+			: : "Jr" ((unsigned int)(value)));	\
+	else							\
+		__asm__ __volatile__(				\
+			".set\tmips32\n\t"			\
+			"mtc0\t%z0, " #register ", " #sel "\n\t" \
+			".set\tmips0"				\
+			: : "Jr" ((unsigned int)(value)));	\
+} while (0)
+
+#define __write_64bit_c0_register(register, sel, value)		\
+do {								\
+	if (sizeof(unsigned long) == 4)				\
+		__write_64bit_c0_split(register, sel, value);	\
+	else if (sel == 0)					\
+		__asm__ __volatile__(				\
+			".set\tmips3\n\t"			\
+			"dmtc0\t%z0, " #register "\n\t"		\
+			".set\tmips0"				\
+			: : "Jr" (value));			\
+	else							\
+		__asm__ __volatile__(				\
+			".set\tmips64\n\t"			\
+			"dmtc0\t%z0, " #register ", " #sel "\n\t" \
+			".set\tmips0"				\
+			: : "Jr" (value));			\
+} while (0)
+
+#define __read_ulong_c0_register(reg, sel)			\
+	((sizeof(unsigned long) == 4) ?				\
+	(unsigned long) __read_32bit_c0_register(reg, sel) :	\
+	(unsigned long) __read_64bit_c0_register(reg, sel))
+
+#define __write_ulong_c0_register(reg, sel, val)		\
+do {								\
+	if (sizeof(unsigned long) == 4)				\
+		__write_32bit_c0_register(reg, sel, val);	\
+	else							\
+		__write_64bit_c0_register(reg, sel, val);	\
+} while (0)
+
+/*
+ * On RM7000/RM9000 these are used to access cop0 set 1 registers
+ */
+#define __read_32bit_c0_ctrl_register(source)			\
+({ unsigned int __res;						\
+	__asm__ __volatile__(					\
+		"cfc0\t%0, " #source "\n\t"			\
+		: "=r" (__res));				\
+	__res;							\
+})
+
+#define __write_32bit_c0_ctrl_register(register, value)		\
+do {								\
+	__asm__ __volatile__(					\
+		"ctc0\t%z0, " #register "\n\t"			\
+		: : "Jr" ((unsigned int)(value)));		\
+} while (0)
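+
+/*
+ * For example, sizing the VTLB from the Config1 register with the raw
+ * accessor above (a sketch; the read_c0_config1() wrapper further down
+ * performs the same register read):
+ *
+ *	unsigned int c1 = __read_32bit_c0_register($16, 1);
+ *	unsigned int tlbsize =
+ *		((c1 & MIPS_CONF1_TLBS) >> MIPS_CONF1_TLBS_SHIFT) + 1;
+ */
+
+/*
+ * These versions are only needed for systems with more than 38 bits of
+ * physical address space running the 32-bit kernel.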
That's none atm :-) + */ +#define __read_64bit_c0_split(source, sel) \ +({ \ + unsigned long long __val; \ + \ + if (sel == 0) \ + __asm__ __volatile__( \ + ".set\tmips64\n\t" \ + "dmfc0\t%M0, " #source "\n\t" \ + "dsll\t%L0, %M0, 32\n\t" \ + "dsra\t%M0, %M0, 32\n\t" \ + "dsra\t%L0, %L0, 32\n\t" \ + ".set\tmips0" \ + : "=r" (__val)); \ + else \ + __asm__ __volatile__( \ + ".set\tmips64\n\t" \ + "dmfc0\t%M0, " #source ", " #sel "\n\t" \ + "dsll\t%L0, %M0, 32\n\t" \ + "dsra\t%M0, %M0, 32\n\t" \ + "dsra\t%L0, %L0, 32\n\t" \ + ".set\tmips0" \ + : "=r" (__val)); \ + \ + __val; \ +}) + +#define __write_64bit_c0_split(source, sel, val) \ +do { \ + if (sel == 0) \ + __asm__ __volatile__( \ + ".set\tmips64\n\t" \ + "dsll\t%L0, %L0, 32\n\t" \ + "dsrl\t%L0, %L0, 32\n\t" \ + "dsll\t%M0, %M0, 32\n\t" \ + "or\t%L0, %L0, %M0\n\t" \ + "dmtc0\t%L0, " #source "\n\t" \ + ".set\tmips0" \ + : : "r" (val)); \ + else \ + __asm__ __volatile__( \ + ".set\tmips64\n\t" \ + "dsll\t%L0, %L0, 32\n\t" \ + "dsrl\t%L0, %L0, 32\n\t" \ + "dsll\t%M0, %M0, 32\n\t" \ + "or\t%L0, %L0, %M0\n\t" \ + "dmtc0\t%L0, " #source ", " #sel "\n\t" \ + ".set\tmips0" \ + : : "r" (val)); \ +} while (0) + +#define __readx_32bit_c0_register(source) \ +({ \ + unsigned int __res; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " .set mips32r2 \n" \ + " .insn \n" \ + " # mfhc0 $1, %1 \n" \ + " .word (0x40410000 | ((%1 & 0x1f) << 11)) \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__res) \ + : "i" (source)); \ + __res; \ +}) + +#define __writex_32bit_c0_register(register, value) \ +({ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " .set mips32r2 \n" \ + " move $1, %0 \n" \ + " # mthc0 $1, %1 \n" \ + " .insn \n" \ + " .word (0x40c10000 | ((%1 & 0x1f) << 11)) \n" \ + " .set pop \n" \ + : \ + : "r" (value), "i" (register)); \ +}) + +#define read_c0_index() __read_32bit_c0_register($0, 0) +#define write_c0_index(val) __write_32bit_c0_register($0, 0, val) + +#define read_c0_random() __read_32bit_c0_register($1, 0) +#define write_c0_random(val) __write_32bit_c0_register($1, 0, val) + +#define read_c0_entrylo0() __read_ulong_c0_register($2, 0) +#define write_c0_entrylo0(val) __write_ulong_c0_register($2, 0, val) + +#define readx_c0_entrylo0() __readx_32bit_c0_register(2) +#define writex_c0_entrylo0(val) __writex_32bit_c0_register(2, val) + +#define read_c0_entrylo1() __read_ulong_c0_register($3, 0) +#define write_c0_entrylo1(val) __write_ulong_c0_register($3, 0, val) + +#define readx_c0_entrylo1() __readx_32bit_c0_register(3) +#define writex_c0_entrylo1(val) __writex_32bit_c0_register(3, val) + +#define read_c0_conf() __read_32bit_c0_register($3, 0) +#define write_c0_conf(val) __write_32bit_c0_register($3, 0, val) + +#define read_c0_context() __read_ulong_c0_register($4, 0) +#define write_c0_context(val) __write_ulong_c0_register($4, 0, val) + +#define read_c0_userlocal() __read_ulong_c0_register($4, 2) +#define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val) + +#define read_c0_pagemask() __read_32bit_c0_register($5, 0) +#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val) + +#define read_c0_pagegrain() __read_32bit_c0_register($5, 1) +#define write_c0_pagegrain(val) __write_32bit_c0_register($5, 1, val) + +#define read_c0_wired() __read_32bit_c0_register($6, 0) +#define write_c0_wired(val) __write_32bit_c0_register($6, 0, val) + +#define read_c0_info() __read_32bit_c0_register($7, 0) + +#define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */ +#define 
write_c0_cache(val) __write_32bit_c0_register($7, 0, val) + +#define read_c0_badvaddr() __read_ulong_c0_register($8, 0) +#define write_c0_badvaddr(val) __write_ulong_c0_register($8, 0, val) + +#define read_c0_count() __read_32bit_c0_register($9, 0) +#define write_c0_count(val) __write_32bit_c0_register($9, 0, val) + +#define read_c0_count2() __read_32bit_c0_register($9, 6) /* pnx8550 */ +#define write_c0_count2(val) __write_32bit_c0_register($9, 6, val) + +#define read_c0_count3() __read_32bit_c0_register($9, 7) /* pnx8550 */ +#define write_c0_count3(val) __write_32bit_c0_register($9, 7, val) + +#define read_c0_entryhi() __read_ulong_c0_register($10, 0) +#define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val) + +#define read_c0_compare() __read_32bit_c0_register($11, 0) +#define write_c0_compare(val) __write_32bit_c0_register($11, 0, val) + +#define read_c0_compare2() __read_32bit_c0_register($11, 6) /* pnx8550 */ +#define write_c0_compare2(val) __write_32bit_c0_register($11, 6, val) + +#define read_c0_compare3() __read_32bit_c0_register($11, 7) /* pnx8550 */ +#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val) + +#define read_c0_status() __read_32bit_c0_register($12, 0) + +#define write_c0_status(val) __write_32bit_c0_register($12, 0, val) + +#define read_c0_cause() __read_32bit_c0_register($13, 0) +#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val) + +#define read_c0_epc() __read_ulong_c0_register($14, 0) +#define write_c0_epc(val) __write_ulong_c0_register($14, 0, val) + +#define read_c0_prid() __read_32bit_c0_register($15, 0) + +#define read_c0_cmgcrbase() __read_ulong_c0_register($15, 3) + +#define read_c0_config() __read_32bit_c0_register($16, 0) +#define read_c0_config1() __read_32bit_c0_register($16, 1) +#define read_c0_config2() __read_32bit_c0_register($16, 2) +#define read_c0_config3() __read_32bit_c0_register($16, 3) +#define read_c0_config4() __read_32bit_c0_register($16, 4) +#define read_c0_config5() __read_32bit_c0_register($16, 5) +#define read_c0_config6() __read_32bit_c0_register($16, 6) +#define read_c0_config7() __read_32bit_c0_register($16, 7) +#define write_c0_config(val) __write_32bit_c0_register($16, 0, val) +#define write_c0_config1(val) __write_32bit_c0_register($16, 1, val) +#define write_c0_config2(val) __write_32bit_c0_register($16, 2, val) +#define write_c0_config3(val) __write_32bit_c0_register($16, 3, val) +#define write_c0_config4(val) __write_32bit_c0_register($16, 4, val) +#define write_c0_config5(val) __write_32bit_c0_register($16, 5, val) +#define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) +#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) + +#define read_c0_lladdr() __read_ulong_c0_register($17, 0) +#define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val) +#define read_c0_maar() __read_ulong_c0_register($17, 1) +#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) +#define read_c0_maari() __read_32bit_c0_register($17, 2) +#define write_c0_maari(val) __write_32bit_c0_register($17, 2, val) + +/* + * The WatchLo register. There may be up to 8 of them. 
+ */ +#define read_c0_watchlo0() __read_ulong_c0_register($18, 0) +#define read_c0_watchlo1() __read_ulong_c0_register($18, 1) +#define read_c0_watchlo2() __read_ulong_c0_register($18, 2) +#define read_c0_watchlo3() __read_ulong_c0_register($18, 3) +#define read_c0_watchlo4() __read_ulong_c0_register($18, 4) +#define read_c0_watchlo5() __read_ulong_c0_register($18, 5) +#define read_c0_watchlo6() __read_ulong_c0_register($18, 6) +#define read_c0_watchlo7() __read_ulong_c0_register($18, 7) +#define write_c0_watchlo0(val) __write_ulong_c0_register($18, 0, val) +#define write_c0_watchlo1(val) __write_ulong_c0_register($18, 1, val) +#define write_c0_watchlo2(val) __write_ulong_c0_register($18, 2, val) +#define write_c0_watchlo3(val) __write_ulong_c0_register($18, 3, val) +#define write_c0_watchlo4(val) __write_ulong_c0_register($18, 4, val) +#define write_c0_watchlo5(val) __write_ulong_c0_register($18, 5, val) +#define write_c0_watchlo6(val) __write_ulong_c0_register($18, 6, val) +#define write_c0_watchlo7(val) __write_ulong_c0_register($18, 7, val) + +/* + * The WatchHi register. There may be up to 8 of them. + */ +#define read_c0_watchhi0() __read_32bit_c0_register($19, 0) +#define read_c0_watchhi1() __read_32bit_c0_register($19, 1) +#define read_c0_watchhi2() __read_32bit_c0_register($19, 2) +#define read_c0_watchhi3() __read_32bit_c0_register($19, 3) +#define read_c0_watchhi4() __read_32bit_c0_register($19, 4) +#define read_c0_watchhi5() __read_32bit_c0_register($19, 5) +#define read_c0_watchhi6() __read_32bit_c0_register($19, 6) +#define read_c0_watchhi7() __read_32bit_c0_register($19, 7) + +#define write_c0_watchhi0(val) __write_32bit_c0_register($19, 0, val) +#define write_c0_watchhi1(val) __write_32bit_c0_register($19, 1, val) +#define write_c0_watchhi2(val) __write_32bit_c0_register($19, 2, val) +#define write_c0_watchhi3(val) __write_32bit_c0_register($19, 3, val) +#define write_c0_watchhi4(val) __write_32bit_c0_register($19, 4, val) +#define write_c0_watchhi5(val) __write_32bit_c0_register($19, 5, val) +#define write_c0_watchhi6(val) __write_32bit_c0_register($19, 6, val) +#define write_c0_watchhi7(val) __write_32bit_c0_register($19, 7, val) + +#define read_c0_xcontext() __read_ulong_c0_register($20, 0) +#define write_c0_xcontext(val) __write_ulong_c0_register($20, 0, val) + +#define read_c0_intcontrol() __read_32bit_c0_ctrl_register($20) +#define write_c0_intcontrol(val) __write_32bit_c0_ctrl_register($20, val) + +#define read_c0_framemask() __read_32bit_c0_register($21, 0) +#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val) + +#define read_c0_diag() __read_32bit_c0_register($22, 0) +#define write_c0_diag(val) __write_32bit_c0_register($22, 0, val) + +/* R10K CP0 Branch Diagnostic register is 64bits wide */ +#define read_c0_r10k_diag() __read_64bit_c0_register($22, 0) +#define write_c0_r10k_diag(val) __write_64bit_c0_register($22, 0, val) + +#define read_c0_diag1() __read_32bit_c0_register($22, 1) +#define write_c0_diag1(val) __write_32bit_c0_register($22, 1, val) + +#define read_c0_diag2() __read_32bit_c0_register($22, 2) +#define write_c0_diag2(val) __write_32bit_c0_register($22, 2, val) + +#define read_c0_diag3() __read_32bit_c0_register($22, 3) +#define write_c0_diag3(val) __write_32bit_c0_register($22, 3, val) + +#define read_c0_diag4() __read_32bit_c0_register($22, 4) +#define write_c0_diag4(val) __write_32bit_c0_register($22, 4, val) + +#define read_c0_diag5() __read_32bit_c0_register($22, 5) +#define write_c0_diag5(val) __write_32bit_c0_register($22, 5, 
val) + +#define read_c0_debug() __read_32bit_c0_register($23, 0) +#define write_c0_debug(val) __write_32bit_c0_register($23, 0, val) + +#define read_c0_depc() __read_ulong_c0_register($24, 0) +#define write_c0_depc(val) __write_ulong_c0_register($24, 0, val) + +/* + * MIPS32 / MIPS64 performance counters + */ +#define read_c0_perfctrl0() __read_32bit_c0_register($25, 0) +#define write_c0_perfctrl0(val) __write_32bit_c0_register($25, 0, val) +#define read_c0_perfcntr0() __read_32bit_c0_register($25, 1) +#define write_c0_perfcntr0(val) __write_32bit_c0_register($25, 1, val) +#define read_c0_perfcntr0_64() __read_64bit_c0_register($25, 1) +#define write_c0_perfcntr0_64(val) __write_64bit_c0_register($25, 1, val) +#define read_c0_perfctrl1() __read_32bit_c0_register($25, 2) +#define write_c0_perfctrl1(val) __write_32bit_c0_register($25, 2, val) +#define read_c0_perfcntr1() __read_32bit_c0_register($25, 3) +#define write_c0_perfcntr1(val) __write_32bit_c0_register($25, 3, val) +#define read_c0_perfcntr1_64() __read_64bit_c0_register($25, 3) +#define write_c0_perfcntr1_64(val) __write_64bit_c0_register($25, 3, val) +#define read_c0_perfctrl2() __read_32bit_c0_register($25, 4) +#define write_c0_perfctrl2(val) __write_32bit_c0_register($25, 4, val) +#define read_c0_perfcntr2() __read_32bit_c0_register($25, 5) +#define write_c0_perfcntr2(val) __write_32bit_c0_register($25, 5, val) +#define read_c0_perfcntr2_64() __read_64bit_c0_register($25, 5) +#define write_c0_perfcntr2_64(val) __write_64bit_c0_register($25, 5, val) +#define read_c0_perfctrl3() __read_32bit_c0_register($25, 6) +#define write_c0_perfctrl3(val) __write_32bit_c0_register($25, 6, val) +#define read_c0_perfcntr3() __read_32bit_c0_register($25, 7) +#define write_c0_perfcntr3(val) __write_32bit_c0_register($25, 7, val) +#define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7) +#define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val) + +#define read_c0_ecc() __read_32bit_c0_register($26, 0) +#define write_c0_ecc(val) __write_32bit_c0_register($26, 0, val) + +#define read_c0_derraddr0() __read_ulong_c0_register($26, 1) +#define write_c0_derraddr0(val) __write_ulong_c0_register($26, 1, val) + +#define read_c0_cacheerr() __read_32bit_c0_register($27, 0) + +#define read_c0_derraddr1() __read_ulong_c0_register($27, 1) +#define write_c0_derraddr1(val) __write_ulong_c0_register($27, 1, val) + +#define read_c0_taglo() __read_32bit_c0_register($28, 0) +#define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val) + +#define read_c0_dtaglo() __read_32bit_c0_register($28, 2) +#define write_c0_dtaglo(val) __write_32bit_c0_register($28, 2, val) + +#define read_c0_ddatalo() __read_32bit_c0_register($28, 3) +#define write_c0_ddatalo(val) __write_32bit_c0_register($28, 3, val) + +#define read_c0_staglo() __read_32bit_c0_register($28, 4) +#define write_c0_staglo(val) __write_32bit_c0_register($28, 4, val) + +#define read_c0_taghi() __read_32bit_c0_register($29, 0) +#define write_c0_taghi(val) __write_32bit_c0_register($29, 0, val) + +#define read_c0_errorepc() __read_ulong_c0_register($30, 0) +#define write_c0_errorepc(val) __write_ulong_c0_register($30, 0, val) + +/* MIPSR2 */ +#define read_c0_hwrena() __read_32bit_c0_register($7, 0) +#define write_c0_hwrena(val) __write_32bit_c0_register($7, 0, val) + +#define read_c0_intctl() __read_32bit_c0_register($12, 1) +#define write_c0_intctl(val) __write_32bit_c0_register($12, 1, val) + +#define read_c0_srsctl() __read_32bit_c0_register($12, 2) +#define write_c0_srsctl(val) 
__write_32bit_c0_register($12, 2, val) + +#define read_c0_srsmap() __read_32bit_c0_register($12, 3) +#define write_c0_srsmap(val) __write_32bit_c0_register($12, 3, val) + +#define read_c0_ebase() __read_32bit_c0_register($15, 1) +#define write_c0_ebase(val) __write_32bit_c0_register($15, 1, val) + +#define read_c0_cdmmbase() __read_ulong_c0_register($15, 2) +#define write_c0_cdmmbase(val) __write_ulong_c0_register($15, 2, val) + +/* MIPSR3 */ +#define read_c0_segctl0() __read_32bit_c0_register($5, 2) +#define write_c0_segctl0(val) __write_32bit_c0_register($5, 2, val) + +#define read_c0_segctl1() __read_32bit_c0_register($5, 3) +#define write_c0_segctl1(val) __write_32bit_c0_register($5, 3, val) + +#define read_c0_segctl2() __read_32bit_c0_register($5, 4) +#define write_c0_segctl2(val) __write_32bit_c0_register($5, 4, val) + +/* Hardware Page Table Walker */ +#define read_c0_pwbase() __read_ulong_c0_register($5, 5) +#define write_c0_pwbase(val) __write_ulong_c0_register($5, 5, val) + +#define read_c0_pwfield() __read_ulong_c0_register($5, 6) +#define write_c0_pwfield(val) __write_ulong_c0_register($5, 6, val) + +#define read_c0_pwsize() __read_ulong_c0_register($5, 7) +#define write_c0_pwsize(val) __write_ulong_c0_register($5, 7, val) + +#define read_c0_pwctl() __read_32bit_c0_register($6, 6) +#define write_c0_pwctl(val) __write_32bit_c0_register($6, 6, val) + +/* Cavium OCTEON (cnMIPS) */ +#define read_c0_cvmcount() __read_ulong_c0_register($9, 6) +#define write_c0_cvmcount(val) __write_ulong_c0_register($9, 6, val) + +#define read_c0_cvmctl() __read_64bit_c0_register($9, 7) +#define write_c0_cvmctl(val) __write_64bit_c0_register($9, 7, val) + +#define read_c0_cvmmemctl() __read_64bit_c0_register($11, 7) +#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val) +/* + * The cacheerr registers are not standardized. On OCTEON, they are + * 64 bits wide. 
+ */ +#define read_octeon_c0_icacheerr() __read_64bit_c0_register($27, 0) +#define write_octeon_c0_icacheerr(val) __write_64bit_c0_register($27, 0, val) + +#define read_octeon_c0_dcacheerr() __read_64bit_c0_register($27, 1) +#define write_octeon_c0_dcacheerr(val) __write_64bit_c0_register($27, 1, val) + +/* BMIPS3300 */ +#define read_c0_brcm_config_0() __read_32bit_c0_register($22, 0) +#define write_c0_brcm_config_0(val) __write_32bit_c0_register($22, 0, val) + +#define read_c0_brcm_bus_pll() __read_32bit_c0_register($22, 4) +#define write_c0_brcm_bus_pll(val) __write_32bit_c0_register($22, 4, val) + +#define read_c0_brcm_reset() __read_32bit_c0_register($22, 5) +#define write_c0_brcm_reset(val) __write_32bit_c0_register($22, 5, val) + +/* BMIPS43xx */ +#define read_c0_brcm_cmt_intr() __read_32bit_c0_register($22, 1) +#define write_c0_brcm_cmt_intr(val) __write_32bit_c0_register($22, 1, val) + +#define read_c0_brcm_cmt_ctrl() __read_32bit_c0_register($22, 2) +#define write_c0_brcm_cmt_ctrl(val) __write_32bit_c0_register($22, 2, val) + +#define read_c0_brcm_cmt_local() __read_32bit_c0_register($22, 3) +#define write_c0_brcm_cmt_local(val) __write_32bit_c0_register($22, 3, val) + +#define read_c0_brcm_config_1() __read_32bit_c0_register($22, 5) +#define write_c0_brcm_config_1(val) __write_32bit_c0_register($22, 5, val) + +#define read_c0_brcm_cbr() __read_32bit_c0_register($22, 6) +#define write_c0_brcm_cbr(val) __write_32bit_c0_register($22, 6, val) + +/* BMIPS5000 */ +#define read_c0_brcm_config() __read_32bit_c0_register($22, 0) +#define write_c0_brcm_config(val) __write_32bit_c0_register($22, 0, val) + +#define read_c0_brcm_mode() __read_32bit_c0_register($22, 1) +#define write_c0_brcm_mode(val) __write_32bit_c0_register($22, 1, val) + +#define read_c0_brcm_action() __read_32bit_c0_register($22, 2) +#define write_c0_brcm_action(val) __write_32bit_c0_register($22, 2, val) + +#define read_c0_brcm_edsp() __read_32bit_c0_register($22, 3) +#define write_c0_brcm_edsp(val) __write_32bit_c0_register($22, 3, val) + +#define read_c0_brcm_bootvec() __read_32bit_c0_register($22, 4) +#define write_c0_brcm_bootvec(val) __write_32bit_c0_register($22, 4, val) + +#define read_c0_brcm_sleepcount() __read_32bit_c0_register($22, 7) +#define write_c0_brcm_sleepcount(val) __write_32bit_c0_register($22, 7, val) + +/* + * Macros to access the floating point coprocessor control registers + */ +#define _read_32bit_cp1_register(source, gas_hardfloat) \ +({ \ + unsigned int __res; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set reorder \n" \ + " # gas fails to assemble cfc1 for some archs, \n" \ + " # like Octeon. 
\n" \ + " .set mips1 \n" \ + " "STR(gas_hardfloat)" \n" \ + " cfc1 %0,"STR(source)" \n" \ + " .set pop \n" \ + : "=r" (__res)); \ + __res; \ +}) + +#define _write_32bit_cp1_register(dest, val, gas_hardfloat) \ +({ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set reorder \n" \ + " "STR(gas_hardfloat)" \n" \ + " ctc1 %0,"STR(dest)" \n" \ + " .set pop \n" \ + : : "r" (val)); \ +}) + +#ifdef GAS_HAS_SET_HARDFLOAT +#define read_32bit_cp1_register(source) \ + _read_32bit_cp1_register(source, .set hardfloat) +#define write_32bit_cp1_register(dest, val) \ + _write_32bit_cp1_register(dest, val, .set hardfloat) +#else +#define read_32bit_cp1_register(source) \ + _read_32bit_cp1_register(source, ) +#define write_32bit_cp1_register(dest, val) \ + _write_32bit_cp1_register(dest, val, ) +#endif + +#ifdef HAVE_AS_DSP +#define rddsp(mask) \ +({ \ + unsigned int __dspctl; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ + " rddsp %0, %x1 \n" \ + " .set pop \n" \ + : "=r" (__dspctl) \ + : "i" (mask)); \ + __dspctl; \ +}) + +#define wrdsp(val, mask) \ +({ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set dsp \n" \ + " wrdsp %0, %x1 \n" \ + " .set pop \n" \ + : \ + : "r" (val), "i" (mask)); \ +}) + +#define mflo0() \ +({ \ + long mflo0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mflo0)); \ + mflo0; \ +}) + +#define mflo1() \ +({ \ + long mflo1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mflo1)); \ + mflo1; \ +}) + +#define mflo2() \ +({ \ + long mflo2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mflo2)); \ + mflo2; \ +}) + +#define mflo3() \ +({ \ + long mflo3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mflo %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mflo3)); \ + mflo3; \ +}) + +#define mfhi0() \ +({ \ + long mfhi0; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac0 \n" \ + " .set pop \n" \ + : "=r" (mfhi0)); \ + mfhi0; \ +}) + +#define mfhi1() \ +({ \ + long mfhi1; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac1 \n" \ + " .set pop \n" \ + : "=r" (mfhi1)); \ + mfhi1; \ +}) + +#define mfhi2() \ +({ \ + long mfhi2; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac2 \n" \ + " .set pop \n" \ + : "=r" (mfhi2)); \ + mfhi2; \ +}) + +#define mfhi3() \ +({ \ + long mfhi3; \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mfhi %0, $ac3 \n" \ + " .set pop \n" \ + : "=r" (mfhi3)); \ + mfhi3; \ +}) + + +#define mtlo0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo2(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mtlo3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mtlo %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi0(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac0 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi1(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac1 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi2(x) \ +({ \ + 
__asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac2 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#define mthi3(x) \ +({ \ + __asm__( \ + " .set push \n" \ + " .set dsp \n" \ + " mthi %0, $ac3 \n" \ + " .set pop \n" \ + : \ + : "r" (x)); \ +}) + +#else + +#ifdef CONFIG_CPU_MICROMIPS +#define rddsp(mask) \ +({ \ + unsigned int __res; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # rddsp $1, %x1 \n" \ + " .hword ((0x0020067c | (%x1 << 14)) >> 16) \n" \ + " .hword ((0x0020067c | (%x1 << 14)) & 0xffff) \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__res) \ + : "i" (mask)); \ + __res; \ +}) + +#define wrdsp(val, mask) \ +({ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " # wrdsp $1, %x1 \n" \ + " .hword ((0x0020167c | (%x1 << 14)) >> 16) \n" \ + " .hword ((0x0020167c | (%x1 << 14)) & 0xffff) \n" \ + " .set pop \n" \ + : \ + : "r" (val), "i" (mask)); \ +}) + +#define _umips_dsp_mfxxx(ins) \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " .hword 0x0001 \n" \ + " .hword %x1 \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg) \ + : "i" (ins)); \ + __treg; \ +}) + +#define _umips_dsp_mtxxx(val, ins) \ +({ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " .hword 0x0001 \n" \ + " .hword %x1 \n" \ + " .set pop \n" \ + : \ + : "r" (val), "i" (ins)); \ +}) + +#define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c) +#define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c) + +#define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c)) +#define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c)) + +#define mflo0() _umips_dsp_mflo(0) +#define mflo1() _umips_dsp_mflo(1) +#define mflo2() _umips_dsp_mflo(2) +#define mflo3() _umips_dsp_mflo(3) + +#define mfhi0() _umips_dsp_mfhi(0) +#define mfhi1() _umips_dsp_mfhi(1) +#define mfhi2() _umips_dsp_mfhi(2) +#define mfhi3() _umips_dsp_mfhi(3) + +#define mtlo0(x) _umips_dsp_mtlo(x, 0) +#define mtlo1(x) _umips_dsp_mtlo(x, 1) +#define mtlo2(x) _umips_dsp_mtlo(x, 2) +#define mtlo3(x) _umips_dsp_mtlo(x, 3) + +#define mthi0(x) _umips_dsp_mthi(x, 0) +#define mthi1(x) _umips_dsp_mthi(x, 1) +#define mthi2(x) _umips_dsp_mthi(x, 2) +#define mthi3(x) _umips_dsp_mthi(x, 3) + +#else /* !CONFIG_CPU_MICROMIPS */ +#define rddsp(mask) \ +({ \ + unsigned int __res; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " # rddsp $1, %x1 \n" \ + " .word 0x7c000cb8 | (%x1 << 16) \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__res) \ + : "i" (mask)); \ + __res; \ +}) + +#define wrdsp(val, mask) \ +({ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " # wrdsp $1, %x1 \n" \ + " .word 0x7c2004f8 | (%x1 << 11) \n" \ + " .set pop \n" \ + : \ + : "r" (val), "i" (mask)); \ +}) + +#define _dsp_mfxxx(ins) \ +({ \ + unsigned long __treg; \ + \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " .word (0x00000810 | %1) \n" \ + " move %0, $1 \n" \ + " .set pop \n" \ + : "=r" (__treg) \ + : "i" (ins)); \ + __treg; \ +}) + +#define _dsp_mtxxx(val, ins) \ +({ \ + __asm__ __volatile__( \ + " .set push \n" \ + " .set noat \n" \ + " move $1, %0 \n" \ + " .word (0x00200011 | %1) \n" \ + " .set pop \n" \ + : \ + : "r" (val), "i" (ins)); \ +}) + +#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002) +#define _dsp_mfhi(reg) 
_dsp_mfxxx((reg << 21) | 0x0000) + +#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002)) +#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000)) + +#define mflo0() _dsp_mflo(0) +#define mflo1() _dsp_mflo(1) +#define mflo2() _dsp_mflo(2) +#define mflo3() _dsp_mflo(3) + +#define mfhi0() _dsp_mfhi(0) +#define mfhi1() _dsp_mfhi(1) +#define mfhi2() _dsp_mfhi(2) +#define mfhi3() _dsp_mfhi(3) + +#define mtlo0(x) _dsp_mtlo(x, 0) +#define mtlo1(x) _dsp_mtlo(x, 1) +#define mtlo2(x) _dsp_mtlo(x, 2) +#define mtlo3(x) _dsp_mtlo(x, 3) + +#define mthi0(x) _dsp_mthi(x, 0) +#define mthi1(x) _dsp_mthi(x, 1) +#define mthi2(x) _dsp_mthi(x, 2) +#define mthi3(x) _dsp_mthi(x, 3) + +#endif /* CONFIG_CPU_MICROMIPS */ +#endif + +/* + * TLB operations. + * + * It is the responsibility of the caller to take care of any TLB hazards. + */ +static inline void tlb_probe(void) +{ + __asm__ __volatile__( + ".set noreorder\n\t" + "tlbp\n\t" + ".set reorder"); +} + +static inline void tlb_read(void) +{ +#if MIPS34K_MISSED_ITLB_WAR + int res = 0; + + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + " .set noat \n" + " .set mips32r2 \n" + " .word 0x41610001 # dvpe $1 \n" + " move %0, $1 \n" + " ehb \n" + " .set pop \n" + : "=r" (res)); + + instruction_hazard(); +#endif + + __asm__ __volatile__( + ".set noreorder\n\t" + "tlbr\n\t" + ".set reorder"); + +#if MIPS34K_MISSED_ITLB_WAR + if ((res & _ULCAST_(1))) + __asm__ __volatile__( + " .set push \n" + " .set noreorder \n" + " .set noat \n" + " .set mips32r2 \n" + " .word 0x41600021 # evpe \n" + " ehb \n" + " .set pop \n"); +#endif +} + +static inline void tlb_write_indexed(void) +{ + __asm__ __volatile__( + ".set noreorder\n\t" + "tlbwi\n\t" + ".set reorder"); +} + +static inline void tlb_write_random(void) +{ + __asm__ __volatile__( + ".set noreorder\n\t" + "tlbwr\n\t" + ".set reorder"); +} + +/* + * Manipulate bits in a c0 register. + */ +#define __BUILD_SET_C0(name) \ +static inline unsigned int \ +set_c0_##name(unsigned int set) \ +{ \ + unsigned int res, new; \ + \ + res = read_c0_##name(); \ + new = res | set; \ + write_c0_##name(new); \ + \ + return res; \ +} \ + \ +static inline unsigned int \ +clear_c0_##name(unsigned int clear) \ +{ \ + unsigned int res, new; \ + \ + res = read_c0_##name(); \ + new = res & ~clear; \ + write_c0_##name(new); \ + \ + return res; \ +} \ + \ +static inline unsigned int \ +change_c0_##name(unsigned int change, unsigned int val) \ +{ \ + unsigned int res, new; \ + \ + res = read_c0_##name(); \ + new = res & ~change; \ + new |= (val & change); \ + write_c0_##name(new); \ + \ + return res; \ +} + +__BUILD_SET_C0(status) +__BUILD_SET_C0(cause) +__BUILD_SET_C0(config) +__BUILD_SET_C0(config5) +__BUILD_SET_C0(intcontrol) +__BUILD_SET_C0(intctl) +__BUILD_SET_C0(srsmap) +__BUILD_SET_C0(pagegrain) +__BUILD_SET_C0(brcm_config_0) +__BUILD_SET_C0(brcm_bus_pll) +__BUILD_SET_C0(brcm_reset) +__BUILD_SET_C0(brcm_cmt_intr) +__BUILD_SET_C0(brcm_cmt_ctrl) +__BUILD_SET_C0(brcm_config) +__BUILD_SET_C0(brcm_mode) + +/* + * Return low 10 bits of ebase. + * Note that under KVM (MIPSVZ) this returns vcpu id.
+ */ +static inline unsigned int get_ebase_cpunum(void) +{ + return read_c0_ebase() & 0x3ff; +} + +static inline void write_one_tlb(int index, u32 pagemask, u32 hi, u32 low0, + u32 low1) +{ + write_c0_entrylo0(low0); + write_c0_pagemask(pagemask); + write_c0_entrylo1(low1); + write_c0_entryhi(hi); + write_c0_index(index); + tlb_write_indexed(); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_MIPSREGS_H */ diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h new file mode 100644 index 0000000..481d2ef --- /dev/null +++ b/arch/mips/include/asm/pgtable-bits.h @@ -0,0 +1,282 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1994 - 2002 by Ralf Baechle + * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc. + * Copyright (C) 2002 Maciej W. Rozycki + */ +#ifndef _ASM_PGTABLE_BITS_H +#define _ASM_PGTABLE_BITS_H + + +/* + * Note that we shift the lower 32bits of each EntryLo[01] entry + * 6 bits to the left. That way we can convert the PFN into the + * physical address by a single 'and' operation and gain 6 additional + * bits for storing information which isn't present in a normal + * MIPS page table. + * + * Similar to the Alpha port, we need to keep track of the ref + * and mod bits in software. We have a software "yeah you can read + * from this page" bit, and a hardware one which actually lets the + * process read from the page. On the same token we have a software + * writable bit and the real hardware one which actually lets the + * process write to the page, this keeps a mod bit via the hardware + * dirty bit. + * + * Certain revisions of the R4000 and R5000 have a bug where if a + * certain sequence occurs in the last 3 instructions of an executable + * page, and the following page is not mapped, the cpu can do + * unpredictable things. The code (when it is written) to deal with + * this problem will be in the update_mmu_cache() code for the r4k. 
+ */ +#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) + +/* + * The following bits are implemented by the TLB hardware + */ +#define _PAGE_NO_EXEC_SHIFT 0 +#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT) +#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1) +#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT) +#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) +#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) +#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) +#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) +#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) +#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) +#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) +#define _CACHE_MASK (7 << _CACHE_SHIFT) + +/* + * The following bits are implemented in software + */ +#define _PAGE_PRESENT_SHIFT (24) +#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) +#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) +#define _PAGE_READ (1 << _PAGE_READ_SHIFT) +#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) +#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) +#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) +#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) +#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) +#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) + +#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) + +/* + * Bits for extended EntryLo0/EntryLo1 registers + */ +#define _PFNX_MASK 0xffffff + +#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) + +/* + * The following bits are implemented in software + */ +#define _PAGE_PRESENT_SHIFT (0) +#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) +#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) +#define _PAGE_READ (1 << _PAGE_READ_SHIFT) +#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) +#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) +#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) +#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) +#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) +#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) + +/* + * The following bits are implemented by the TLB hardware + */ +#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4) +#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) +#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) +#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) +#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) +#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) +#define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1) +#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT) +#define _CACHE_MASK _CACHE_UNCACHED + +#define _PFN_SHIFT PAGE_SHIFT + +#else +/* + * Below are the "Normal" R4K cases + */ + +/* + * The following bits are implemented in software + */ +#define _PAGE_PRESENT_SHIFT 0 +#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) +/* R2 or later cores check for RI/XI support to determine _PAGE_READ */ +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) +#define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1) +#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) +#else +#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) +#define _PAGE_READ (1 << _PAGE_READ_SHIFT) +#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) +#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) +#endif +#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) +#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) +#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) +#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) + +#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) +/* Huge TLB page */ +#define _PAGE_HUGE_SHIFT 
(_PAGE_MODIFIED_SHIFT + 1) +#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) +#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) +#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) +#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */ + +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) +/* XI - page cannot be executed */ +#ifdef _PAGE_SPLITTING_SHIFT +#define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1) +#else +#define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1) +#endif +#define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0) + +/* RI - page cannot be read */ +#define _PAGE_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1) +#define _PAGE_READ (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT)) +#define _PAGE_NO_READ_SHIFT _PAGE_READ_SHIFT +#define _PAGE_NO_READ (cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0) +#endif /* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */ + +#if defined(_PAGE_NO_READ_SHIFT) +#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) +#elif defined(_PAGE_SPLITTING_SHIFT) +#define _PAGE_GLOBAL_SHIFT (_PAGE_SPLITTING_SHIFT + 1) +#else +#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1) +#endif +#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) + +#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) +#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) +#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) +#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) +#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) +#define _CACHE_MASK (7 << _CACHE_SHIFT) + +#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) + +#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */ + +#ifndef _PAGE_NO_EXEC +#define _PAGE_NO_EXEC 0 +#endif +#ifndef _PAGE_NO_READ +#define _PAGE_NO_READ 0 +#endif + +#define _PAGE_SILENT_READ _PAGE_VALID +#define _PAGE_SILENT_WRITE _PAGE_DIRTY + +#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) + +/* + * The final layouts of the PTE bits are: + * + * 64-bit, R1 or earlier: CCC D V G [S H] M A W R P + * 32-bit, R1 or earlier: CCC D V G M A W R P + * 64-bit, R2 or later: CCC D V G RI/R XI [S H] M A W P + * 32-bit, R2 or later: CCC D V G RI/R XI M A W P + */ + + +#ifndef __ASSEMBLY__ +/* + * pte_to_entrylo converts a page table entry (PTE) into a MIPS + * entrylo0/1 value. + */ +static inline uint64_t pte_to_entrylo(unsigned long pte_val) +{ +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) + if (cpu_has_rixi) { + int sa; +#ifdef CONFIG_32BIT + sa = 31 - _PAGE_NO_READ_SHIFT; +#else + sa = 63 - _PAGE_NO_READ_SHIFT; +#endif + /* + * C has no way to express that this is a DSRL + * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2. Luckily + * in the fast path this is done in assembly + */ + return (pte_val >> _PAGE_GLOBAL_SHIFT) | + ((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa); + } +#endif + + return pte_val >> _PAGE_GLOBAL_SHIFT; +} +#endif + +/* + * Cache attributes + */ +#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) + +#define _CACHE_CACHABLE_NONCOHERENT 0 +#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED + +#elif defined(CONFIG_CPU_SB1) + +/* No penalty for being coherent on the SB1, so just + use it for "noncoherent" spaces, too. Shouldn't hurt. */ + +#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT) + +#elif defined(CONFIG_CPU_LOONGSON3) + +/* Using COHERENT flag for NONCOHERENT doesn't hurt.
*/ + +#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* LOONGSON */ +#define _CACHE_CACHABLE_COHERENT (3<<_CACHE_SHIFT) /* LOONGSON-3 */ + +#elif defined(CONFIG_MACH_INGENIC) + +/* Ingenic uses the WA bit to achieve write-combine memory writes */ +#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT) + +#endif + +#ifndef _CACHE_CACHABLE_NO_WA +#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_WA +#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_UNCACHED +#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_NONCOHERENT +#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_CE +#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_COW +#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_CACHABLE_CUW +#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT) +#endif +#ifndef _CACHE_UNCACHED_ACCELERATED +#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) +#endif + +#define __READABLE (_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED) +#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED) + +#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \ + _PFN_MASK | _CACHE_MASK) + +#endif /* _ASM_PGTABLE_BITS_H */ diff --git a/arch/mips/include/asm/posix_types.h b/arch/mips/include/asm/posix_types.h new file mode 100644 index 0000000..ec5d338 --- /dev/null +++ b/arch/mips/include/asm/posix_types.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1996, 1997, 1998, 2000 by Ralf Baechle + */ +#ifndef _ASM_POSIX_TYPES_H +#define _ASM_POSIX_TYPES_H + +/* + * This file is generally used by user-level software, so you need to + * be a little careful about namespace pollution etc. Also, we cannot + * assume GCC is being used. 
+ */ + +typedef unsigned int __kernel_dev_t; +typedef unsigned long __kernel_ino_t; +typedef unsigned int __kernel_mode_t; +typedef int __kernel_nlink_t; +typedef long __kernel_off_t; +typedef int __kernel_pid_t; +typedef int __kernel_ipc_pid_t; +typedef int __kernel_uid_t; +typedef int __kernel_gid_t; +#if _MIPS_SZLONG != 64 +typedef unsigned int __kernel_size_t; +typedef int __kernel_ssize_t; +typedef int __kernel_ptrdiff_t; +#else +typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; +typedef long __kernel_ptrdiff_t; +#endif +typedef long __kernel_time_t; +typedef long __kernel_suseconds_t; +typedef long __kernel_clock_t; +typedef long __kernel_daddr_t; +typedef char * __kernel_caddr_t; + +typedef unsigned short __kernel_uid16_t; +typedef unsigned short __kernel_gid16_t; +typedef int __kernel_uid32_t; +typedef int __kernel_gid32_t; +typedef __kernel_uid_t __kernel_old_uid_t; +typedef __kernel_gid_t __kernel_old_gid_t; + +#ifdef __GNUC__ +typedef long long __kernel_loff_t; +#endif + +typedef struct { + long val[2]; +} __kernel_fsid_t; + +#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) + +#undef __FD_SET +static __inline__ void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + __fdsetp->fds_bits[__tmp] |= (1UL<<__rem); +} + +#undef __FD_CLR +static __inline__ void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem); +} + +#undef __FD_ISSET +static __inline__ int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p) +{ + unsigned long __tmp = __fd / __NFDBITS; + unsigned long __rem = __fd % __NFDBITS; + return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0; +} + +/* + * This will unroll the loop for the normal constant case (8 ints, + * for a 256-bit fd_set) + */ +#undef __FD_ZERO +static __inline__ void __FD_ZERO(__kernel_fd_set *__p) +{ + unsigned long *__tmp = __p->fds_bits; + int __i; + + if (__builtin_constant_p(__FDSET_LONGS)) { + switch (__FDSET_LONGS) { + case 16: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + __tmp[ 4] = 0; __tmp[ 5] = 0; + __tmp[ 6] = 0; __tmp[ 7] = 0; + __tmp[ 8] = 0; __tmp[ 9] = 0; + __tmp[10] = 0; __tmp[11] = 0; + __tmp[12] = 0; __tmp[13] = 0; + __tmp[14] = 0; __tmp[15] = 0; + return; + + case 8: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + __tmp[ 4] = 0; __tmp[ 5] = 0; + __tmp[ 6] = 0; __tmp[ 7] = 0; + return; + + case 4: + __tmp[ 0] = 0; __tmp[ 1] = 0; + __tmp[ 2] = 0; __tmp[ 3] = 0; + return; + } + } + __i = __FDSET_LONGS; + while (__i) { + __i--; + *__tmp = 0; + __tmp++; + } +} + +#endif /* defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2) */ + +#endif /* _ASM_POSIX_TYPES_H */ diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h new file mode 100644 index 0000000..39a4f43 --- /dev/null +++ b/arch/mips/include/asm/processor.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1994 Waldorf GMBH + * Copyright (C) 1995, 1996, 1997, 1998, 1999, 2001, 2002, 2003 Ralf Baechle + * Copyright (C) 1996 Paul M. Antoine + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. + */ +#ifndef _ASM_PROCESSOR_H +#define _ASM_PROCESSOR_H + +#include + +#include +#include +#include +#include + +/* + * Return current * instruction pointer ("program counter"). 
+ */ +#define current_text_addr() ({ __label__ _l; _l: &&_l;}) + +/* + * System setup and hardware flags.. + */ +extern void (*cpu_wait)(void); + +extern unsigned int vced_count, vcei_count; + +#define NUM_FPU_REGS 32 + +typedef __u64 fpureg_t; + +/* + * It would be nice to add some more fields for emulator statistics, but there + * are a number of fixed offsets in offset.h and elsewhere that would have to + * be recalculated by hand. So the additional information will be private to + * the FPU emulator for now. See asm-mips/fpu_emulator.h. + */ + +struct mips_fpu_struct { + fpureg_t fpr[NUM_FPU_REGS]; + unsigned int fcr31; +}; + +#define NUM_DSP_REGS 6 + +typedef __u32 dspreg_t; + +struct mips_dsp_state { + dspreg_t dspr[NUM_DSP_REGS]; + unsigned int dspcontrol; +}; + +typedef struct { + unsigned long seg; +} mm_segment_t; + +#define ARCH_MIN_TASKALIGN 8 + +struct mips_abi; + +/* + * If you change thread_struct remember to change the #defines below too! + */ +struct thread_struct { + /* Saved main processor registers. */ + unsigned long reg16; + unsigned long reg17, reg18, reg19, reg20, reg21, reg22, reg23; + unsigned long reg29, reg30, reg31; + + /* Saved cp0 stuff. */ + unsigned long cp0_status; + + /* Saved fpu/fpu emulator stuff. */ + struct mips_fpu_struct fpu; +#ifdef CONFIG_MIPS_MT_FPAFF + /* Emulated instruction count */ + unsigned long emulated_fp; + /* Saved per-thread scheduler affinity mask */ + cpumask_t user_cpus_allowed; +#endif /* CONFIG_MIPS_MT_FPAFF */ + + /* Saved state of the DSP ASE, if available. */ + struct mips_dsp_state dsp; + + /* Other stuff associated with the thread. */ + unsigned long cp0_badvaddr; /* Last user fault */ + unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */ + unsigned long error_code; + unsigned long trap_no; + unsigned long irix_trampoline; /* Wheee... */ + unsigned long irix_oldctx; + struct mips_abi *abi; +}; + +struct task_struct; + +/* Free all resources held by a thread. */ +#define release_thread(thread) do { } while(0) + +/* Prepare to copy thread state - unlazy all lazy status */ +#define prepare_to_copy(tsk) do { } while (0) + +#define cpu_relax() barrier() + +/* + * Return_address is a replacement for __builtin_return_address(count) + * which on certain architectures cannot reasonably be implemented in GCC + * (MIPS, Alpha) or is unuseable with -fomit-frame-pointer (i386). + * Note that __builtin_return_address(x>=1) is forbidden because GCC + * aborts compilation on some CPUs. It's simply not possible to unwind + * some CPU's stackframes. + * + * __builtin_return_address works only for non-leaf functions. We avoid the + * overhead of a function call by forcing the compiler to save the return + * address register on the stack. + */ +#define return_address() ({__asm__ __volatile__("":::"$31");__builtin_return_address(0);}) + +#ifdef CONFIG_CPU_HAS_PREFETCH + +#define ARCH_HAS_PREFETCH + +static inline void prefetch(const void *addr) +{ + __asm__ __volatile__( + " .set mips4 \n" + " pref %0, (%1) \n" + " .set mips0 \n" + : + : "i" (Pref_Load), "r" (addr)); +} + +#endif + +#endif /* _ASM_PROCESSOR_H */ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h new file mode 100644 index 0000000..cb88d6d --- /dev/null +++ b/arch/mips/include/asm/ptrace.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000 by Ralf Baechle + * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 
+ */ +#ifndef _ASM_PTRACE_H +#define _ASM_PTRACE_H + +#include +#include +#include + +/* + * This struct defines the way the registers are stored on the stack during a + * system call/exception. As usual the registers k0/k1 aren't being saved. + * + * If you add a register here, also add it to regoffset_table[] in + * arch/mips/kernel/ptrace.c. + */ +struct pt_regs { +#ifdef CONFIG_32BIT + /* Pad bytes for argument save space on the stack. */ + unsigned long pad0[8]; +#endif + + /* Saved main processor registers. */ + unsigned long regs[32]; + + /* Saved special registers. */ + unsigned long cp0_status; + unsigned long hi; + unsigned long lo; +#ifdef CONFIG_CPU_HAS_SMARTMIPS + unsigned long acx; +#endif + unsigned long cp0_badvaddr; + unsigned long cp0_cause; + unsigned long cp0_epc; +#ifdef CONFIG_CPU_CAVIUM_OCTEON + unsigned long long mpl[6]; /* MTM{0-5} */ + unsigned long long mtp[6]; /* MTP{0-5} */ +#endif + unsigned long __last[0]; +} __aligned(8); + +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->regs[31]; +} + +/* + * Don't use asm-generic/ptrace.h it defines FP accessors that don't make + * sense on MIPS. We rather want an error if they get invoked. + */ + +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->cp0_epc = val; +} + +/* Query offset/name of register from its name/offset */ +extern int regs_query_register_offset(const char *name); +#define MAX_REG_OFFSET (offsetof(struct pt_regs, __last)) + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which register value is gotten. + * @offset: offset number of the register. + * + * regs_get_register returns the value of a register. The @offset is the + * offset of the register in struct pt_regs address which specified by @regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} + +/* + * Does the process account for user or for system time? + */ +#define user_mode(regs) (((regs)->cp0_status & KU_MASK) == KU_USER) + +#define instruction_pointer(regs) ((regs)->cp0_epc) +#define profile_pc(regs) instruction_pointer(regs) + +/* Helpers for working with the user stack pointer */ + +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ + return regs->regs[29]; +} + +static inline void user_stack_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->regs[29] = val; +} + +#endif /* _ASM_PTRACE_H */ diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h new file mode 100644 index 0000000..e75adcf --- /dev/null +++ b/arch/mips/include/asm/reboot.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1997, 1999, 2001, 06 by Ralf Baechle + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#ifndef _ASM_REBOOT_H +#define _ASM_REBOOT_H + +extern void _machine_restart(void); + +#endif /* _ASM_REBOOT_H */ diff --git a/arch/mips/include/asm/reg.h b/arch/mips/include/asm/reg.h new file mode 100644 index 0000000..3ae5619 --- /dev/null +++ b/arch/mips/include/asm/reg.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Various register offset definitions for debuggers, core file + * examiners and whatnot. 
+ * + * Copyright (C) 1995, 1999 by Ralf Baechle + * Copyright (C) 1995, 1999 Silicon Graphics + */ +#ifndef __ASM_MIPS_REG_H +#define __ASM_MIPS_REG_H + +#if defined(CONFIG_32BIT) || defined(WANT_COMPAT_REG_H) + +#define EF_R0 6 +#define EF_R1 7 +#define EF_R2 8 +#define EF_R3 9 +#define EF_R4 10 +#define EF_R5 11 +#define EF_R6 12 +#define EF_R7 13 +#define EF_R8 14 +#define EF_R9 15 +#define EF_R10 16 +#define EF_R11 17 +#define EF_R12 18 +#define EF_R13 19 +#define EF_R14 20 +#define EF_R15 21 +#define EF_R16 22 +#define EF_R17 23 +#define EF_R18 24 +#define EF_R19 25 +#define EF_R20 26 +#define EF_R21 27 +#define EF_R22 28 +#define EF_R23 29 +#define EF_R24 30 +#define EF_R25 31 + +/* + * k0/k1 unsaved + */ +#define EF_R26 32 +#define EF_R27 33 + +#define EF_R28 34 +#define EF_R29 35 +#define EF_R30 36 +#define EF_R31 37 + +/* + * Saved special registers + */ +#define EF_LO 38 +#define EF_HI 39 + +#define EF_CP0_EPC 40 +#define EF_CP0_BADVADDR 41 +#define EF_CP0_STATUS 42 +#define EF_CP0_CAUSE 43 +#define EF_UNUSED0 44 + +#define EF_SIZE 180 + +#endif + +#ifdef CONFIG_64BIT + +#define EF_R0 0 +#define EF_R1 1 +#define EF_R2 2 +#define EF_R3 3 +#define EF_R4 4 +#define EF_R5 5 +#define EF_R6 6 +#define EF_R7 7 +#define EF_R8 8 +#define EF_R9 9 +#define EF_R10 10 +#define EF_R11 11 +#define EF_R12 12 +#define EF_R13 13 +#define EF_R14 14 +#define EF_R15 15 +#define EF_R16 16 +#define EF_R17 17 +#define EF_R18 18 +#define EF_R19 19 +#define EF_R20 20 +#define EF_R21 21 +#define EF_R22 22 +#define EF_R23 23 +#define EF_R24 24 +#define EF_R25 25 + +/* + * k0/k1 unsaved + */ +#define EF_R26 26 +#define EF_R27 27 + +#define EF_R28 28 +#define EF_R29 29 +#define EF_R30 30 +#define EF_R31 31 + +/* + * Saved special registers + */ +#define EF_LO 32 +#define EF_HI 33 + +#define EF_CP0_EPC 34 +#define EF_CP0_BADVADDR 35 +#define EF_CP0_STATUS 36 +#define EF_CP0_CAUSE 37 + +#define EF_SIZE 304 /* size in bytes */ + +#endif /* CONFIG_64BIT */ + +#endif /* __ASM_MIPS_REG_H */ diff --git a/arch/mips/include/asm/regdef.h b/arch/mips/include/asm/regdef.h new file mode 100644 index 0000000..e713749 --- /dev/null +++ b/arch/mips/include/asm/regdef.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1985 MIPS Computer Systems, Inc. + * Copyright (C) 1994, 95, 99, 2003 by Ralf Baechle + * Copyright (C) 1990 - 1992, 1999 Silicon Graphics, Inc. + * Copyright (C) 2011 Wind River Systems, + * written by Ralf Baechle + */ +#ifndef _ASM_REGDEF_H +#define _ASM_REGDEF_H + +#include + +#if _MIPS_SIM == _MIPS_SIM_ABI32 + +/* + * Symbolic register names for 32 bit ABI + */ +#define zero $0 /* wired zero */ +#define AT $1 /* assembler temp - uppercase because of ".set at" */ +#define v0 $2 /* return value */ +#define v1 $3 +#define a0 $4 /* argument registers */ +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define t0 $8 /* caller saved */ +#define t1 $9 +#define t2 $10 +#define t3 $11 +#define t4 $12 +#define ta0 $12 +#define t5 $13 +#define ta1 $13 +#define t6 $14 +#define ta2 $14 +#define t7 $15 +#define ta3 $15 +#define s0 $16 /* callee saved */ +#define s1 $17 +#define s2 $18 +#define s3 $19 +#define s4 $20 +#define s5 $21 +#define s6 $22 +#define s7 $23 +#define t8 $24 /* caller saved */ +#define t9 $25 +#define jp $25 /* PIC jump register */ +#define k0 $26 /* kernel scratch */ +#define k1 $27 +#define gp $28 /* global pointer */ +#define sp $29 /* stack pointer */ +#define fp $30 /* frame pointer */ +#define s8 $30 /* same like fp! 
*/ +#define ra $31 /* return address */ + +#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ + +#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 + +#define zero $0 /* wired zero */ +#define AT $at /* assembler temp - uppercase because of ".set at" */ +#define v0 $2 /* return value - caller saved */ +#define v1 $3 +#define a0 $4 /* argument registers */ +#define a1 $5 +#define a2 $6 +#define a3 $7 +#define a4 $8 /* arg reg 64 bit; caller saved in 32 bit */ +#define ta0 $8 +#define a5 $9 +#define ta1 $9 +#define a6 $10 +#define ta2 $10 +#define a7 $11 +#define ta3 $11 +#define t0 $12 /* caller saved */ +#define t1 $13 +#define t2 $14 +#define t3 $15 +#define s0 $16 /* callee saved */ +#define s1 $17 +#define s2 $18 +#define s3 $19 +#define s4 $20 +#define s5 $21 +#define s6 $22 +#define s7 $23 +#define t8 $24 /* caller saved */ +#define t9 $25 /* callee address for PIC/temp */ +#define jp $25 /* PIC jump register */ +#define k0 $26 /* kernel temporary */ +#define k1 $27 +#define gp $28 /* global pointer - caller saved for PIC */ +#define sp $29 /* stack pointer */ +#define fp $30 /* frame pointer */ +#define s8 $30 /* callee saved */ +#define ra $31 /* return address */ + +#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ + +#endif /* _ASM_REGDEF_H */ diff --git a/arch/mips/include/asm/relocs.h b/arch/mips/include/asm/relocs.h new file mode 100644 index 0000000..0987c4b --- /dev/null +++ b/arch/mips/include/asm/relocs.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * MIPS Relocations + * + * Copyright (c) 2017 Imagination Technologies Ltd. + */ + +#ifndef __ASM_MIPS_RELOCS_H__ +#define __ASM_MIPS_RELOCS_H__ + +#define R_MIPS_NONE 0 +#define R_MIPS_32 2 +#define R_MIPS_26 4 +#define R_MIPS_HI16 5 +#define R_MIPS_LO16 6 +#define R_MIPS_PC16 10 +#define R_MIPS_64 18 +#define R_MIPS_HIGHER 28 +#define R_MIPS_HIGHEST 29 +#define R_MIPS_PC21_S2 60 +#define R_MIPS_PC26_S2 61 + +#endif /* __ASM_MIPS_RELOCS_H__ */ diff --git a/arch/mips/include/asm/sections.h b/arch/mips/include/asm/sections.h new file mode 100644 index 0000000..93c30e9 --- /dev/null +++ b/arch/mips/include/asm/sections.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2012 The Chromium OS Authors. + */ + +#ifndef __ASM_MIPS_SECTIONS_H +#define __ASM_MIPS_SECTIONS_H + +#include + +/** + * __rel_start: Relocation data generated by the mips-relocs tool + * + * See arch/mips/lib/reloc.c for details on the format & use of this data. + */ +extern uint8_t __rel_start[]; + +#endif diff --git a/arch/mips/include/asm/sgidefs.h b/arch/mips/include/asm/sgidefs.h new file mode 100644 index 0000000..1c7a6c2 --- /dev/null +++ b/arch/mips/include/asm/sgidefs.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1996, 1999, 2001 Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2001 MIPS Technologies, Inc. + */ +#ifndef __ASM_SGIDEFS_H +#define __ASM_SGIDEFS_H + +/* + * Using a Linux compiler for building Linux seems logical, but not to + * everybody. + */ +#if 0 /* ndef __linux__ */ +#error Use a Linux compiler or give up. +#endif + +/* + * Definitions for the ISA levels + * + * With the introduction of the MIPS32 / MIPS64 instruction set definitions, + * MIPS ISAs are no longer subsets of each other. Therefore comparisons + * on these symbols except with == may result in unexpected results and + * are forbidden!
+ */ +#define _MIPS_ISA_MIPS1 1 +#define _MIPS_ISA_MIPS2 2 +#define _MIPS_ISA_MIPS3 3 +#define _MIPS_ISA_MIPS4 4 +#define _MIPS_ISA_MIPS5 5 +#define _MIPS_ISA_MIPS32 6 +#define _MIPS_ISA_MIPS64 7 + +/* + * Subprogram calling convention + */ +#define _MIPS_SIM_ABI32 1 +#define _MIPS_SIM_NABI32 2 +#define _MIPS_SIM_ABI64 3 + +#endif /* __ASM_SGIDEFS_H */ diff --git a/arch/mips/include/asm/spl.h b/arch/mips/include/asm/spl.h new file mode 100644 index 0000000..0a847ed --- /dev/null +++ b/arch/mips/include/asm/spl.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * (C) Copyright 2012 + * Texas Instruments, + */ +#ifndef _ASM_SPL_H_ +#define _ASM_SPL_H_ + +enum { + BOOT_DEVICE_RAM, + BOOT_DEVICE_MMC1, + BOOT_DEVICE_MMC2, + BOOT_DEVICE_MMC2_2, + BOOT_DEVICE_NAND, + BOOT_DEVICE_ONENAND, + BOOT_DEVICE_NOR, + BOOT_DEVICE_UART, + BOOT_DEVICE_SPI, + BOOT_DEVICE_USB, + BOOT_DEVICE_SATA, + BOOT_DEVICE_I2C, + BOOT_DEVICE_BOARD, + BOOT_DEVICE_DFU, + BOOT_DEVICE_XIP, + BOOT_DEVICE_BOOTROM, + BOOT_DEVICE_NONE +}; + +#ifndef CONFIG_DM +extern gd_t gdata; +#endif + +#endif diff --git a/arch/mips/include/asm/string.h b/arch/mips/include/asm/string.h new file mode 100644 index 0000000..faceaf9 --- /dev/null +++ b/arch/mips/include/asm/string.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 1994, 95, 96, 97, 98, 2000, 01 Ralf Baechle + * Copyright (c) 2000 by Silicon Graphics, Inc. + * Copyright (c) 2001 MIPS Technologies, Inc. + */ +#ifndef _ASM_STRING_H +#define _ASM_STRING_H + +/* + * We don't do inline string functions, since the + * optimised inline asm versions are not small. + */ + +#undef __HAVE_ARCH_STRCPY +extern char *strcpy(char *__dest, __const__ char *__src); + +#undef __HAVE_ARCH_STRNCPY +extern char *strncpy(char *__dest, __const__ char *__src, __kernel_size_t __n); + +#undef __HAVE_ARCH_STRCMP +extern int strcmp(__const__ char *__cs, __const__ char *__ct); + +#undef __HAVE_ARCH_STRNCMP +extern int strncmp(__const__ char *__cs, __const__ char *__ct, __kernel_size_t __count); + +#undef __HAVE_ARCH_MEMSET +extern void *memset(void *__s, int __c, __kernel_size_t __count); + +#undef __HAVE_ARCH_MEMCPY +extern void *memcpy(void *__to, __const__ void *__from, __kernel_size_t __n); + +#undef __HAVE_ARCH_MEMMOVE +extern void *memmove(void *__dest, __const__ void *__src, __kernel_size_t __n); + +#endif /* _ASM_STRING_H */ diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h new file mode 100644 index 0000000..79e6388 --- /dev/null +++ b/arch/mips/include/asm/system.h @@ -0,0 +1,285 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1994 - 1999 by Ralf Baechle + * Copyright (C) 1996 by Paul M. Antoine + * Copyright (C) 1994 - 1999 by Ralf Baechle + * + * Changed set_except_vector declaration to allow return of previous + * vector address value - necessary for "borrowing" vectors. + * + * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com + * Copyright (C) 2000 MIPS Technologies, Inc. 
+ */ +#ifndef _ASM_SYSTEM_H +#define _ASM_SYSTEM_H + +#include +#include +#include +#include +#if 0 +#include +#endif + +static __inline__ void +__sti(void) +{ + __asm__ __volatile__( + ".set\tpush\n\t" + ".set\treorder\n\t" + ".set\tnoat\n\t" + "mfc0\t$1,$12\n\t" + "ori\t$1,0x1f\n\t" + "xori\t$1,0x1e\n\t" + "mtc0\t$1,$12\n\t" + ".set\tpop\n\t" + : /* no outputs */ + : /* no inputs */ + : "$1", "memory"); +} + +/* + * For cli() we have to insert nops to make sure that the new value + * has actually arrived in the status register before the end of this + * macro. + * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs + * no nops at all. + */ +static __inline__ void +__cli(void) +{ + __asm__ __volatile__( + ".set\tpush\n\t" + ".set\treorder\n\t" + ".set\tnoat\n\t" + "mfc0\t$1,$12\n\t" + "ori\t$1,1\n\t" + "xori\t$1,1\n\t" + ".set\tnoreorder\n\t" + "mtc0\t$1,$12\n\t" + "nop\n\t" + "nop\n\t" + "nop\n\t" + ".set\tpop\n\t" + : /* no outputs */ + : /* no inputs */ + : "$1", "memory"); +} + +#define __save_flags(x) \ +__asm__ __volatile__( \ + ".set\tpush\n\t" \ + ".set\treorder\n\t" \ + "mfc0\t%0,$12\n\t" \ + ".set\tpop\n\t" \ + : "=r" (x)) + +#define __save_and_cli(x) \ +__asm__ __volatile__( \ + ".set\tpush\n\t" \ + ".set\treorder\n\t" \ + ".set\tnoat\n\t" \ + "mfc0\t%0,$12\n\t" \ + "ori\t$1,%0,1\n\t" \ + "xori\t$1,1\n\t" \ + ".set\tnoreorder\n\t" \ + "mtc0\t$1,$12\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + ".set\tpop\n\t" \ + : "=r" (x) \ + : /* no inputs */ \ + : "$1", "memory") + +#define __restore_flags(flags) \ +do { \ + unsigned long __tmp1; \ + \ + __asm__ __volatile__( \ + ".set\tnoreorder\t\t\t# __restore_flags\n\t" \ + ".set\tnoat\n\t" \ + "mfc0\t$1, $12\n\t" \ + "andi\t%0, 1\n\t" \ + "ori\t$1, 1\n\t" \ + "xori\t$1, 1\n\t" \ + "or\t%0, $1\n\t" \ + "mtc0\t%0, $12\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + "nop\n\t" \ + ".set\tat\n\t" \ + ".set\treorder" \ + : "=r" (__tmp1) \ + : "0" (flags) \ + : "$1", "memory"); \ +} while(0) + +#ifdef CONFIG_SMP + +extern void __global_sti(void); +extern void __global_cli(void); +extern unsigned long __global_save_flags(void); +extern void __global_restore_flags(unsigned long); +# define sti() __global_sti() +# define cli() __global_cli() +# define save_flags(x) do { x = __global_save_flags(); } while (0) +# define restore_flags(x) __global_restore_flags(x) +# define save_and_cli(x) do { save_flags(x); cli(); } while(0) + +#else /* Single processor */ + +# define sti() __sti() +# define cli() __cli() +# define save_flags(x) __save_flags(x) +# define save_and_cli(x) __save_and_cli(x) +# define restore_flags(x) __restore_flags(x) + +#endif /* SMP */ + +/* For spinlocks etc */ +#define local_irq_save(x) __save_and_cli(x); +#define local_irq_restore(x) __restore_flags(x); +#define local_irq_disable() __cli(); +#define local_irq_enable() __sti(); + +/* + * These are probably defined in an overly paranoid way ...
+ */ +#ifdef CONFIG_CPU_HAS_WB + +#include +#define rmb() do { } while(0) +#define wmb() wbflush() +#define mb() wbflush() + +#else /* CONFIG_CPU_HAS_WB */ + +#define mb() \ +__asm__ __volatile__( \ + "# prevent instructions being moved around\n\t" \ + ".set\tnoreorder\n\t" \ + "# 8 nops to fool the R4400 pipeline\n\t" \ + "nop;nop;nop;nop;nop;nop;nop;nop\n\t" \ + ".set\treorder" \ + : /* no output */ \ + : /* no input */ \ + : "memory") +#define rmb() mb() +#define wmb() mb() + +#endif /* CONFIG_CPU_HAS_WB */ + +#ifdef CONFIG_SMP +#define smp_mb() mb() +#define smp_rmb() rmb() +#define smp_wmb() wmb() +#else +#define smp_mb() barrier() +#define smp_rmb() barrier() +#define smp_wmb() barrier() +#endif + +#define set_mb(var, value) \ +do { var = value; mb(); } while (0) + +#define set_wmb(var, value) \ +do { var = value; wmb(); } while (0) + +#if !defined (_LANGUAGE_ASSEMBLY) +/* + * switch_to(n) should switch tasks to task nr n, first + * checking that n isn't the current task, in which case it does nothing. + */ +#if 0 +extern asmlinkage void *resume(void *last, void *next); +#endif +#endif /* !defined (_LANGUAGE_ASSEMBLY) */ + +#define prepare_to_switch() do { } while(0) +#define switch_to(prev,next,last) \ +do { \ + (last) = resume(prev, next); \ +} while(0) + +/* + * For 32 and 64 bit operands we can take advantage of ll and sc. + * FIXME: This doesn't work for R3000 machines. + */ +static __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val) +{ +#ifdef CONFIG_CPU_HAS_LLSC + unsigned long dummy; + + __asm__ __volatile__( + ".set\tnoreorder\t\t\t# xchg_u32\n\t" + ".set\tnoat\n\t" + "ll\t%0, %3\n" + "1:\tmove\t$1, %2\n\t" + "sc\t$1, %1\n\t" + "beqzl\t$1, 1b\n\t" + " ll\t%0, %3\n\t" + ".set\tat\n\t" + ".set\treorder" + : "=r" (val), "=o" (*m), "=r" (dummy) + : "o" (*m), "2" (val) + : "memory"); + + return val; +#else + unsigned long flags, retval; + + save_flags(flags); + cli(); + retval = *m; + *m = val; + restore_flags(flags); + return retval; +#endif /* Processor-dependent optimization */ +} + +#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) +#define tas(ptr) (xchg((ptr),1)) + +static __inline__ unsigned long +__xchg(unsigned long x, volatile void * ptr, int size) +{ + switch (size) { + case 4: + return xchg_u32(ptr, x); + } + return x; +} + +extern void *set_except_vector(int n, void *addr); + +extern void __die(const char *, struct pt_regs *, const char *where, + unsigned long line) __attribute__((noreturn)); +extern void __die_if_kernel(const char *, struct pt_regs *, const char *where, + unsigned long line); + +#define die(msg, regs) \ + __die(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__) +#define die_if_kernel(msg, regs) \ + __die_if_kernel(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__) + +static inline void execution_hazard_barrier(void) +{ + __asm__ __volatile__( + ".set noreorder\n" + "ehb\n" + ".set reorder"); +} + +static inline void instruction_hazard_barrier(void) +{ + unsigned long tmp; + + asm volatile( + __stringify(PTR_LA) "\t%0, 1f\n" + " jr.hb %0\n" + "1: .insn" + : "=&r"(tmp)); +} + +#endif /* _ASM_SYSTEM_H */ diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h new file mode 100644 index 0000000..925d7ef --- /dev/null +++ b/arch/mips/include/asm/types.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. 
+ */ +#ifndef _ASM_TYPES_H +#define _ASM_TYPES_H + +#include + +#ifndef __ASSEMBLY__ + +typedef unsigned short umode_t; + +#endif /* __ASSEMBLY__ */ + +/* + * These aren't exported outside the kernel to avoid name space clashes + */ +#ifdef __KERNEL__ + +#define BITS_PER_LONG _MIPS_SZLONG + +#ifndef __ASSEMBLY__ + +#if (defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) \ + || defined(CONFIG_64BIT) +typedef u64 dma_addr_t; + +typedef u64 phys_addr_t; +typedef u64 phys_size_t; + +#else +typedef u32 dma_addr_t; + +typedef u32 phys_addr_t; +typedef u32 phys_size_t; + +#endif +typedef u64 dma64_addr_t; + +/* + * Don't use phys_t. You've been warned. + */ +#ifdef CONFIG_64BIT_PHYS_ADDR +typedef unsigned long long phys_t; +#else +typedef unsigned long phys_t; +#endif + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ + +#endif /* _ASM_TYPES_H */ diff --git a/arch/mips/include/asm/u-boot-mips.h b/arch/mips/include/asm/u-boot-mips.h new file mode 100644 index 0000000..88438b9 --- /dev/null +++ b/arch/mips/include/asm/u-boot-mips.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _U_BOOT_MIPS_H_ +#define _U_BOOT_MIPS_H_ + +void exc_handler(void); +void except_vec3_generic(void); +void except_vec_ejtag_debug(void); + +int arch_misc_init(void); + +#endif /* _U_BOOT_MIPS_H_ */ diff --git a/arch/mips/include/asm/u-boot.h b/arch/mips/include/asm/u-boot.h new file mode 100644 index 0000000..4a9bbaf --- /dev/null +++ b/arch/mips/include/asm/u-boot.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * (C) Copyright 2003 + * Wolfgang Denk, DENX Software Engineering, + * + ******************************************************************** + * NOTE: This header file defines an interface to U-Boot. Including + * this (unmodified) header file in another file is considered normal + * use of U-Boot, and does *not* fall under the heading of "derived + * work". + ******************************************************************** + */ + +#ifndef _U_BOOT_H_ +#define _U_BOOT_H_ 1 + +/* Use the generic board which requires a unified bd_info */ +#include +#include + +/* For image.h:image_check_target_arch() */ +#define IH_ARCH_DEFAULT IH_ARCH_MIPS + +#endif /* _U_BOOT_H_ */ diff --git a/arch/mips/include/asm/unaligned.h b/arch/mips/include/asm/unaligned.h new file mode 100644 index 0000000..debb9cf --- /dev/null +++ b/arch/mips/include/asm/unaligned.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef _ASM_MIPS_UNALIGNED_H +#define _ASM_MIPS_UNALIGNED_H + +#include +#if defined(__MIPSEB__) +#define get_unaligned __get_unaligned_be +#define put_unaligned __put_unaligned_be +#elif defined(__MIPSEL__) +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le +#else +#error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???" +#endif + +#include +#include +#include + +#endif /* _ASM_MIPS_UNALIGNED_H */ -- cgit v1.2.3
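As a quick illustration of how the accessors imported above compose, here is a minimal bare-metal sketch. It is not part of the imported sources: the EXAMPLE_TIMER_HZ constant and the example_* functions are made up for demonstration, and the bit positions follow the architectural MIPS32 Status and EntryLo layouts rather than anything board-specific. Only read_c0_count(), write_c0_compare(), set_c0_status() (generated by __BUILD_SET_C0(status) in mipsregs.h) and write_one_tlb() come from the patch itself.

#include <asm/mipsregs.h>

#define EXAMPLE_TIMER_HZ	100000000	/* hypothetical CP0 Count rate, 100 MHz */

/* Busy-wait on the CP0 Count register ($9, sel 0). */
static void example_delay_us(unsigned int us)
{
	unsigned int start = read_c0_count();
	unsigned int ticks = (EXAMPLE_TIMER_HZ / 1000000) * us;

	/* Count wraps modulo 2^32; unsigned subtraction handles the wrap. */
	while ((unsigned int)(read_c0_count() - start) < ticks)
		;
}

/* Arm a Compare ($11) timer interrupt 'ticks' counts from now. */
static void example_timer_arm(unsigned int ticks)
{
	write_c0_compare(read_c0_count() + ticks);

	/*
	 * set_c0_status() is emitted by __BUILD_SET_C0(status): it reads
	 * Status, ORs in the mask, writes the result back and returns the
	 * old value. In the MIPS32 Status layout, bit 15 (IM7) unmasks the
	 * interrupt line the timer is conventionally routed to, and bit 0
	 * (IE) enables interrupts globally.
	 */
	set_c0_status((1 << 15) | (1 << 0));
}

/* Install a wired mapping for one 4 KiB page pair at TLB index 0. */
static void example_map_pages(unsigned long vaddr, unsigned long paddr)
{
	/*
	 * MIPS32 EntryLo without RI/XI: PFN starts at bit 6, the cache
	 * attribute sits in bits 5:3 (3 = cacheable noncoherent), then
	 * the D (dirty/writable), V (valid) and G (global) flags.
	 */
	u32 lo0 = (u32)((paddr >> 12) << 6) | (3 << 3) | 0x7;
	u32 lo1 = (u32)(((paddr + 0x1000) >> 12) << 6) | (3 << 3) | 0x7;

	/* EntryHi takes the VPN2: the virtual address with its low 13 bits
	 * clear, since each TLB entry maps an even/odd page pair. */
	write_one_tlb(0, 0 /* PageMask = 4 KiB */, (u32)(vaddr & ~0x1fffUL),
		      lo0, lo1);
}

On the hypothetical 100 MHz clock above, example_timer_arm(100000) would raise the first timer interrupt after 1 ms; write_one_tlb() sequences the EntryLo0/PageMask/EntryLo1/EntryHi/Index writes and the tlbwi exactly as defined at the end of mipsregs.h.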