author    Patrick Georgi <patrick@georgi-clan.de>  2015-03-07 10:57:25 +0100
committer Patrick Georgi <pgeorgi@google.com>      2015-03-08 13:56:08 +0100
commit    f0bbc95f12c99ac956c9d8a85bac38db4ad6bcb4 (patch)
tree      12ee9ef9acfb5307b068396d9a0de0db3aae4dff
parent    53c388fe6dfb4fc4ffcee6c58345d353c6ec33bf (diff)
download  coreboot-f0bbc95f12c99ac956c9d8a85bac38db4ad6bcb4.tar.xz
crossgcc: Add RISC-V support
Change-Id: If1e0f7ed21f67d7a185dad251ede81ddbc18c4e5
Signed-off-by: Patrick Georgi <patrick@georgi-clan.de>
Reviewed-on: http://review.coreboot.org/8629
Tested-by: build bot (Jenkins)
Reviewed-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
-rw-r--r--  Makefile.inc                                         14
-rw-r--r--  util/crossgcc/Makefile                                9
-rwxr-xr-x  util/crossgcc/buildgcc                                1
-rw-r--r--  util/crossgcc/patches/binutils-2.25_riscv.patch    9755
-rw-r--r--  util/crossgcc/patches/gcc-4.9.2_riscv.patch       11296
5 files changed, 21070 insertions, 5 deletions
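
With this change, the RISC-V toolchain can be built from the top-level Makefile like the other cross toolchains. A usage sketch (inferring from the *-without-gdb target names that buildgcc's -G flag additionally builds GDB):

    make crossgcc-riscv      # riscv-elf binutils + GCC, no GDB
    make crosstools-riscv    # riscv-elf binutils + GCC, plus GDB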
diff --git a/Makefile.inc b/Makefile.inc
index 89a05534d6..656d988668 100644
--- a/Makefile.inc
+++ b/Makefile.inc
@@ -424,9 +424,9 @@ gitconfig:
git config remote.origin.push HEAD:refs/for/master
(git config --global user.name >/dev/null && git config --global user.email >/dev/null) || (printf 'Please configure your name and email in git:\n\n git config --global user.name "Your Name Comes Here"\n git config --global user.email your.email@example.com\n'; exit 1)
-crossgcc: crossgcc-i386 crossgcc-arm crossgcc-aarch64 crossgcc-mips
+crossgcc: crossgcc-i386 crossgcc-arm crossgcc-aarch64 crossgcc-mips crossgcc-riscv
-.PHONY: crossgcc-i386 crossgcc-arm crossgcc-aarch64 crossgcc-mips
+.PHONY: crossgcc-i386 crossgcc-arm crossgcc-aarch64 crossgcc-mips crossgcc-riscv
crossgcc-i386: clean-for-update
$(MAKE) -C util/crossgcc build-i386-without-gdb
@@ -439,9 +439,12 @@ crossgcc-aarch64: clean-for-update
crossgcc-mips: clean-for-update
$(MAKE) -C util/crossgcc build-mips-without-gdb
-crosstools: crosstools-i386 crosstools-arm crosstools-aarch64 crosstools-mips
+crossgcc-riscv: clean-for-update
+ $(MAKE) -C util/crossgcc build-riscv-without-gdb
-.PHONY: crosstools-i386 crosstools-arm crosstools-aarch64 crosstools-mips
+crosstools: crosstools-i386 crosstools-arm crosstools-aarch64 crosstools-mips crosstools-riscv
+
+.PHONY: crosstools-i386 crosstools-arm crosstools-aarch64 crosstools-mips crosstools-riscv
crosstools-i386: clean-for-update
$(MAKE) -C util/crossgcc build-i386
@@ -454,6 +457,9 @@ crosstools-aarch64: clean-for-update
crosstools-mips: clean-for-update
$(MAKE) -C util/crossgcc build-mips
+crosstools-riscv: clean-for-update
+ $(MAKE) -C util/crossgcc build-riscv
+
crossgcc-clean: clean-for-update
$(MAKE) -C util/crossgcc clean
diff --git a/util/crossgcc/Makefile b/util/crossgcc/Makefile
index 0785b65cca..4c3e19e43f 100644
--- a/util/crossgcc/Makefile
+++ b/util/crossgcc/Makefile
@@ -1,4 +1,4 @@
-all: build-i386 build-armv7a build-aarch64 build-mips
+all: build-i386 build-armv7a build-aarch64 build-mips build-riscv
build-i386:
bash ./buildgcc -G -p i386-elf
@@ -12,6 +12,9 @@ build-aarch64:
build-mips:
bash ./buildgcc -G -p mips-elf
+build-riscv:
+ bash ./buildgcc -G -p riscv-elf
+
.PHONY: build-i386-without-gdb
build-i386-without-gdb:
bash ./buildgcc -p i386-elf
@@ -28,6 +31,10 @@ build-aarch64-without-gdb:
build-mips-without-gdb:
bash ./buildgcc -p mips-elf
+.PHONY: build-riscv-without-gdb
+build-riscv-without-gdb:
+ bash ./buildgcc -p riscv-elf
+
clean:
rm -rf xgcc
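
The new util/crossgcc targets reduce to direct buildgcc invocations, so the toolchain can also be built from that directory. A sketch using the same commands the Makefile runs:

    cd util/crossgcc
    bash ./buildgcc -p riscv-elf       # as in build-riscv-without-gdb
    bash ./buildgcc -G -p riscv-elf    # as in build-riscv (-G presumably adds GDB)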
diff --git a/util/crossgcc/buildgcc b/util/crossgcc/buildgcc
index e9936dbaae..8ddeea0f46 100755
--- a/util/crossgcc/buildgcc
+++ b/util/crossgcc/buildgcc
@@ -230,6 +230,7 @@ case "$TARGETARCH" in
i386-elf) ;;
i386-mingw32) ;;
mipsel-elf) ;;
+ riscv-elf) ;;
i386*) TARGETARCH=i386-elf;;
arm*) TARGETARCH=armv7-a-eabi;;
aarch64*) TARGETARCH=aarch64-elf;;
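
Note that, unlike the i386*/arm*/aarch64* patterns, RISC-V gets only an exact riscv-elf match rather than a normalizing riscv*) pattern. A sketch of the resulting behavior (riscv64-elf is a hypothetical input, not a target this patch adds):

    bash ./buildgcc -p riscv-elf      # accepted as-is
    bash ./buildgcc -p riscv64-elf    # not normalized; handled by buildgcc's default case (not shown here)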
diff --git a/util/crossgcc/patches/binutils-2.25_riscv.patch b/util/crossgcc/patches/binutils-2.25_riscv.patch
new file mode 100644
index 0000000000..66d42e4ab4
--- /dev/null
+++ b/util/crossgcc/patches/binutils-2.25_riscv.patch
@@ -0,0 +1,9755 @@
+Created from https://github.com/riscv/riscv-gnu-toolchain,
+commit ddce5d17f14831f4957e57c415aca77817c2a82c
+
+diff -urN original-binutils/bfd/archures.c binutils/bfd/archures.c
+--- original-binutils/bfd/archures.c 2014-10-14 09:32:02.000000000 +0200
++++ binutils-2.25/bfd/archures.c 2015-03-07 09:55:02.355135671 +0100
+@@ -597,6 +597,7 @@
+ extern const bfd_arch_info_type bfd_plugin_arch;
+ extern const bfd_arch_info_type bfd_powerpc_archs[];
+ #define bfd_powerpc_arch bfd_powerpc_archs[0]
++extern const bfd_arch_info_type bfd_riscv_arch;
+ extern const bfd_arch_info_type bfd_rs6000_arch;
+ extern const bfd_arch_info_type bfd_rl78_arch;
+ extern const bfd_arch_info_type bfd_rx_arch;
+@@ -683,6 +684,7 @@
+ &bfd_or1k_arch,
+ &bfd_pdp11_arch,
+ &bfd_powerpc_arch,
++ &bfd_riscv_arch,
+ &bfd_rs6000_arch,
+ &bfd_rl78_arch,
+ &bfd_rx_arch,
+diff -urN original-binutils/bfd/bfd-in2.h binutils/bfd/bfd-in2.h
+--- original-binutils/bfd/bfd-in2.h 2014-11-04 10:54:41.000000000 +0100
++++ binutils-2.25/bfd/bfd-in2.h 2015-03-07 09:55:02.359135671 +0100
+@@ -2043,6 +2043,9 @@
+ #define bfd_mach_ppc_e6500 5007
+ #define bfd_mach_ppc_titan 83
+ #define bfd_mach_ppc_vle 84
++ bfd_arch_riscv, /* RISC-V */
++#define bfd_mach_riscv32 132
++#define bfd_mach_riscv64 164
+ bfd_arch_rs6000, /* IBM RS/6000 */
+ #define bfd_mach_rs6k 6000
+ #define bfd_mach_rs6k_rs1 6001
+@@ -5531,6 +5534,41 @@
+ value in a word. The relocation is relative offset from */
+ BFD_RELOC_MICROBLAZE_32_GOTOFF,
+
++/* RISC-V relocations */
++ BFD_RELOC_RISCV_HI20,
++ BFD_RELOC_RISCV_PCREL_HI20,
++ BFD_RELOC_RISCV_PCREL_LO12_I,
++ BFD_RELOC_RISCV_PCREL_LO12_S,
++ BFD_RELOC_RISCV_LO12_I,
++ BFD_RELOC_RISCV_LO12_S,
++ BFD_RELOC_RISCV_GPREL12_I,
++ BFD_RELOC_RISCV_GPREL12_S,
++ BFD_RELOC_RISCV_TPREL_HI20,
++ BFD_RELOC_RISCV_TPREL_LO12_I,
++ BFD_RELOC_RISCV_TPREL_LO12_S,
++ BFD_RELOC_RISCV_TPREL_ADD,
++ BFD_RELOC_RISCV_CALL,
++ BFD_RELOC_RISCV_CALL_PLT,
++ BFD_RELOC_RISCV_ADD8,
++ BFD_RELOC_RISCV_ADD16,
++ BFD_RELOC_RISCV_ADD32,
++ BFD_RELOC_RISCV_ADD64,
++ BFD_RELOC_RISCV_SUB8,
++ BFD_RELOC_RISCV_SUB16,
++ BFD_RELOC_RISCV_SUB32,
++ BFD_RELOC_RISCV_SUB64,
++ BFD_RELOC_RISCV_GOT_HI20,
++ BFD_RELOC_RISCV_TLS_GOT_HI20,
++ BFD_RELOC_RISCV_TLS_GD_HI20,
++ BFD_RELOC_RISCV_JMP,
++ BFD_RELOC_RISCV_TLS_DTPMOD32,
++ BFD_RELOC_RISCV_TLS_DTPREL32,
++ BFD_RELOC_RISCV_TLS_DTPMOD64,
++ BFD_RELOC_RISCV_TLS_DTPREL64,
++ BFD_RELOC_RISCV_TLS_TPREL32,
++ BFD_RELOC_RISCV_TLS_TPREL64,
++ BFD_RELOC_RISCV_ALIGN,
++
+ /* This is used to tell the dynamic linker to copy the value out of
+ the dynamic object into the runtime process image. */
+ BFD_RELOC_MICROBLAZE_COPY,
+diff -urN original-binutils/bfd/config.bfd binutils/bfd/config.bfd
+--- original-binutils/bfd/config.bfd 2014-10-14 09:32:02.000000000 +0200
++++ binutils-2.25/bfd/config.bfd 2015-03-07 09:55:02.359135671 +0100
+@@ -119,6 +119,7 @@
+ pdp11*) targ_archs=bfd_pdp11_arch ;;
+ pj*) targ_archs="bfd_pj_arch bfd_i386_arch";;
+ powerpc*) targ_archs="bfd_rs6000_arch bfd_powerpc_arch" ;;
++riscv*) targ_archs=bfd_riscv_arch ;;
+ rs6000) targ_archs="bfd_rs6000_arch bfd_powerpc_arch" ;;
+ s390*) targ_archs=bfd_s390_arch ;;
+ sh*) targ_archs=bfd_sh_arch ;;
+@@ -1319,6 +1320,14 @@
+ targ_defvec=rl78_elf32_vec
+ ;;
+
++#ifdef BFD64
++ riscv*-*-*)
++ targ_defvec=riscv_elf64_vec
++ targ_selvecs="riscv_elf32_vec riscv_elf64_vec"
++ want64=true
++ ;;
++#endif
++
+ rx-*-elf)
+ targ_defvec=rx_elf32_le_vec
+ targ_selvecs="rx_elf32_be_vec rx_elf32_le_vec rx_elf32_be_ns_vec"
+diff -urN original-binutils/bfd/configure binutils/bfd/configure
+--- original-binutils/bfd/configure 2014-12-23 15:22:04.000000000 +0100
++++ binutils-2.25/bfd/configure 2015-03-07 09:55:02.367135671 +0100
+@@ -15506,6 +15506,8 @@
+ powerpc_pei_vec) tb="$tb pei-ppc.lo peigen.lo cofflink.lo" ;;
+ powerpc_pei_le_vec) tb="$tb pei-ppc.lo peigen.lo cofflink.lo" ;;
+ powerpc_xcoff_vec) tb="$tb coff-rs6000.lo xcofflink.lo" ;;
++ riscv_elf32_vec) tb="$tb elf32-riscv.lo elfxx-riscv.lo elf32.lo $elf" ;;
++ riscv_elf64_vec) tb="$tb elf64-riscv.lo elf64.lo elfxx-riscv.lo elf32.lo $elf"; target_size=64 ;;
+ rl78_elf32_vec) tb="$tb elf32-rl78.lo elf32.lo $elf" ;;
+ rs6000_xcoff64_vec) tb="$tb coff64-rs6000.lo xcofflink.lo aix5ppc-core.lo"; target_size=64 ;;
+ rs6000_xcoff64_aix_vec) tb="$tb coff64-rs6000.lo xcofflink.lo aix5ppc-core.lo"; target_size=64 ;;
+diff -urN original-binutils/bfd/configure.ac binutils/bfd/configure.ac
+--- original-binutils/bfd/configure.ac 2014-10-14 09:32:02.000000000 +0200
++++ binutils-2.25/bfd/configure.ac 2015-03-07 09:55:02.367135671 +0100
+@@ -907,6 +907,8 @@
+ powerpc_pei_vec) tb="$tb pei-ppc.lo peigen.lo cofflink.lo" ;;
+ powerpc_pei_le_vec) tb="$tb pei-ppc.lo peigen.lo cofflink.lo" ;;
+ powerpc_xcoff_vec) tb="$tb coff-rs6000.lo xcofflink.lo" ;;
++ riscv_elf32_vec) tb="$tb elf32-riscv.lo elfxx-riscv.lo elf32.lo $elf" ;;
++ riscv_elf64_vec) tb="$tb elf64-riscv.lo elf64.lo elfxx-riscv.lo elf32.lo $elf"; target_size=64 ;;
+ rl78_elf32_vec) tb="$tb elf32-rl78.lo elf32.lo $elf" ;;
+ rs6000_xcoff64_vec) tb="$tb coff64-rs6000.lo xcofflink.lo aix5ppc-core.lo"; target_size=64 ;;
+ rs6000_xcoff64_aix_vec) tb="$tb coff64-rs6000.lo xcofflink.lo aix5ppc-core.lo"; target_size=64 ;;
+diff -urN original-binutils/bfd/cpu-riscv.c binutils/bfd/cpu-riscv.c
+--- original-binutils/bfd/cpu-riscv.c 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/bfd/cpu-riscv.c 2015-03-07 09:51:45.655139025 +0100
+@@ -0,0 +1,80 @@
++/* BFD backend for RISC-V
++ Copyright 2011-2014 Free Software Foundation, Inc.
++
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target.
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++#include "sysdep.h"
++#include "bfd.h"
++#include "libbfd.h"
++
++static const bfd_arch_info_type *riscv_compatible
++ (const bfd_arch_info_type *, const bfd_arch_info_type *);
++
++/* The default routine tests bits_per_word, which is wrong on RISC-V, as
++ RISC-V word size doesn't correlate with reloc size. */
++
++static const bfd_arch_info_type *
++riscv_compatible (const bfd_arch_info_type *a, const bfd_arch_info_type *b)
++{
++ if (a->arch != b->arch)
++ return NULL;
++
++ /* Machine compatibility is checked in
++ _bfd_riscv_elf_merge_private_bfd_data. */
++
++ return a;
++}
++
++#define N(BITS_WORD, BITS_ADDR, NUMBER, PRINT, DEFAULT, NEXT) \
++ { \
++ BITS_WORD, /* bits in a word */ \
++ BITS_ADDR, /* bits in an address */ \
++ 8, /* 8 bits in a byte */ \
++ bfd_arch_riscv, \
++ NUMBER, \
++ "riscv", \
++ PRINT, \
++ 3, \
++ DEFAULT, \
++ riscv_compatible, \
++ bfd_default_scan, \
++ bfd_arch_default_fill, \
++ NEXT, \
++ }
++
++enum
++{
++ I_riscv64,
++ I_riscv32
++};
++
++#define NN(index) (&arch_info_struct[(index) + 1])
++
++static const bfd_arch_info_type arch_info_struct[] =
++{
++ N (64, 64, bfd_mach_riscv64, "riscv:rv64", FALSE, NN(I_riscv64)),
++ N (32, 32, bfd_mach_riscv32, "riscv:rv32", FALSE, 0)
++};
++
++/* The default architecture is riscv:rv64. */
++
++const bfd_arch_info_type bfd_riscv_arch =
++N (64, 64, 0, "riscv", TRUE, &arch_info_struct[0]);
+diff -urN original-binutils/bfd/elf-bfd.h binutils/bfd/elf-bfd.h
+--- original-binutils/bfd/elf-bfd.h 2014-12-23 09:47:10.000000000 +0100
++++ binutils-2.25/bfd/elf-bfd.h 2015-03-07 09:55:02.367135671 +0100
+@@ -433,6 +433,7 @@
+ XGATE_ELF_DATA,
+ TILEGX_ELF_DATA,
+ TILEPRO_ELF_DATA,
++ RISCV_ELF_DATA,
+ GENERIC_ELF_DATA
+ };
+
+diff -urN original-binutils/bfd/elfnn-riscv.c binutils/bfd/elfnn-riscv.c
+--- original-binutils/bfd/elfnn-riscv.c 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/bfd/elfnn-riscv.c 2015-03-07 09:51:45.655139025 +0100
+@@ -0,0 +1,2954 @@
++/* RISC-V-specific support for NN-bit ELF.
++ Copyright 2011-2014 Free Software Foundation, Inc.
++
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on TILE-Gx and MIPS targets.
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++
++/* This file handles RISC-V ELF targets. */
++
++#include "sysdep.h"
++#include "bfd.h"
++#include "libbfd.h"
++#include "bfdlink.h"
++#include "genlink.h"
++#include "elf-bfd.h"
++#include "elfxx-riscv.h"
++#include "elf/riscv.h"
++#include "opcode/riscv.h"
++
++#define ARCH_SIZE NN
++
++#define MINUS_ONE ((bfd_vma)0 - 1)
++
++#define RISCV_ELF_LOG_WORD_BYTES (ARCH_SIZE == 32 ? 2 : 3)
++
++#define RISCV_ELF_WORD_BYTES (1 << RISCV_ELF_LOG_WORD_BYTES)
++
++/* The name of the dynamic interpreter. This is put in the .interp
++ section. */
++
++#define ELF64_DYNAMIC_INTERPRETER "/lib/ld.so.1"
++#define ELF32_DYNAMIC_INTERPRETER "/lib32/ld.so.1"
++
++/* The RISC-V linker needs to keep track of the number of relocs that it
++ decides to copy as dynamic relocs in check_relocs for each symbol.
++ This is so that it can later discard them if they are found to be
++ unnecessary. We store the information in a field extending the
++ regular ELF linker hash table. */
++
++struct riscv_elf_dyn_relocs
++{
++ struct riscv_elf_dyn_relocs *next;
++
++ /* The input section of the reloc. */
++ asection *sec;
++
++ /* Total number of relocs copied for the input section. */
++ bfd_size_type count;
++
++ /* Number of pc-relative relocs copied for the input section. */
++ bfd_size_type pc_count;
++};
++
++/* RISC-V ELF linker hash entry. */
++
++struct riscv_elf_link_hash_entry
++{
++ struct elf_link_hash_entry elf;
++
++ /* Track dynamic relocs copied for this symbol. */
++ struct riscv_elf_dyn_relocs *dyn_relocs;
++
++#define GOT_UNKNOWN 0
++#define GOT_NORMAL 1
++#define GOT_TLS_GD 2
++#define GOT_TLS_IE 4
++#define GOT_TLS_LE 8
++ char tls_type;
++};
++
++#define riscv_elf_hash_entry(ent) \
++ ((struct riscv_elf_link_hash_entry *)(ent))
++
++struct _bfd_riscv_elf_obj_tdata
++{
++ struct elf_obj_tdata root;
++
++ /* tls_type for each local got entry. */
++ char *local_got_tls_type;
++};
++
++#define _bfd_riscv_elf_tdata(abfd) \
++ ((struct _bfd_riscv_elf_obj_tdata *) (abfd)->tdata.any)
++
++#define _bfd_riscv_elf_local_got_tls_type(abfd) \
++ (_bfd_riscv_elf_tdata (abfd)->local_got_tls_type)
++
++#define _bfd_riscv_elf_tls_type(abfd, h, symndx) \
++ (*((h) != NULL ? &riscv_elf_hash_entry(h)->tls_type \
++ : &_bfd_riscv_elf_local_got_tls_type (abfd) [symndx]))
++
++#define is_riscv_elf(bfd) \
++ (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
++ && elf_tdata (bfd) != NULL \
++ && elf_object_id (bfd) == RISCV_ELF_DATA)
++
++#include "elf/common.h"
++#include "elf/internal.h"
++
++struct riscv_elf_link_hash_table
++{
++ struct elf_link_hash_table elf;
++
++ /* Short-cuts to get to dynamic linker sections. */
++ asection *sdynbss;
++ asection *srelbss;
++ asection *sdyntdata;
++
++ /* Small local sym to section mapping cache. */
++ struct sym_cache sym_cache;
++};
++
++
++/* Get the RISC-V ELF linker hash table from a link_info structure. */
++#define riscv_elf_hash_table(p) \
++ (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
++ == RISCV_ELF_DATA ? ((struct riscv_elf_link_hash_table *) ((p)->hash)) : NULL)
++
++static void
++riscv_info_to_howto_rela (bfd *abfd ATTRIBUTE_UNUSED,
++ arelent *cache_ptr,
++ Elf_Internal_Rela *dst)
++{
++ cache_ptr->howto = riscv_elf_rtype_to_howto (ELFNN_R_TYPE (dst->r_info));
++}
++
++static void
++riscv_elf_append_rela (bfd *abfd, asection *s, Elf_Internal_Rela *rel)
++{
++ const struct elf_backend_data *bed;
++ bfd_byte *loc;
++
++ bed = get_elf_backend_data (abfd);
++ loc = s->contents + (s->reloc_count++ * bed->s->sizeof_rela);
++ bed->s->swap_reloca_out (abfd, rel, loc);
++}
++
++/* PLT/GOT stuff */
++
++#define PLT_HEADER_INSNS 8
++#define PLT_ENTRY_INSNS 4
++#define PLT_HEADER_SIZE (PLT_HEADER_INSNS * 4)
++#define PLT_ENTRY_SIZE (PLT_ENTRY_INSNS * 4)
++
++#define GOT_ENTRY_SIZE RISCV_ELF_WORD_BYTES
++
++#define GOTPLT_HEADER_SIZE (2 * GOT_ENTRY_SIZE)
++
++#define sec_addr(sec) ((sec)->output_section->vma + (sec)->output_offset)
++
++static bfd_vma
++riscv_elf_got_plt_val (bfd_vma plt_index, struct bfd_link_info *info)
++{
++ return sec_addr (riscv_elf_hash_table (info)->elf.sgotplt)
++ + GOTPLT_HEADER_SIZE + (plt_index * GOT_ENTRY_SIZE);
++}
++
++#if ARCH_SIZE == 32
++# define MATCH_LREG MATCH_LW
++#else
++# define MATCH_LREG MATCH_LD
++#endif
++
++/* The format of the first PLT entry. */
++
++static void
++riscv_make_plt0_entry(bfd_vma gotplt_addr, bfd_vma addr, uint32_t *entry)
++{
++ /* auipc t2, %hi(.got.plt)
++ sub t1, t1, t0 # shifted .got.plt offset + hdr size + 12
++ l[w|d] t3, %lo(.got.plt)(t2) # _dl_runtime_resolve
++ addi t1, t1, -(hdr size + 12) # shifted .got.plt offset
++ addi t0, t2, %lo(.got.plt) # &.got.plt
++ srli t1, t1, log2(16/PTRSIZE) # .got.plt offset
++ l[w|d] t0, PTRSIZE(t0) # link map
++ jr t3 */
++
++ entry[0] = RISCV_UTYPE (AUIPC, X_T2, RISCV_PCREL_HIGH_PART (gotplt_addr, addr));
++ entry[1] = RISCV_RTYPE (SUB, X_T1, X_T1, X_T0);
++ entry[2] = RISCV_ITYPE (LREG, X_T3, X_T2, RISCV_PCREL_LOW_PART (gotplt_addr, addr));
++ entry[3] = RISCV_ITYPE (ADDI, X_T1, X_T1, -(PLT_HEADER_SIZE + 12));
++ entry[4] = RISCV_ITYPE (ADDI, X_T0, X_T2, RISCV_PCREL_LOW_PART (gotplt_addr, addr));
++ entry[5] = RISCV_ITYPE (SRLI, X_T1, X_T1, 4 - RISCV_ELF_LOG_WORD_BYTES);
++ entry[6] = RISCV_ITYPE (LREG, X_T0, X_T0, RISCV_ELF_WORD_BYTES);
++ entry[7] = RISCV_ITYPE (JALR, 0, X_T3, 0);
++}
++
++/* The format of subsequent PLT entries. */
++
++static void
++riscv_make_plt_entry(bfd_vma got_address, bfd_vma addr, uint32_t *entry)
++{
++ /* auipc t1, %hi(.got.plt entry)
++ l[w|d] t0, %lo(.got.plt entry)(t1)
++ jalr t1, t0
++ nop */
++
++ entry[0] = RISCV_UTYPE (AUIPC, X_T1, RISCV_PCREL_HIGH_PART (got_address, addr));
++ entry[1] = RISCV_ITYPE (LREG, X_T0, X_T1, RISCV_PCREL_LOW_PART(got_address, addr));
++ entry[2] = RISCV_ITYPE (JALR, X_T1, X_T0, 0);
++ entry[3] = RISCV_NOP;
++}
++
++/* Create an entry in a RISC-V ELF linker hash table. */
++
++static struct bfd_hash_entry *
++link_hash_newfunc (struct bfd_hash_entry *entry,
++ struct bfd_hash_table *table, const char *string)
++{
++ /* Allocate the structure if it has not already been allocated by a
++ subclass. */
++ if (entry == NULL)
++ {
++ entry =
++ bfd_hash_allocate (table,
++ sizeof (struct riscv_elf_link_hash_entry));
++ if (entry == NULL)
++ return entry;
++ }
++
++ /* Call the allocation method of the superclass. */
++ entry = _bfd_elf_link_hash_newfunc (entry, table, string);
++ if (entry != NULL)
++ {
++ struct riscv_elf_link_hash_entry *eh;
++
++ eh = (struct riscv_elf_link_hash_entry *) entry;
++ eh->dyn_relocs = NULL;
++ eh->tls_type = GOT_UNKNOWN;
++ }
++
++ return entry;
++}
++
++/* Create a RISC-V ELF linker hash table. */
++
++static struct bfd_link_hash_table *
++riscv_elf_link_hash_table_create (bfd *abfd)
++{
++ struct riscv_elf_link_hash_table *ret;
++ bfd_size_type amt = sizeof (struct riscv_elf_link_hash_table);
++
++ ret = (struct riscv_elf_link_hash_table *) bfd_zmalloc (amt);
++ if (ret == NULL)
++ return NULL;
++
++ if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd, link_hash_newfunc,
++ sizeof (struct riscv_elf_link_hash_entry),
++ RISCV_ELF_DATA))
++ {
++ free (ret);
++ return NULL;
++ }
++
++ return &ret->elf.root;
++}
++
++/* Create the .got section. */
++
++static bfd_boolean
++riscv_elf_create_got_section (bfd *abfd, struct bfd_link_info *info)
++{
++ flagword flags;
++ asection *s, *s_got;
++ struct elf_link_hash_entry *h;
++ const struct elf_backend_data *bed = get_elf_backend_data (abfd);
++ struct elf_link_hash_table *htab = elf_hash_table (info);
++
++ /* This function may be called more than once. */
++ s = bfd_get_linker_section (abfd, ".got");
++ if (s != NULL)
++ return TRUE;
++
++ flags = bed->dynamic_sec_flags;
++
++ s = bfd_make_section_anyway_with_flags (abfd,
++ (bed->rela_plts_and_copies_p
++ ? ".rela.got" : ".rel.got"),
++ (bed->dynamic_sec_flags
++ | SEC_READONLY));
++ if (s == NULL
++ || ! bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
++ return FALSE;
++ htab->srelgot = s;
++
++ s = s_got = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
++ if (s == NULL
++ || !bfd_set_section_alignment (abfd, s, bed->s->log_file_align))
++ return FALSE;
++ htab->sgot = s;
++
++ /* The first bit of the global offset table is the header. */
++ s->size += bed->got_header_size;
++
++ if (bed->want_got_plt)
++ {
++ s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
++ if (s == NULL
++ || !bfd_set_section_alignment (abfd, s,
++ bed->s->log_file_align))
++ return FALSE;
++ htab->sgotplt = s;
++
++ /* Reserve room for the header. */
++ s->size += GOTPLT_HEADER_SIZE;
++ }
++
++ if (bed->want_got_sym)
++ {
++ /* Define the symbol _GLOBAL_OFFSET_TABLE_ at the start of the .got
++ section. We don't do this in the linker script because we don't want
++ to define the symbol if we are not creating a global offset
++ table. */
++ h = _bfd_elf_define_linkage_sym (abfd, info, s_got,
++ "_GLOBAL_OFFSET_TABLE_");
++ elf_hash_table (info)->hgot = h;
++ if (h == NULL)
++ return FALSE;
++ }
++
++ return TRUE;
++}
++
++/* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and
++ .rela.bss sections in DYNOBJ, and set up shortcuts to them in our
++ hash table. */
++
++static bfd_boolean
++riscv_elf_create_dynamic_sections (bfd *dynobj,
++ struct bfd_link_info *info)
++{
++ struct riscv_elf_link_hash_table *htab;
++
++ htab = riscv_elf_hash_table (info);
++ BFD_ASSERT (htab != NULL);
++
++ if (!riscv_elf_create_got_section (dynobj, info))
++ return FALSE;
++
++ if (!_bfd_elf_create_dynamic_sections (dynobj, info))
++ return FALSE;
++
++ htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
++ if (!info->shared)
++ {
++ htab->srelbss = bfd_get_linker_section (dynobj, ".rela.bss");
++ htab->sdyntdata =
++ bfd_make_section_anyway_with_flags (dynobj, ".tdata.dyn",
++ SEC_ALLOC | SEC_THREAD_LOCAL);
++ }
++
++ if (!htab->elf.splt || !htab->elf.srelplt || !htab->sdynbss
++ || (!info->shared && (!htab->srelbss || !htab->sdyntdata)))
++ abort ();
++
++ return TRUE;
++}
++
++/* Copy the extra info we tack onto an elf_link_hash_entry. */
++
++static void
++riscv_elf_copy_indirect_symbol (struct bfd_link_info *info,
++ struct elf_link_hash_entry *dir,
++ struct elf_link_hash_entry *ind)
++{
++ struct riscv_elf_link_hash_entry *edir, *eind;
++
++ edir = (struct riscv_elf_link_hash_entry *) dir;
++ eind = (struct riscv_elf_link_hash_entry *) ind;
++
++ if (eind->dyn_relocs != NULL)
++ {
++ if (edir->dyn_relocs != NULL)
++ {
++ struct riscv_elf_dyn_relocs **pp;
++ struct riscv_elf_dyn_relocs *p;
++
++ /* Add reloc counts against the indirect sym to the direct sym
++ list. Merge any entries against the same section. */
++ for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
++ {
++ struct riscv_elf_dyn_relocs *q;
++
++ for (q = edir->dyn_relocs; q != NULL; q = q->next)
++ if (q->sec == p->sec)
++ {
++ q->pc_count += p->pc_count;
++ q->count += p->count;
++ *pp = p->next;
++ break;
++ }
++ if (q == NULL)
++ pp = &p->next;
++ }
++ *pp = edir->dyn_relocs;
++ }
++
++ edir->dyn_relocs = eind->dyn_relocs;
++ eind->dyn_relocs = NULL;
++ }
++
++ if (ind->root.type == bfd_link_hash_indirect
++ && dir->got.refcount <= 0)
++ {
++ edir->tls_type = eind->tls_type;
++ eind->tls_type = GOT_UNKNOWN;
++ }
++ _bfd_elf_link_hash_copy_indirect (info, dir, ind);
++}
++
++static bfd_boolean
++riscv_elf_record_tls_type (bfd *abfd, struct elf_link_hash_entry *h,
++ unsigned long symndx, char tls_type)
++{
++ char *new_tls_type = &_bfd_riscv_elf_tls_type (abfd, h, symndx);
++ *new_tls_type |= tls_type;
++ if ((*new_tls_type & GOT_NORMAL) && (*new_tls_type & ~GOT_NORMAL))
++ {
++ (*_bfd_error_handler)
++ (_("%B: `%s' accessed both as normal and thread local symbol"),
++ abfd, h ? h->root.root.string : "<local>");
++ return FALSE;
++ }
++ return TRUE;
++}
++
++static bfd_boolean
++riscv_elf_record_got_reference (bfd *abfd, struct bfd_link_info *info,
++ struct elf_link_hash_entry *h, long symndx)
++{
++ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++ Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
++
++ if (htab->elf.sgot == NULL)
++ {
++ if (!riscv_elf_create_got_section (htab->elf.dynobj, info))
++ return FALSE;
++ }
++
++ if (h != NULL)
++ {
++ h->got.refcount += 1;
++ return TRUE;
++ }
++
++ /* This is a global offset table entry for a local symbol. */
++ if (elf_local_got_refcounts (abfd) == NULL)
++ {
++ bfd_size_type size = symtab_hdr->sh_info * (sizeof (bfd_vma) + 1);
++ if (!(elf_local_got_refcounts (abfd) = bfd_zalloc (abfd, size)))
++ return FALSE;
++ _bfd_riscv_elf_local_got_tls_type (abfd)
++ = (char *) (elf_local_got_refcounts (abfd) + symtab_hdr->sh_info);
++ }
++ elf_local_got_refcounts (abfd) [symndx] += 1;
++
++ return TRUE;
++}
++
++static bfd_boolean
++bad_static_reloc (bfd *abfd, unsigned r_type, struct elf_link_hash_entry *h)
++{
++ (*_bfd_error_handler)
++ (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
++ abfd, riscv_elf_rtype_to_howto (r_type)->name,
++ h != NULL ? h->root.root.string : "a local symbol");
++ bfd_set_error (bfd_error_bad_value);
++ return FALSE;
++}
++/* Look through the relocs for a section during the first phase, and
++ allocate space in the global offset table or procedure linkage
++ table. */
++
++static bfd_boolean
++riscv_elf_check_relocs (bfd *abfd, struct bfd_link_info *info,
++ asection *sec, const Elf_Internal_Rela *relocs)
++{
++ struct riscv_elf_link_hash_table *htab;
++ Elf_Internal_Shdr *symtab_hdr;
++ struct elf_link_hash_entry **sym_hashes;
++ const Elf_Internal_Rela *rel;
++ asection *sreloc = NULL;
++
++ if (info->relocatable)
++ return TRUE;
++
++ htab = riscv_elf_hash_table (info);
++ symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
++ sym_hashes = elf_sym_hashes (abfd);
++
++ if (htab->elf.dynobj == NULL)
++ htab->elf.dynobj = abfd;
++
++ for (rel = relocs; rel < relocs + sec->reloc_count; rel++)
++ {
++ unsigned int r_type;
++ unsigned long r_symndx;
++ struct elf_link_hash_entry *h;
++
++ r_symndx = ELFNN_R_SYM (rel->r_info);
++ r_type = ELFNN_R_TYPE (rel->r_info);
++
++ if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
++ {
++ (*_bfd_error_handler) (_("%B: bad symbol index: %d"),
++ abfd, r_symndx);
++ return FALSE;
++ }
++
++ if (r_symndx < symtab_hdr->sh_info)
++ h = NULL;
++ else
++ {
++ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
++ while (h->root.type == bfd_link_hash_indirect
++ || h->root.type == bfd_link_hash_warning)
++ h = (struct elf_link_hash_entry *) h->root.u.i.link;
++
++ /* PR15323, ref flags aren't set for references in the same
++ object. */
++ h->root.non_ir_ref = 1;
++ }
++
++ switch (r_type)
++ {
++ case R_RISCV_TLS_GD_HI20:
++ if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
++ || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_GD))
++ return FALSE;
++ break;
++
++ case R_RISCV_TLS_GOT_HI20:
++ if (info->shared)
++ info->flags |= DF_STATIC_TLS;
++ if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
++ || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_IE))
++ return FALSE;
++ break;
++
++ case R_RISCV_GOT_HI20:
++ if (!riscv_elf_record_got_reference (abfd, info, h, r_symndx)
++ || !riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_NORMAL))
++ return FALSE;
++ break;
++
++ case R_RISCV_CALL_PLT:
++ /* This symbol requires a procedure linkage table entry. We
++ actually build the entry in adjust_dynamic_symbol,
++ because this might be a case of linking PIC code without
++ linking in any dynamic objects, in which case we don't
++ need to generate a procedure linkage table after all. */
++
++ if (h != NULL)
++ {
++ h->needs_plt = 1;
++ h->plt.refcount += 1;
++ }
++ break;
++
++ case R_RISCV_CALL:
++ case R_RISCV_JAL:
++ case R_RISCV_BRANCH:
++ case R_RISCV_PCREL_HI20:
++ /* In shared libs, these relocs are known to bind locally. */
++ if (info->shared)
++ break;
++ goto static_reloc;
++
++ case R_RISCV_TPREL_HI20:
++ if (!info->executable)
++ return bad_static_reloc (abfd, r_type, h);
++ if (h != NULL)
++ riscv_elf_record_tls_type (abfd, h, r_symndx, GOT_TLS_LE);
++ goto static_reloc;
++
++ case R_RISCV_HI20:
++ if (info->shared)
++ return bad_static_reloc (abfd, r_type, h);
++ /* Fall through. */
++
++ case R_RISCV_COPY:
++ case R_RISCV_JUMP_SLOT:
++ case R_RISCV_RELATIVE:
++ case R_RISCV_64:
++ case R_RISCV_32:
++ /* Fall through. */
++
++ static_reloc:
++ if (h != NULL)
++ h->non_got_ref = 1;
++
++ if (h != NULL && !info->shared)
++ {
++ /* We may need a .plt entry if the function this reloc
++ refers to is in a shared lib. */
++ h->plt.refcount += 1;
++ }
++
++ /* If we are creating a shared library, and this is a reloc
++ against a global symbol, or a non PC relative reloc
++ against a local symbol, then we need to copy the reloc
++ into the shared library. However, if we are linking with
++ -Bsymbolic, we do not need to copy a reloc against a
++ global symbol which is defined in an object we are
++ including in the link (i.e., DEF_REGULAR is set). At
++ this point we have not seen all the input files, so it is
++ possible that DEF_REGULAR is not set now but will be set
++ later (it is never cleared). In case of a weak definition,
++ DEF_REGULAR may be cleared later by a strong definition in
++ a shared library. We account for that possibility below by
++ storing information in the relocs_copied field of the hash
++ table entry. A similar situation occurs when creating
++ shared libraries and symbol visibility changes render the
++ symbol local.
++
++ If on the other hand, we are creating an executable, we
++ may need to keep relocations for symbols satisfied by a
++ dynamic library if we manage to avoid copy relocs for the
++ symbol. */
++ if ((info->shared
++ && (sec->flags & SEC_ALLOC) != 0
++ && (! riscv_elf_rtype_to_howto (r_type)->pc_relative
++ || (h != NULL
++ && (! info->symbolic
++ || h->root.type == bfd_link_hash_defweak
++ || !h->def_regular))))
++ || (!info->shared
++ && (sec->flags & SEC_ALLOC) != 0
++ && h != NULL
++ && (h->root.type == bfd_link_hash_defweak
++ || !h->def_regular)))
++ {
++ struct riscv_elf_dyn_relocs *p;
++ struct riscv_elf_dyn_relocs **head;
++
++ /* When creating a shared object, we must copy these
++ relocs into the output file. We create a reloc
++ section in dynobj and make room for the reloc. */
++ if (sreloc == NULL)
++ {
++ sreloc = _bfd_elf_make_dynamic_reloc_section
++ (sec, htab->elf.dynobj, RISCV_ELF_LOG_WORD_BYTES,
++ abfd, /*rela?*/ TRUE);
++
++ if (sreloc == NULL)
++ return FALSE;
++ }
++
++ /* If this is a global symbol, we count the number of
++ relocations we need for this symbol. */
++ if (h != NULL)
++ head = &((struct riscv_elf_link_hash_entry *) h)->dyn_relocs;
++ else
++ {
++ /* Track dynamic relocs needed for local syms too.
++ We really need local syms available to do this
++ easily. Oh well. */
++
++ asection *s;
++ void *vpp;
++ Elf_Internal_Sym *isym;
++
++ isym = bfd_sym_from_r_symndx (&htab->sym_cache,
++ abfd, r_symndx);
++ if (isym == NULL)
++ return FALSE;
++
++ s = bfd_section_from_elf_index (abfd, isym->st_shndx);
++ if (s == NULL)
++ s = sec;
++
++ vpp = &elf_section_data (s)->local_dynrel;
++ head = (struct riscv_elf_dyn_relocs **) vpp;
++ }
++
++ p = *head;
++ if (p == NULL || p->sec != sec)
++ {
++ bfd_size_type amt = sizeof *p;
++ p = ((struct riscv_elf_dyn_relocs *)
++ bfd_alloc (htab->elf.dynobj, amt));
++ if (p == NULL)
++ return FALSE;
++ p->next = *head;
++ *head = p;
++ p->sec = sec;
++ p->count = 0;
++ p->pc_count = 0;
++ }
++
++ p->count += 1;
++ p->pc_count += riscv_elf_rtype_to_howto (r_type)->pc_relative;
++ }
++
++ break;
++
++ case R_RISCV_GNU_VTINHERIT:
++ if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
++ return FALSE;
++ break;
++
++ case R_RISCV_GNU_VTENTRY:
++ if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
++ return FALSE;
++ break;
++
++ default:
++ break;
++ }
++ }
++
++ return TRUE;
++}
++
++static asection *
++riscv_elf_gc_mark_hook (asection *sec,
++ struct bfd_link_info *info,
++ Elf_Internal_Rela *rel,
++ struct elf_link_hash_entry *h,
++ Elf_Internal_Sym *sym)
++{
++ if (h != NULL)
++ switch (ELFNN_R_TYPE (rel->r_info))
++ {
++ case R_RISCV_GNU_VTINHERIT:
++ case R_RISCV_GNU_VTENTRY:
++ return NULL;
++ }
++
++ return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
++}
++
++/* Update the got entry reference counts for the section being removed. */
++static bfd_boolean
++riscv_elf_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info,
++ asection *sec, const Elf_Internal_Rela *relocs)
++{
++ const Elf_Internal_Rela *rel, *relend;
++ Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (abfd);
++ struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (abfd);
++ bfd_signed_vma *local_got_refcounts = elf_local_got_refcounts (abfd);
++
++ if (info->relocatable)
++ return TRUE;
++
++ elf_section_data (sec)->local_dynrel = NULL;
++
++ for (rel = relocs, relend = relocs + sec->reloc_count; rel < relend; rel++)
++ {
++ unsigned long r_symndx;
++ struct elf_link_hash_entry *h = NULL;
++
++ r_symndx = ELFNN_R_SYM (rel->r_info);
++ if (r_symndx >= symtab_hdr->sh_info)
++ {
++ struct riscv_elf_link_hash_entry *eh;
++ struct riscv_elf_dyn_relocs **pp;
++ struct riscv_elf_dyn_relocs *p;
++
++ h = sym_hashes[r_symndx - symtab_hdr->sh_info];
++ while (h->root.type == bfd_link_hash_indirect
++ || h->root.type == bfd_link_hash_warning)
++ h = (struct elf_link_hash_entry *) h->root.u.i.link;
++ eh = (struct riscv_elf_link_hash_entry *) h;
++ for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next)
++ if (p->sec == sec)
++ {
++ /* Everything must go for SEC. */
++ *pp = p->next;
++ break;
++ }
++ }
++
++ switch (ELFNN_R_TYPE (rel->r_info))
++ {
++ case R_RISCV_GOT_HI20:
++ case R_RISCV_TLS_GOT_HI20:
++ case R_RISCV_TLS_GD_HI20:
++ if (h != NULL)
++ {
++ if (h->got.refcount > 0)
++ h->got.refcount--;
++ }
++ else
++ {
++ if (local_got_refcounts &&
++ local_got_refcounts[r_symndx] > 0)
++ local_got_refcounts[r_symndx]--;
++ }
++ break;
++
++ case R_RISCV_HI20:
++ case R_RISCV_PCREL_HI20:
++ case R_RISCV_COPY:
++ case R_RISCV_JUMP_SLOT:
++ case R_RISCV_RELATIVE:
++ case R_RISCV_64:
++ case R_RISCV_32:
++ case R_RISCV_BRANCH:
++ case R_RISCV_CALL:
++ case R_RISCV_JAL:
++ if (info->shared)
++ break;
++ /* Fall through. */
++
++ case R_RISCV_CALL_PLT:
++ if (h != NULL)
++ {
++ if (h->plt.refcount > 0)
++ h->plt.refcount--;
++ }
++ break;
++
++ default:
++ break;
++ }
++ }
++
++ return TRUE;
++}
++
++/* Adjust a symbol defined by a dynamic object and referenced by a
++ regular object. The current definition is in some section of the
++ dynamic object, but we're not including those sections. We have to
++ change the definition to something the rest of the link can
++ understand. */
++
++static bfd_boolean
++riscv_elf_adjust_dynamic_symbol (struct bfd_link_info *info,
++ struct elf_link_hash_entry *h)
++{
++ struct riscv_elf_link_hash_table *htab;
++ struct riscv_elf_link_hash_entry * eh;
++ struct riscv_elf_dyn_relocs *p;
++ bfd *dynobj;
++ asection *s;
++
++ htab = riscv_elf_hash_table (info);
++ BFD_ASSERT (htab != NULL);
++
++ dynobj = htab->elf.dynobj;
++
++ /* Make sure we know what is going on here. */
++ BFD_ASSERT (dynobj != NULL
++ && (h->needs_plt
++ || h->u.weakdef != NULL
++ || (h->def_dynamic
++ && h->ref_regular
++ && !h->def_regular)));
++
++ /* If this is a function, put it in the procedure linkage table. We
++ will fill in the contents of the procedure linkage table later
++ (although we could actually do it here). */
++ if (h->type == STT_FUNC || h->needs_plt)
++ {
++ if (h->plt.refcount <= 0
++ || SYMBOL_CALLS_LOCAL (info, h)
++ || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
++ && h->root.type == bfd_link_hash_undefweak))
++ {
++ /* This case can occur if we saw a R_RISCV_CALL_PLT reloc in an
++ input file, but the symbol was never referred to by a dynamic
++ object, or if all references were garbage collected. In such
++ a case, we don't actually need to build a PLT entry. */
++ h->plt.offset = (bfd_vma) -1;
++ h->needs_plt = 0;
++ }
++
++ return TRUE;
++ }
++ else
++ h->plt.offset = (bfd_vma) -1;
++
++ /* If this is a weak symbol, and there is a real definition, the
++ processor independent code will have arranged for us to see the
++ real definition first, and we can just use the same value. */
++ if (h->u.weakdef != NULL)
++ {
++ BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
++ || h->u.weakdef->root.type == bfd_link_hash_defweak);
++ h->root.u.def.section = h->u.weakdef->root.u.def.section;
++ h->root.u.def.value = h->u.weakdef->root.u.def.value;
++ return TRUE;
++ }
++
++ /* This is a reference to a symbol defined by a dynamic object which
++ is not a function. */
++
++ /* If we are creating a shared library, we must presume that the
++ only references to the symbol are via the global offset table.
++ For such cases we need not do anything here; the relocations will
++ be handled correctly by relocate_section. */
++ if (info->shared)
++ return TRUE;
++
++ /* If there are no references to this symbol that do not use the
++ GOT, we don't need to generate a copy reloc. */
++ if (!h->non_got_ref)
++ return TRUE;
++
++ /* If -z nocopyreloc was given, we won't generate them either. */
++ if (info->nocopyreloc)
++ {
++ h->non_got_ref = 0;
++ return TRUE;
++ }
++
++ eh = (struct riscv_elf_link_hash_entry *) h;
++ for (p = eh->dyn_relocs; p != NULL; p = p->next)
++ {
++ s = p->sec->output_section;
++ if (s != NULL && (s->flags & SEC_READONLY) != 0)
++ break;
++ }
++
++ /* If we didn't find any dynamic relocs in read-only sections, then
++ we'll be keeping the dynamic relocs and avoiding the copy reloc. */
++ if (p == NULL)
++ {
++ h->non_got_ref = 0;
++ return TRUE;
++ }
++
++ /* We must allocate the symbol in our .dynbss section, which will
++ become part of the .bss section of the executable. There will be
++ an entry for this symbol in the .dynsym section. The dynamic
++ object will contain position independent code, so all references
++ from the dynamic object to this symbol will go through the global
++ offset table. The dynamic linker will use the .dynsym entry to
++ determine the address it must put in the global offset table, so
++ both the dynamic object and the regular object will refer to the
++ same memory location for the variable. */
++
++ /* We must generate a R_RISCV_COPY reloc to tell the dynamic linker
++ to copy the initial value out of the dynamic object and into the
++ runtime process image. We need to remember the offset into the
++ .rel.bss section we are going to use. */
++ if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0)
++ {
++ htab->srelbss->size += sizeof (ElfNN_External_Rela);
++ h->needs_copy = 1;
++ }
++
++ if (eh->tls_type & ~GOT_NORMAL)
++ return _bfd_elf_adjust_dynamic_copy (h, htab->sdyntdata);
++
++ return _bfd_elf_adjust_dynamic_copy (h, htab->sdynbss);
++}
++
++/* Allocate space in .plt, .got and associated reloc sections for
++ dynamic relocs. */
++
++static bfd_boolean
++allocate_dynrelocs (struct elf_link_hash_entry *h, void *inf)
++{
++ struct bfd_link_info *info;
++ struct riscv_elf_link_hash_table *htab;
++ struct riscv_elf_link_hash_entry *eh;
++ struct riscv_elf_dyn_relocs *p;
++
++ if (h->root.type == bfd_link_hash_indirect)
++ return TRUE;
++
++ info = (struct bfd_link_info *) inf;
++ htab = riscv_elf_hash_table (info);
++ BFD_ASSERT (htab != NULL);
++
++ if (htab->elf.dynamic_sections_created
++ && h->plt.refcount > 0)
++ {
++ /* Make sure this symbol is output as a dynamic symbol.
++ Undefined weak syms won't yet be marked as dynamic. */
++ if (h->dynindx == -1
++ && !h->forced_local)
++ {
++ if (! bfd_elf_link_record_dynamic_symbol (info, h))
++ return FALSE;
++ }
++
++ if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, info->shared, h))
++ {
++ asection *s = htab->elf.splt;
++
++ if (s->size == 0)
++ s->size = PLT_HEADER_SIZE;
++
++ h->plt.offset = s->size;
++
++ /* Make room for this entry. */
++ s->size += PLT_ENTRY_SIZE;
++
++ /* We also need to make an entry in the .got.plt section. */
++ htab->elf.sgotplt->size += GOT_ENTRY_SIZE;
++
++ /* We also need to make an entry in the .rela.plt section. */
++ htab->elf.srelplt->size += sizeof (ElfNN_External_Rela);
++
++ /* If this symbol is not defined in a regular file, and we are
++ not generating a shared library, then set the symbol to this
++ location in the .plt. This is required to make function
++ pointers compare as equal between the normal executable and
++ the shared library. */
++ if (! info->shared
++ && !h->def_regular)
++ {
++ h->root.u.def.section = s;
++ h->root.u.def.value = h->plt.offset;
++ }
++ }
++ else
++ {
++ h->plt.offset = (bfd_vma) -1;
++ h->needs_plt = 0;
++ }
++ }
++ else
++ {
++ h->plt.offset = (bfd_vma) -1;
++ h->needs_plt = 0;
++ }
++
++ if (h->got.refcount > 0)
++ {
++ asection *s;
++ bfd_boolean dyn;
++ int tls_type = riscv_elf_hash_entry(h)->tls_type;
++
++ /* Make sure this symbol is output as a dynamic symbol.
++ Undefined weak syms won't yet be marked as dynamic. */
++ if (h->dynindx == -1
++ && !h->forced_local)
++ {
++ if (! bfd_elf_link_record_dynamic_symbol (info, h))
++ return FALSE;
++ }
++
++ s = htab->elf.sgot;
++ h->got.offset = s->size;
++ dyn = htab->elf.dynamic_sections_created;
++ if (tls_type & (GOT_TLS_GD | GOT_TLS_IE))
++ {
++ /* TLS_GD needs two dynamic relocs and two GOT slots. */
++ if (tls_type & GOT_TLS_GD)
++ {
++ s->size += 2 * RISCV_ELF_WORD_BYTES;
++ htab->elf.srelgot->size += 2 * sizeof (ElfNN_External_Rela);
++ }
++
++ /* TLS_IE needs one dynamic reloc and one GOT slot. */
++ if (tls_type & GOT_TLS_IE)
++ {
++ s->size += RISCV_ELF_WORD_BYTES;
++ htab->elf.srelgot->size += sizeof (ElfNN_External_Rela);
++ }
++ }
++ else
++ {
++ s->size += RISCV_ELF_WORD_BYTES;
++ if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h))
++ htab->elf.srelgot->size += sizeof (ElfNN_External_Rela);
++ }
++ }
++ else
++ h->got.offset = (bfd_vma) -1;
++
++ eh = (struct riscv_elf_link_hash_entry *) h;
++ if (eh->dyn_relocs == NULL)
++ return TRUE;
++
++ /* In the shared -Bsymbolic case, discard space allocated for
++ dynamic pc-relative relocs against symbols which turn out to be
++ defined in regular objects. For the normal shared case, discard
++ space for pc-relative relocs that have become local due to symbol
++ visibility changes. */
++
++ if (info->shared)
++ {
++ if (SYMBOL_CALLS_LOCAL (info, h))
++ {
++ struct riscv_elf_dyn_relocs **pp;
++
++ for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
++ {
++ p->count -= p->pc_count;
++ p->pc_count = 0;
++ if (p->count == 0)
++ *pp = p->next;
++ else
++ pp = &p->next;
++ }
++ }
++
++ /* Also discard relocs on undefined weak syms with non-default
++ visibility. */
++ if (eh->dyn_relocs != NULL
++ && h->root.type == bfd_link_hash_undefweak)
++ {
++ if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
++ eh->dyn_relocs = NULL;
++
++ /* Make sure undefined weak symbols are output as a dynamic
++ symbol in PIEs. */
++ else if (h->dynindx == -1
++ && !h->forced_local)
++ {
++ if (! bfd_elf_link_record_dynamic_symbol (info, h))
++ return FALSE;
++ }
++ }
++ }
++ else
++ {
++ /* For the non-shared case, discard space for relocs against
++ symbols which turn out to need copy relocs or are not
++ dynamic. */
++
++ if (!h->non_got_ref
++ && ((h->def_dynamic
++ && !h->def_regular)
++ || (htab->elf.dynamic_sections_created
++ && (h->root.type == bfd_link_hash_undefweak
++ || h->root.type == bfd_link_hash_undefined))))
++ {
++ /* Make sure this symbol is output as a dynamic symbol.
++ Undefined weak syms won't yet be marked as dynamic. */
++ if (h->dynindx == -1
++ && !h->forced_local)
++ {
++ if (! bfd_elf_link_record_dynamic_symbol (info, h))
++ return FALSE;
++ }
++
++ /* If that succeeded, we know we'll be keeping all the
++ relocs. */
++ if (h->dynindx != -1)
++ goto keep;
++ }
++
++ eh->dyn_relocs = NULL;
++
++ keep: ;
++ }
++
++ /* Finally, allocate space. */
++ for (p = eh->dyn_relocs; p != NULL; p = p->next)
++ {
++ asection *sreloc = elf_section_data (p->sec)->sreloc;
++ sreloc->size += p->count * sizeof (ElfNN_External_Rela);
++ }
++
++ return TRUE;
++}
++
++/* Find any dynamic relocs that apply to read-only sections. */
++
++static bfd_boolean
++readonly_dynrelocs (struct elf_link_hash_entry *h, void *inf)
++{
++ struct riscv_elf_link_hash_entry *eh;
++ struct riscv_elf_dyn_relocs *p;
++
++ eh = (struct riscv_elf_link_hash_entry *) h;
++ for (p = eh->dyn_relocs; p != NULL; p = p->next)
++ {
++ asection *s = p->sec->output_section;
++
++ if (s != NULL && (s->flags & SEC_READONLY) != 0)
++ {
++ ((struct bfd_link_info *) inf)->flags |= DF_TEXTREL;
++
++ /* Short-circuit the traversal. */
++ return FALSE;
++ }
++ }
++ return TRUE;
++}
++
++static bfd_boolean
++riscv_elf_size_dynamic_sections (bfd *output_bfd, struct bfd_link_info *info)
++{
++ struct riscv_elf_link_hash_table *htab;
++ bfd *dynobj;
++ asection *s;
++ bfd *ibfd;
++
++ htab = riscv_elf_hash_table (info);
++ BFD_ASSERT (htab != NULL);
++ dynobj = htab->elf.dynobj;
++ BFD_ASSERT (dynobj != NULL);
++
++ if (elf_hash_table (info)->dynamic_sections_created)
++ {
++ /* Set the contents of the .interp section to the interpreter. */
++ if (info->executable)
++ {
++ s = bfd_get_linker_section (dynobj, ".interp");
++ BFD_ASSERT (s != NULL);
++ s->size = strlen (ELFNN_DYNAMIC_INTERPRETER) + 1;
++ s->contents = (unsigned char *) ELFNN_DYNAMIC_INTERPRETER;
++ }
++ }
++
++ /* Set up .got offsets for local syms, and space for local dynamic
++ relocs. */
++ for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
++ {
++ bfd_signed_vma *local_got;
++ bfd_signed_vma *end_local_got;
++ char *local_tls_type;
++ bfd_size_type locsymcount;
++ Elf_Internal_Shdr *symtab_hdr;
++ asection *srel;
++
++ if (! is_riscv_elf (ibfd))
++ continue;
++
++ for (s = ibfd->sections; s != NULL; s = s->next)
++ {
++ struct riscv_elf_dyn_relocs *p;
++
++ for (p = elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
++ {
++ if (!bfd_is_abs_section (p->sec)
++ && bfd_is_abs_section (p->sec->output_section))
++ {
++ /* Input section has been discarded, either because
++ it is a copy of a linkonce section or due to
++ linker script /DISCARD/, so we'll be discarding
++ the relocs too. */
++ }
++ else if (p->count != 0)
++ {
++ srel = elf_section_data (p->sec)->sreloc;
++ srel->size += p->count * sizeof (ElfNN_External_Rela);
++ if ((p->sec->output_section->flags & SEC_READONLY) != 0)
++ info->flags |= DF_TEXTREL;
++ }
++ }
++ }
++
++ local_got = elf_local_got_refcounts (ibfd);
++ if (!local_got)
++ continue;
++
++ symtab_hdr = &elf_symtab_hdr (ibfd);
++ locsymcount = symtab_hdr->sh_info;
++ end_local_got = local_got + locsymcount;
++ local_tls_type = _bfd_riscv_elf_local_got_tls_type (ibfd);
++ s = htab->elf.sgot;
++ srel = htab->elf.srelgot;
++ for (; local_got < end_local_got; ++local_got, ++local_tls_type)
++ {
++ if (*local_got > 0)
++ {
++ *local_got = s->size;
++ s->size += RISCV_ELF_WORD_BYTES;
++ if (*local_tls_type & GOT_TLS_GD)
++ s->size += RISCV_ELF_WORD_BYTES;
++ if (info->shared
++ || (*local_tls_type & (GOT_TLS_GD | GOT_TLS_IE)))
++ srel->size += sizeof (ElfNN_External_Rela);
++ }
++ else
++ *local_got = (bfd_vma) -1;
++ }
++ }
++
++ /* Allocate global sym .plt and .got entries, and space for global
++ sym dynamic relocs. */
++ elf_link_hash_traverse (&htab->elf, allocate_dynrelocs, info);
++
++ if (htab->elf.sgotplt)
++ {
++ struct elf_link_hash_entry *got;
++ got = elf_link_hash_lookup (elf_hash_table (info),
++ "_GLOBAL_OFFSET_TABLE_",
++ FALSE, FALSE, FALSE);
++
++ /* Don't allocate .got.plt section if there are no GOT nor PLT
++ entries and there is no reference to _GLOBAL_OFFSET_TABLE_. */
++ if ((got == NULL
++ || !got->ref_regular_nonweak)
++ && (htab->elf.sgotplt->size == GOTPLT_HEADER_SIZE)
++ && (htab->elf.splt == NULL
++ || htab->elf.splt->size == 0)
++ && (htab->elf.sgot == NULL
++ || (htab->elf.sgot->size
++ == get_elf_backend_data (output_bfd)->got_header_size)))
++ htab->elf.sgotplt->size = 0;
++ }
++
++ /* The check_relocs and adjust_dynamic_symbol entry points have
++ determined the sizes of the various dynamic sections. Allocate
++ memory for them. */
++ for (s = dynobj->sections; s != NULL; s = s->next)
++ {
++ if ((s->flags & SEC_LINKER_CREATED) == 0)
++ continue;
++
++ if (s == htab->elf.splt
++ || s == htab->elf.sgot
++ || s == htab->elf.sgotplt
++ || s == htab->sdynbss)
++ {
++ /* Strip this section if we don't need it; see the
++ comment below. */
++ }
++ else if (strncmp (s->name, ".rela", 5) == 0)
++ {
++ if (s->size != 0)
++ {
++ /* We use the reloc_count field as a counter if we need
++ to copy relocs into the output file. */
++ s->reloc_count = 0;
++ }
++ }
++ else
++ {
++ /* It's not one of our sections. */
++ continue;
++ }
++
++ if (s->size == 0)
++ {
++ /* If we don't need this section, strip it from the
++ output file. This is mostly to handle .rela.bss and
++ .rela.plt. We must create both sections in
++ create_dynamic_sections, because they must be created
++ before the linker maps input sections to output
++ sections. The linker does that before
++ adjust_dynamic_symbol is called, and it is that
++ function which decides whether anything needs to go
++ into these sections. */
++ s->flags |= SEC_EXCLUDE;
++ continue;
++ }
++
++ if ((s->flags & SEC_HAS_CONTENTS) == 0)
++ continue;
++
++ /* Allocate memory for the section contents. Zero the memory
++ for the benefit of .rela.plt, which has 4 unused entries
++ at the beginning, and we don't want garbage. */
++ s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size);
++ if (s->contents == NULL)
++ return FALSE;
++ }
++
++ if (elf_hash_table (info)->dynamic_sections_created)
++ {
++ /* Add some entries to the .dynamic section. We fill in the
++ values later, in riscv_elf_finish_dynamic_sections, but we
++ must add the entries now so that we get the correct size for
++ the .dynamic section. The DT_DEBUG entry is filled in by the
++ dynamic linker and used by the debugger. */
++#define add_dynamic_entry(TAG, VAL) \
++ _bfd_elf_add_dynamic_entry (info, TAG, VAL)
++
++ if (info->executable)
++ {
++ if (!add_dynamic_entry (DT_DEBUG, 0))
++ return FALSE;
++ }
++
++ if (htab->elf.srelplt->size != 0)
++ {
++ if (!add_dynamic_entry (DT_PLTGOT, 0)
++ || !add_dynamic_entry (DT_PLTRELSZ, 0)
++ || !add_dynamic_entry (DT_PLTREL, DT_RELA)
++ || !add_dynamic_entry (DT_JMPREL, 0))
++ return FALSE;
++ }
++
++ if (!add_dynamic_entry (DT_RELA, 0)
++ || !add_dynamic_entry (DT_RELASZ, 0)
++ || !add_dynamic_entry (DT_RELAENT, sizeof (ElfNN_External_Rela)))
++ return FALSE;
++
++ /* If any dynamic relocs apply to a read-only section,
++ then we need a DT_TEXTREL entry. */
++ if ((info->flags & DF_TEXTREL) == 0)
++ elf_link_hash_traverse (&htab->elf, readonly_dynrelocs, info);
++
++ if (info->flags & DF_TEXTREL)
++ {
++ if (!add_dynamic_entry (DT_TEXTREL, 0))
++ return FALSE;
++ }
++ }
++#undef add_dynamic_entry
++
++ return TRUE;
++}
++
++#define TP_OFFSET 0
++#define DTP_OFFSET 0x800
++
++/* Return the relocation value for a TLS dtp-relative reloc. */
++
++static bfd_vma
++dtpoff (struct bfd_link_info *info, bfd_vma address)
++{
++ /* If tls_sec is NULL, we should have signalled an error already. */
++ if (elf_hash_table (info)->tls_sec == NULL)
++ return 0;
++ return address - elf_hash_table (info)->tls_sec->vma - DTP_OFFSET;
++}
++
++/* Return the relocation value for a static TLS tp-relative relocation. */
++
++static bfd_vma
++tpoff (struct bfd_link_info *info, bfd_vma address)
++{
++ /* If tls_sec is NULL, we should have signalled an error already. */
++ if (elf_hash_table (info)->tls_sec == NULL)
++ return 0;
++ return address - elf_hash_table (info)->tls_sec->vma - TP_OFFSET;
++}
++
++/* Return the global pointer's value, or 0 if it is not in use. */
++
++static bfd_vma
++riscv_global_pointer_value (struct bfd_link_info *info)
++{
++ struct bfd_link_hash_entry *h;
++
++ h = bfd_link_hash_lookup (info->hash, "_gp", FALSE, FALSE, TRUE);
++ if (h == NULL || h->type != bfd_link_hash_defined)
++ return 0;
++
++ return h->u.def.value + sec_addr (h->u.def.section);
++}
++
++/* Emplace a static relocation. */
++
++static bfd_reloc_status_type
++perform_relocation (const reloc_howto_type *howto,
++ const Elf_Internal_Rela *rel,
++ bfd_vma value,
++ asection *input_section,
++ bfd *input_bfd,
++ bfd_byte *contents)
++{
++ if (howto->pc_relative)
++ value -= sec_addr (input_section) + rel->r_offset;
++ value += rel->r_addend;
++
++ switch (ELFNN_R_TYPE (rel->r_info))
++ {
++ case R_RISCV_HI20:
++ case R_RISCV_TPREL_HI20:
++ case R_RISCV_PCREL_HI20:
++ case R_RISCV_GOT_HI20:
++ case R_RISCV_TLS_GOT_HI20:
++ case R_RISCV_TLS_GD_HI20:
++ value = ENCODE_UTYPE_IMM (RISCV_CONST_HIGH_PART (value));
++ break;
++
++ case R_RISCV_LO12_I:
++ case R_RISCV_TPREL_LO12_I:
++ case R_RISCV_PCREL_LO12_I:
++ value = ENCODE_ITYPE_IMM (value);
++ break;
++
++ case R_RISCV_LO12_S:
++ case R_RISCV_TPREL_LO12_S:
++ case R_RISCV_PCREL_LO12_S:
++ value = ENCODE_STYPE_IMM (value);
++ break;
++
++ case R_RISCV_CALL:
++ case R_RISCV_CALL_PLT:
++ if (!VALID_UTYPE_IMM (RISCV_CONST_HIGH_PART (value)))
++ return bfd_reloc_overflow;
++ value = ENCODE_UTYPE_IMM (RISCV_CONST_HIGH_PART (value))
++ | (ENCODE_ITYPE_IMM (value) << 32);
++ break;
++
++ case R_RISCV_JAL:
++ if (!VALID_UJTYPE_IMM (value))
++ return bfd_reloc_overflow;
++ value = ENCODE_UJTYPE_IMM (value);
++ break;
++
++ case R_RISCV_BRANCH:
++ if (!VALID_SBTYPE_IMM (value))
++ return bfd_reloc_overflow;
++ value = ENCODE_SBTYPE_IMM (value);
++ break;
++
++ case R_RISCV_32:
++ case R_RISCV_64:
++ case R_RISCV_ADD8:
++ case R_RISCV_ADD16:
++ case R_RISCV_ADD32:
++ case R_RISCV_ADD64:
++ case R_RISCV_SUB8:
++ case R_RISCV_SUB16:
++ case R_RISCV_SUB32:
++ case R_RISCV_SUB64:
++ case R_RISCV_TLS_DTPREL32:
++ case R_RISCV_TLS_DTPREL64:
++ break;
++
++ default:
++ return bfd_reloc_notsupported;
++ }
++
++ bfd_vma word = bfd_get (howto->bitsize, input_bfd, contents + rel->r_offset);
++ word = (word & ~howto->dst_mask) | (value & howto->dst_mask);
++ bfd_put (howto->bitsize, input_bfd, word, contents + rel->r_offset);
++
++ return bfd_reloc_ok;
++}
++
++/* Remember all PC-relative high-part relocs we've encountered to help us
++ later resolve the corresponding low-part relocs. */
++
++typedef struct {
++ bfd_vma address;
++ bfd_vma value;
++} riscv_pcrel_hi_reloc;
++
++typedef struct riscv_pcrel_lo_reloc {
++ asection *input_section;
++ struct bfd_link_info *info;
++ reloc_howto_type *howto;
++ const Elf_Internal_Rela *reloc;
++ bfd_vma addr;
++ const char *name;
++ bfd_byte *contents;
++ struct riscv_pcrel_lo_reloc *next;
++} riscv_pcrel_lo_reloc;
++
++typedef struct {
++ htab_t hi_relocs;
++ riscv_pcrel_lo_reloc *lo_relocs;
++} riscv_pcrel_relocs;
++
++static hashval_t
++riscv_pcrel_reloc_hash (const void *entry)
++{
++ const riscv_pcrel_hi_reloc *e = entry;
++ return (hashval_t)(e->address >> 2);
++}
++
++static bfd_boolean
++riscv_pcrel_reloc_eq (const void *entry1, const void *entry2)
++{
++ const riscv_pcrel_hi_reloc *e1 = entry1, *e2 = entry2;
++ return e1->address == e2->address;
++}
++
++static bfd_boolean
++riscv_init_pcrel_relocs (riscv_pcrel_relocs *p)
++{
++
++ p->lo_relocs = NULL;
++ p->hi_relocs = htab_create (1024, riscv_pcrel_reloc_hash,
++ riscv_pcrel_reloc_eq, free);
++ return p->hi_relocs != NULL;
++}
++
++static void
++riscv_free_pcrel_relocs (riscv_pcrel_relocs *p)
++{
++ riscv_pcrel_lo_reloc *cur = p->lo_relocs;
++ while (cur != NULL)
++ {
++ riscv_pcrel_lo_reloc *next = cur->next;
++ free (cur);
++ cur = next;
++ }
++
++ htab_delete (p->hi_relocs);
++}
++
++static bfd_boolean
++riscv_record_pcrel_hi_reloc (riscv_pcrel_relocs *p, bfd_vma addr, bfd_vma value)
++{
++ riscv_pcrel_hi_reloc entry = {addr, value - addr};
++ riscv_pcrel_hi_reloc **slot =
++ (riscv_pcrel_hi_reloc **) htab_find_slot (p->hi_relocs, &entry, INSERT);
++ BFD_ASSERT (*slot == NULL);
++ *slot = (riscv_pcrel_hi_reloc *) bfd_malloc (sizeof (riscv_pcrel_hi_reloc));
++ if (*slot == NULL)
++ return FALSE;
++ **slot = entry;
++ return TRUE;
++}
++
++static bfd_boolean
++riscv_record_pcrel_lo_reloc (riscv_pcrel_relocs *p,
++ asection *input_section,
++ struct bfd_link_info *info,
++ reloc_howto_type *howto,
++ const Elf_Internal_Rela *reloc,
++ bfd_vma addr,
++ const char *name,
++ bfd_byte *contents)
++{
++ riscv_pcrel_lo_reloc *entry;
++ entry = (riscv_pcrel_lo_reloc *) bfd_malloc (sizeof (riscv_pcrel_lo_reloc));
++ if (entry == NULL)
++ return FALSE;
++ *entry = (riscv_pcrel_lo_reloc) {input_section, info, howto, reloc, addr,
++ name, contents, p->lo_relocs};
++ p->lo_relocs = entry;
++ return TRUE;
++}
++
++static bfd_boolean
++riscv_resolve_pcrel_lo_relocs (riscv_pcrel_relocs *p)
++{
++ riscv_pcrel_lo_reloc *r;
++ for (r = p->lo_relocs; r != NULL; r = r->next)
++ {
++ bfd *input_bfd = r->input_section->owner;
++ riscv_pcrel_hi_reloc search = {r->addr, 0};
++ riscv_pcrel_hi_reloc *entry = htab_find (p->hi_relocs, &search);
++ if (entry == NULL)
++ return ((*r->info->callbacks->reloc_overflow)
++ (r->info, NULL, r->name, r->howto->name, (bfd_vma) 0,
++ input_bfd, r->input_section, r->reloc->r_offset));
++
++ perform_relocation (r->howto, r->reloc, entry->value, r->input_section,
++ input_bfd, r->contents);
++ }
++
++ return TRUE;
++}
++
++/* Relocate a RISC-V ELF section.
++
++ The RELOCATE_SECTION function is called by the new ELF backend linker
++ to handle the relocations for a section.
++
++ The relocs are always passed as Rela structures.
++
++ This function is responsible for adjusting the section contents as
++ necessary, and (if generating a relocatable output file) adjusting
++ the reloc addend as necessary.
++
++ This function does not have to worry about setting the reloc
++ address or the reloc symbol index.
++
++ LOCAL_SYMS is a pointer to the swapped in local symbols.
++
++ LOCAL_SECTIONS is an array giving the section in the input file
++ corresponding to the st_shndx field of each local symbol.
++
++ The global hash table entry for the global symbols can be found
++ via elf_sym_hashes (input_bfd).
++
++ When generating relocatable output, this function must handle
++ STB_LOCAL/STT_SECTION symbols specially. The output symbol is
++ going to be the section symbol corresponding to the output
++ section, which means that the addend must be adjusted
++ accordingly. */
++
++static bfd_boolean
++riscv_elf_relocate_section (bfd *output_bfd, struct bfd_link_info *info,
++ bfd *input_bfd, asection *input_section,
++ bfd_byte *contents, Elf_Internal_Rela *relocs,
++ Elf_Internal_Sym *local_syms,
++ asection **local_sections)
++{
++ Elf_Internal_Rela *rel;
++ Elf_Internal_Rela *relend;
++ riscv_pcrel_relocs pcrel_relocs;
++ bfd_boolean ret = FALSE;
++ asection *sreloc = elf_section_data (input_section)->sreloc;
++ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++ Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (input_bfd);
++ struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (input_bfd);
++ bfd_vma *local_got_offsets = elf_local_got_offsets (input_bfd);
++
++ if (!riscv_init_pcrel_relocs (&pcrel_relocs))
++ return FALSE;
++
++ relend = relocs + input_section->reloc_count;
++ for (rel = relocs; rel < relend; rel++)
++ {
++ unsigned long r_symndx;
++ struct elf_link_hash_entry *h;
++ Elf_Internal_Sym *sym;
++ asection *sec;
++ bfd_vma relocation;
++ bfd_reloc_status_type r = bfd_reloc_ok;
++ const char *name;
++ bfd_vma off, ie_off;
++ bfd_boolean unresolved_reloc, is_ie = FALSE;
++ bfd_vma pc = sec_addr (input_section) + rel->r_offset;
++ int r_type = ELFNN_R_TYPE (rel->r_info), tls_type;
++ reloc_howto_type *howto = riscv_elf_rtype_to_howto (r_type);
++ const char *msg = NULL;
++
++ if (r_type == R_RISCV_GNU_VTINHERIT || r_type == R_RISCV_GNU_VTENTRY)
++ continue;
++
++ /* This is a final link. */
++ r_symndx = ELFNN_R_SYM (rel->r_info);
++ h = NULL;
++ sym = NULL;
++ sec = NULL;
++ unresolved_reloc = FALSE;
++ if (r_symndx < symtab_hdr->sh_info)
++ {
++ sym = local_syms + r_symndx;
++ sec = local_sections[r_symndx];
++ relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
++ }
++ else
++ {
++ bfd_boolean warned, ignored;
++
++ RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
++ r_symndx, symtab_hdr, sym_hashes,
++ h, sec, relocation,
++ unresolved_reloc, warned, ignored);
++ if (warned)
++ {
++ /* To avoid generating warning messages about truncated
++ relocations, set the relocation's address to be the same as
++ the start of this section. */
++ if (input_section->output_section != NULL)
++ relocation = input_section->output_section->vma;
++ else
++ relocation = 0;
++ }
++ }
++
++ if (sec != NULL && discarded_section (sec))
++ RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
++ rel, 1, relend, howto, 0, contents);
++
++ if (info->relocatable)
++ continue;
++
++ if (h != NULL)
++ name = h->root.root.string;
++ else
++ {
++ name = (bfd_elf_string_from_elf_section
++ (input_bfd, symtab_hdr->sh_link, sym->st_name));
++ if (name == NULL || *name == '\0')
++ name = bfd_section_name (input_bfd, sec);
++ }
++
++ switch (r_type)
++ {
++ case R_RISCV_NONE:
++ case R_RISCV_TPREL_ADD:
++ case R_RISCV_COPY:
++ case R_RISCV_JUMP_SLOT:
++ case R_RISCV_RELATIVE:
++ /* These require nothing of us at all. */
++ continue;
++
++ case R_RISCV_BRANCH:
++ case R_RISCV_HI20:
++ /* These require no special handling beyond perform_relocation. */
++ break;
++
++ case R_RISCV_GOT_HI20:
++ if (h != NULL)
++ {
++ bfd_boolean dyn;
++
++ off = h->got.offset;
++ BFD_ASSERT (off != (bfd_vma) -1);
++ dyn = elf_hash_table (info)->dynamic_sections_created;
++
++ if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
++ || (info->shared
++ && SYMBOL_REFERENCES_LOCAL (info, h)))
++ {
++ /* This is actually a static link, or it is a
++ -Bsymbolic link and the symbol is defined
++ locally, or the symbol was forced to be local
++ because of a version file. We must initialize
++ this entry in the global offset table. Since the
++ offset must always be a multiple of the word size,
++ we use the least significant bit to record whether
++ we have initialized it already.
++
++ When doing a dynamic link, we create a .rela.got
++ relocation entry to initialize the value. This
++ is done in the finish_dynamic_symbol routine. */
++ if ((off & 1) != 0)
++ off &= ~1;
++ else
++ {
++ bfd_put_NN (output_bfd, relocation,
++ htab->elf.sgot->contents + off);
++ h->got.offset |= 1;
++ }
++ }
++ else
++ unresolved_reloc = FALSE;
++ }
++ else
++ {
++ BFD_ASSERT (local_got_offsets != NULL
++ && local_got_offsets[r_symndx] != (bfd_vma) -1);
++
++ off = local_got_offsets[r_symndx];
++
++	  /* The offset must always be a multiple of the word size.
++ We use the least significant bit to record
++ whether we have already processed this entry. */
++ if ((off & 1) != 0)
++ off &= ~1;
++ else
++ {
++ if (info->shared)
++ {
++ asection *s;
++ Elf_Internal_Rela outrel;
++
++ /* We need to generate a R_RISCV_RELATIVE reloc
++ for the dynamic linker. */
++ s = htab->elf.srelgot;
++ BFD_ASSERT (s != NULL);
++
++ outrel.r_offset = sec_addr (htab->elf.sgot) + off;
++ outrel.r_info =
++ ELFNN_R_INFO (0, R_RISCV_RELATIVE);
++ outrel.r_addend = relocation;
++ relocation = 0;
++ riscv_elf_append_rela (output_bfd, s, &outrel);
++ }
++
++ bfd_put_NN (output_bfd, relocation,
++ htab->elf.sgot->contents + off);
++ local_got_offsets[r_symndx] |= 1;
++ }
++ }
++ relocation = sec_addr (htab->elf.sgot) + off;
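++      /* A GOT_HI20 reloc then behaves like PCREL_HI20, except that the
++         value recorded for the matching pcrel_lo is the address of the
++         GOT entry rather than of the symbol itself. */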
++ if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc, relocation))
++ r = bfd_reloc_overflow;
++ break;
++
++ case R_RISCV_ADD8:
++ case R_RISCV_ADD16:
++ case R_RISCV_ADD32:
++ case R_RISCV_ADD64:
++ {
++ bfd_vma old_value = bfd_get (howto->bitsize, input_bfd,
++ contents + rel->r_offset);
++ relocation = old_value + relocation;
++ }
++ break;
++
++ case R_RISCV_SUB8:
++ case R_RISCV_SUB16:
++ case R_RISCV_SUB32:
++ case R_RISCV_SUB64:
++ {
++ bfd_vma old_value = bfd_get (howto->bitsize, input_bfd,
++ contents + rel->r_offset);
++ relocation = old_value - relocation;
++ }
++ break;
++
++ case R_RISCV_CALL_PLT:
++ case R_RISCV_CALL:
++ case R_RISCV_JAL:
++ if (info->shared && h != NULL && h->plt.offset != MINUS_ONE)
++ {
++ /* Refer to the PLT entry. */
++ relocation = sec_addr (htab->elf.splt) + h->plt.offset;
++ unresolved_reloc = FALSE;
++ }
++ break;
++
++ case R_RISCV_TPREL_HI20:
++ relocation = tpoff (info, relocation);
++ break;
++
++ case R_RISCV_TPREL_LO12_I:
++ case R_RISCV_TPREL_LO12_S:
++ relocation = tpoff (info, relocation);
++ if (VALID_ITYPE_IMM (relocation + rel->r_addend))
++ {
++ /* We can use tp as the base register. */
++ bfd_vma insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
++ insn &= ~(OP_MASK_RS1 << OP_SH_RS1);
++ insn |= X_TP << OP_SH_RS1;
++ bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
++ }
++ break;
++
++ case R_RISCV_LO12_I:
++ case R_RISCV_LO12_S:
++ {
++ bfd_vma gp = riscv_global_pointer_value (info);
++ bfd_boolean x0_base = VALID_ITYPE_IMM (relocation + rel->r_addend);
++ if (x0_base || VALID_ITYPE_IMM (relocation + rel->r_addend - gp))
++ {
++ /* We can use x0 or gp as the base register. */
++ bfd_vma insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
++ insn &= ~(OP_MASK_RS1 << OP_SH_RS1);
++ if (!x0_base)
++ {
++ rel->r_addend -= gp;
++ insn |= X_GP << OP_SH_RS1;
++ }
++ bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
++ }
++ break;
++ }
++
++ case R_RISCV_PCREL_HI20:
++ if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc,
++ relocation + rel->r_addend))
++ r = bfd_reloc_overflow;
++ break;
++
++ case R_RISCV_PCREL_LO12_I:
++ case R_RISCV_PCREL_LO12_S:
++ if (riscv_record_pcrel_lo_reloc (&pcrel_relocs, input_section, info,
++ howto, rel, relocation, name,
++ contents))
++ continue;
++ r = bfd_reloc_overflow;
++ break;
++
++ case R_RISCV_TLS_DTPREL32:
++ case R_RISCV_TLS_DTPREL64:
++ relocation = dtpoff (info, relocation);
++ break;
++
++ case R_RISCV_32:
++ case R_RISCV_64:
++ if ((input_section->flags & SEC_ALLOC) == 0)
++ break;
++
++ if ((info->shared
++ && (h == NULL
++ || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
++ || h->root.type != bfd_link_hash_undefweak)
++ && (! howto->pc_relative
++ || !SYMBOL_CALLS_LOCAL (info, h)))
++ || (!info->shared
++ && h != NULL
++ && h->dynindx != -1
++ && !h->non_got_ref
++ && ((h->def_dynamic
++ && !h->def_regular)
++ || h->root.type == bfd_link_hash_undefweak
++ || h->root.type == bfd_link_hash_undefined)))
++ {
++ Elf_Internal_Rela outrel;
++ bfd_boolean skip_static_relocation, skip_dynamic_relocation;
++
++ /* When generating a shared object, these relocations
++ are copied into the output file to be resolved at run
++ time. */
++
++ outrel.r_offset =
++ _bfd_elf_section_offset (output_bfd, info, input_section,
++ rel->r_offset);
++ skip_static_relocation = outrel.r_offset != (bfd_vma) -2;
++ skip_dynamic_relocation = outrel.r_offset >= (bfd_vma) -2;
++ outrel.r_offset += sec_addr (input_section);
++
++ if (skip_dynamic_relocation)
++ memset (&outrel, 0, sizeof outrel);
++ else if (h != NULL && h->dynindx != -1
++ && !(info->shared
++ && SYMBOLIC_BIND (info, h)
++ && h->def_regular))
++ {
++ outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
++ outrel.r_addend = rel->r_addend;
++ }
++ else
++ {
++ outrel.r_info = ELFNN_R_INFO (0, R_RISCV_RELATIVE);
++ outrel.r_addend = relocation + rel->r_addend;
++ }
++
++ riscv_elf_append_rela (output_bfd, sreloc, &outrel);
++ if (skip_static_relocation)
++ continue;
++ }
++ break;
++
++ case R_RISCV_TLS_GOT_HI20:
++ is_ie = TRUE;
++ /* Fall through. */
++
++ case R_RISCV_TLS_GD_HI20:
++ if (h != NULL)
++ {
++ off = h->got.offset;
++ h->got.offset |= 1;
++ }
++ else
++ {
++ off = local_got_offsets[r_symndx];
++ local_got_offsets[r_symndx] |= 1;
++ }
++
++ tls_type = _bfd_riscv_elf_tls_type (input_bfd, h, r_symndx);
++ BFD_ASSERT (tls_type & (GOT_TLS_IE | GOT_TLS_GD));
++ /* If this symbol is referenced by both GD and IE TLS, the IE
++ reference's GOT slot follows the GD reference's slots. */
++ ie_off = 0;
++ if ((tls_type & GOT_TLS_GD) && (tls_type & GOT_TLS_IE))
++ ie_off = 2 * GOT_ENTRY_SIZE;
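++      /* GOT layout sketch: a GD reference owns two consecutive slots
++         (DTPMOD, then DTPREL); a following IE reference, if present,
++         owns one TPREL slot after them, hence 2 * GOT_ENTRY_SIZE. */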
++
++ if ((off & 1) != 0)
++ off &= ~1;
++ else
++ {
++ Elf_Internal_Rela outrel;
++ int indx = 0;
++ bfd_boolean need_relocs = FALSE;
++
++ if (htab->elf.srelgot == NULL)
++ abort ();
++
++ if (h != NULL)
++ {
++ bfd_boolean dyn;
++ dyn = htab->elf.dynamic_sections_created;
++
++ if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h)
++ && (!info->shared
++ || !SYMBOL_REFERENCES_LOCAL (info, h)))
++ {
++ indx = h->dynindx;
++ }
++ }
++
++ /* The GOT entries have not been initialized yet. Do it
++ now, and emit any relocations. */
++ if ((info->shared || indx != 0)
++ && (h == NULL
++ || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
++ || h->root.type != bfd_link_hash_undefweak))
++ need_relocs = TRUE;
++
++ if (tls_type & GOT_TLS_GD)
++ {
++ if (need_relocs)
++ {
++ outrel.r_offset = sec_addr (htab->elf.sgot) + off;
++ outrel.r_addend = 0;
++ outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_DTPMODNN);
++ bfd_put_NN (output_bfd, 0,
++ htab->elf.sgot->contents + off);
++ riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
++ if (indx == 0)
++ {
++ BFD_ASSERT (! unresolved_reloc);
++ bfd_put_NN (output_bfd,
++ dtpoff (info, relocation),
++ (htab->elf.sgot->contents + off +
++ RISCV_ELF_WORD_BYTES));
++ }
++ else
++ {
++ bfd_put_NN (output_bfd, 0,
++ (htab->elf.sgot->contents + off +
++ RISCV_ELF_WORD_BYTES));
++ outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_DTPRELNN);
++ outrel.r_offset += RISCV_ELF_WORD_BYTES;
++ riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
++ }
++ }
++ else
++ {
++ /* If we are not emitting relocations for a
++ general dynamic reference, then we must be in a
++ static link or an executable link with the
++ symbol binding locally. Mark it as belonging
++ to module 1, the executable. */
++ bfd_put_NN (output_bfd, 1,
++ htab->elf.sgot->contents + off);
++ bfd_put_NN (output_bfd,
++ dtpoff (info, relocation),
++ (htab->elf.sgot->contents + off +
++ RISCV_ELF_WORD_BYTES));
++ }
++ }
++
++ if (tls_type & GOT_TLS_IE)
++ {
++ if (need_relocs)
++ {
++ bfd_put_NN (output_bfd, 0,
++ htab->elf.sgot->contents + off + ie_off);
++ outrel.r_offset = sec_addr (htab->elf.sgot)
++ + off + ie_off;
++ outrel.r_addend = 0;
++ if (indx == 0)
++ outrel.r_addend = tpoff (info, relocation);
++ outrel.r_info = ELFNN_R_INFO (indx, R_RISCV_TLS_TPRELNN);
++ riscv_elf_append_rela (output_bfd, htab->elf.srelgot, &outrel);
++ }
++ else
++ {
++ bfd_put_NN (output_bfd, tpoff (info, relocation),
++ htab->elf.sgot->contents + off + ie_off);
++ }
++ }
++ }
++
++ BFD_ASSERT (off < (bfd_vma) -2);
++ relocation = sec_addr (htab->elf.sgot) + off + (is_ie ? ie_off : 0);
++ if (!riscv_record_pcrel_hi_reloc (&pcrel_relocs, pc, relocation))
++ r = bfd_reloc_overflow;
++ unresolved_reloc = FALSE;
++ break;
++
++ default:
++ r = bfd_reloc_notsupported;
++ }
++
++ /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
++ because such sections are not SEC_ALLOC and thus ld.so will
++ not process them. */
++ if (unresolved_reloc
++ && !((input_section->flags & SEC_DEBUGGING) != 0
++ && h->def_dynamic)
++ && _bfd_elf_section_offset (output_bfd, info, input_section,
++ rel->r_offset) != (bfd_vma) -1)
++ {
++ (*_bfd_error_handler)
++ (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
++ input_bfd,
++ input_section,
++ (long) rel->r_offset,
++ howto->name,
++ h->root.root.string);
++ continue;
++ }
++
++ if (r == bfd_reloc_ok)
++ r = perform_relocation (howto, rel, relocation, input_section,
++ input_bfd, contents);
++
++ switch (r)
++ {
++ case bfd_reloc_ok:
++ continue;
++
++ case bfd_reloc_overflow:
++ r = info->callbacks->reloc_overflow
++ (info, (h ? &h->root : NULL), name, howto->name,
++ (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
++ break;
++
++ case bfd_reloc_undefined:
++ r = info->callbacks->undefined_symbol
++ (info, name, input_bfd, input_section, rel->r_offset,
++ TRUE);
++ break;
++
++ case bfd_reloc_outofrange:
++ msg = _("internal error: out of range error");
++ break;
++
++ case bfd_reloc_notsupported:
++ msg = _("internal error: unsupported relocation error");
++ break;
++
++ case bfd_reloc_dangerous:
++ msg = _("internal error: dangerous relocation");
++ break;
++
++ default:
++ msg = _("internal error: unknown error");
++ break;
++ }
++
++ if (msg)
++ r = info->callbacks->warning
++ (info, msg, name, input_bfd, input_section, rel->r_offset);
++ goto out;
++ }
++
++ ret = riscv_resolve_pcrel_lo_relocs (&pcrel_relocs);
++out:
++ riscv_free_pcrel_relocs (&pcrel_relocs);
++ return ret;
++}
++
++/* Finish up dynamic symbol handling. We set the contents of various
++ dynamic sections here. */
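++
++/* Layout assumed throughout: .plt starts with a PLT_HEADER_SIZE-byte
++   header followed by PLT_ENTRY_SIZE-byte entries, so entry I lives at
++   offset PLT_HEADER_SIZE + I * PLT_ENTRY_SIZE (cf. riscv_elf_plt_sym_val
++   below). */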
++
++static bfd_boolean
++riscv_elf_finish_dynamic_symbol (bfd *output_bfd,
++ struct bfd_link_info *info,
++ struct elf_link_hash_entry *h,
++ Elf_Internal_Sym *sym)
++{
++ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++ const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
++
++ if (h->plt.offset != (bfd_vma) -1)
++ {
++ /* We've decided to create a PLT entry for this symbol. */
++ bfd_byte *loc;
++ bfd_vma i, header_address, plt_idx, got_address;
++ uint32_t plt_entry[PLT_ENTRY_INSNS];
++ Elf_Internal_Rela rela;
++
++ BFD_ASSERT (h->dynindx != -1);
++
++ /* Calculate the address of the PLT header. */
++ header_address = sec_addr (htab->elf.splt);
++
++ /* Calculate the index of the entry. */
++ plt_idx = (h->plt.offset - PLT_HEADER_SIZE) / PLT_ENTRY_SIZE;
++
++ /* Calculate the address of the .got.plt entry. */
++ got_address = riscv_elf_got_plt_val (plt_idx, info);
++
++ /* Find out where the .plt entry should go. */
++ loc = htab->elf.splt->contents + h->plt.offset;
++
++ /* Fill in the PLT entry itself. */
++ riscv_make_plt_entry (got_address, header_address + h->plt.offset,
++ plt_entry);
++ for (i = 0; i < PLT_ENTRY_INSNS; i++)
++ bfd_put_32 (output_bfd, plt_entry[i], loc + 4*i);
++
++ /* Fill in the initial value of the .got.plt entry. */
++ loc = htab->elf.sgotplt->contents
++ + (got_address - sec_addr (htab->elf.sgotplt));
++ bfd_put_NN (output_bfd, sec_addr (htab->elf.splt), loc);
++
++ /* Fill in the entry in the .rela.plt section. */
++ rela.r_offset = got_address;
++ rela.r_addend = 0;
++ rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_JUMP_SLOT);
++
++ loc = htab->elf.srelplt->contents + plt_idx * sizeof (ElfNN_External_Rela);
++ bed->s->swap_reloca_out (output_bfd, &rela, loc);
++
++ if (!h->def_regular)
++ {
++ /* Mark the symbol as undefined, rather than as defined in
++ the .plt section. Leave the value alone. */
++ sym->st_shndx = SHN_UNDEF;
++ /* If the symbol is weak, we do need to clear the value.
++ Otherwise, the PLT entry would provide a definition for
++ the symbol even if the symbol wasn't defined anywhere,
++ and so the symbol would never be NULL. */
++ if (!h->ref_regular_nonweak)
++ sym->st_value = 0;
++ }
++ }
++
++ if (h->got.offset != (bfd_vma) -1
++ && !(riscv_elf_hash_entry(h)->tls_type & (GOT_TLS_GD | GOT_TLS_IE)))
++ {
++ asection *sgot;
++ asection *srela;
++ Elf_Internal_Rela rela;
++
++ /* This symbol has an entry in the GOT. Set it up. */
++
++ sgot = htab->elf.sgot;
++ srela = htab->elf.srelgot;
++ BFD_ASSERT (sgot != NULL && srela != NULL);
++
++ rela.r_offset = sec_addr (sgot) + (h->got.offset &~ (bfd_vma) 1);
++
++ /* If this is a -Bsymbolic link, and the symbol is defined
++ locally, we just want to emit a RELATIVE reloc. Likewise if
++ the symbol was forced to be local because of a version file.
++ The entry in the global offset table will already have been
++ initialized in the relocate_section function. */
++ if (info->shared
++ && (info->symbolic || h->dynindx == -1)
++ && h->def_regular)
++ {
++ asection *sec = h->root.u.def.section;
++ rela.r_info = ELFNN_R_INFO (0, R_RISCV_RELATIVE);
++ rela.r_addend = (h->root.u.def.value
++ + sec->output_section->vma
++ + sec->output_offset);
++ }
++ else
++ {
++ BFD_ASSERT (h->dynindx != -1);
++ rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_NN);
++ rela.r_addend = 0;
++ }
++
++ bfd_put_NN (output_bfd, 0,
++ sgot->contents + (h->got.offset & ~(bfd_vma) 1));
++ riscv_elf_append_rela (output_bfd, srela, &rela);
++ }
++
++ if (h->needs_copy)
++ {
++ Elf_Internal_Rela rela;
++
++      /* This symbol needs a copy reloc. Set it up. */
++ BFD_ASSERT (h->dynindx != -1);
++
++ rela.r_offset = sec_addr (h->root.u.def.section) + h->root.u.def.value;
++ rela.r_info = ELFNN_R_INFO (h->dynindx, R_RISCV_COPY);
++ rela.r_addend = 0;
++ riscv_elf_append_rela (output_bfd, htab->srelbss, &rela);
++ }
++
++ /* Mark some specially defined symbols as absolute. */
++ if (h == htab->elf.hdynamic
++ || (h == htab->elf.hgot || h == htab->elf.hplt))
++ sym->st_shndx = SHN_ABS;
++
++ return TRUE;
++}
++
++/* Finish up the dynamic sections. */
++
++static bfd_boolean
++riscv_finish_dyn (bfd *output_bfd, struct bfd_link_info *info,
++ bfd *dynobj, asection *sdyn)
++{
++ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++ const struct elf_backend_data *bed = get_elf_backend_data (output_bfd);
++ size_t dynsize = bed->s->sizeof_dyn;
++ bfd_byte *dyncon, *dynconend;
++
++ dynconend = sdyn->contents + sdyn->size;
++ for (dyncon = sdyn->contents; dyncon < dynconend; dyncon += dynsize)
++ {
++ Elf_Internal_Dyn dyn;
++ asection *s;
++
++ bed->s->swap_dyn_in (dynobj, dyncon, &dyn);
++
++ switch (dyn.d_tag)
++ {
++ case DT_PLTGOT:
++ s = htab->elf.sgotplt;
++ dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
++ break;
++ case DT_JMPREL:
++ s = htab->elf.srelplt;
++ dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
++ break;
++ case DT_PLTRELSZ:
++ s = htab->elf.srelplt;
++ dyn.d_un.d_val = s->size;
++ break;
++ default:
++ continue;
++ }
++
++ bed->s->swap_dyn_out (output_bfd, &dyn, dyncon);
++ }
++ return TRUE;
++}
++
++static bfd_boolean
++riscv_elf_finish_dynamic_sections (bfd *output_bfd,
++ struct bfd_link_info *info)
++{
++ bfd *dynobj;
++ asection *sdyn;
++ struct riscv_elf_link_hash_table *htab;
++
++ htab = riscv_elf_hash_table (info);
++ BFD_ASSERT (htab != NULL);
++ dynobj = htab->elf.dynobj;
++
++ sdyn = bfd_get_linker_section (dynobj, ".dynamic");
++
++ if (elf_hash_table (info)->dynamic_sections_created)
++ {
++ asection *splt;
++ bfd_boolean ret;
++
++ splt = htab->elf.splt;
++ BFD_ASSERT (splt != NULL && sdyn != NULL);
++
++ ret = riscv_finish_dyn (output_bfd, info, dynobj, sdyn);
++
++ if (ret != TRUE)
++ return ret;
++
++ /* Fill in the head and tail entries in the procedure linkage table. */
++ if (splt->size > 0)
++ {
++ int i;
++ uint32_t plt_header[PLT_HEADER_INSNS];
++ riscv_make_plt0_entry (sec_addr (htab->elf.sgotplt),
++ sec_addr (splt), plt_header);
++
++ for (i = 0; i < PLT_HEADER_INSNS; i++)
++ bfd_put_32 (output_bfd, plt_header[i], splt->contents + 4*i);
++ }
++
++ elf_section_data (splt->output_section)->this_hdr.sh_entsize
++ = PLT_ENTRY_SIZE;
++ }
++
++ if (htab->elf.sgotplt)
++ {
++ if (bfd_is_abs_section (htab->elf.sgotplt->output_section))
++ {
++ (*_bfd_error_handler)
++ (_("discarded output section: `%A'"), htab->elf.sgotplt);
++ return FALSE;
++ }
++
++ if (htab->elf.sgotplt->size > 0)
++ {
++ /* Write the first two entries in .got.plt, needed for the dynamic
++ linker. */
++ bfd_put_NN (output_bfd, (bfd_vma) -1, htab->elf.sgotplt->contents);
++ bfd_put_NN (output_bfd, (bfd_vma) 0,
++ htab->elf.sgotplt->contents + GOT_ENTRY_SIZE);
++ }
++
++ elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize =
++ GOT_ENTRY_SIZE;
++ }
++
++ if (htab->elf.sgot)
++ {
++ if (htab->elf.sgot->size > 0)
++ {
++ /* Set the first entry in the global offset table to the address of
++ the dynamic section. */
++ bfd_vma val = sdyn ? sec_addr (sdyn) : 0;
++ bfd_put_NN (output_bfd, val, htab->elf.sgot->contents);
++ }
++
++ elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize =
++ GOT_ENTRY_SIZE;
++ }
++
++ return TRUE;
++}
++
++/* Return address for Ith PLT stub in section PLT, for relocation REL
++ or (bfd_vma) -1 if it should not be included. */
++
++static bfd_vma
++riscv_elf_plt_sym_val (bfd_vma i, const asection *plt,
++ const arelent *rel ATTRIBUTE_UNUSED)
++{
++ return plt->vma + PLT_HEADER_SIZE + i * PLT_ENTRY_SIZE;
++}
++
++static enum elf_reloc_type_class
++riscv_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
++ const asection *rel_sec ATTRIBUTE_UNUSED,
++ const Elf_Internal_Rela *rela)
++{
++ switch (ELFNN_R_TYPE (rela->r_info))
++ {
++ case R_RISCV_RELATIVE:
++ return reloc_class_relative;
++ case R_RISCV_JUMP_SLOT:
++ return reloc_class_plt;
++ case R_RISCV_COPY:
++ return reloc_class_copy;
++ default:
++ return reloc_class_normal;
++ }
++}
++
++/* Return true if bfd machine EXTENSION is an extension of machine BASE. */
++
++static bfd_boolean
++riscv_mach_extends_p (unsigned long base, unsigned long extension)
++{
++ return extension == base;
++}
++
++/* Merge backend specific data from an object file to the output
++ object file when linking. */
++
++static bfd_boolean
++_bfd_riscv_elf_merge_private_bfd_data (bfd *ibfd, bfd *obfd)
++{
++ flagword old_flags;
++ flagword new_flags;
++
++ if (!is_riscv_elf (ibfd) || !is_riscv_elf (obfd))
++ return TRUE;
++
++ if (strcmp (bfd_get_target (ibfd), bfd_get_target (obfd)) != 0)
++ {
++ (*_bfd_error_handler)
++ (_("%B: ABI is incompatible with that of the selected emulation"),
++ ibfd);
++ return FALSE;
++ }
++
++ if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
++ return FALSE;
++
++ new_flags = elf_elfheader (ibfd)->e_flags;
++ old_flags = elf_elfheader (obfd)->e_flags;
++
++ if (! elf_flags_init (obfd))
++ {
++ elf_flags_init (obfd) = TRUE;
++ elf_elfheader (obfd)->e_flags = new_flags;
++ elf_elfheader (obfd)->e_ident[EI_CLASS]
++ = elf_elfheader (ibfd)->e_ident[EI_CLASS];
++
++ if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
++ && (bfd_get_arch_info (obfd)->the_default
++ || riscv_mach_extends_p (bfd_get_mach (obfd),
++ bfd_get_mach (ibfd))))
++ {
++ if (! bfd_set_arch_mach (obfd, bfd_get_arch (ibfd),
++ bfd_get_mach (ibfd)))
++ return FALSE;
++ }
++
++ return TRUE;
++ }
++
++ /* Check flag compatibility. */
++
++ if (new_flags == old_flags)
++ return TRUE;
++
++ /* Don't link RV32 and RV64. */
++ if (elf_elfheader (ibfd)->e_ident[EI_CLASS]
++ != elf_elfheader (obfd)->e_ident[EI_CLASS])
++ {
++ (*_bfd_error_handler)
++ (_("%B: ELF class mismatch: can't link 32- and 64-bit modules"), ibfd);
++ goto fail;
++ }
++
++ /* Warn about any other mismatches. */
++ if (new_flags != old_flags)
++ {
++ if (!EF_IS_RISCV_EXT_Xcustom (new_flags) &&
++ !EF_IS_RISCV_EXT_Xcustom (old_flags))
++ {
++ (*_bfd_error_handler)
++ (_("%B: uses different e_flags (0x%lx) fields than previous modules (0x%lx)"),
++ ibfd, (unsigned long) new_flags,
++ (unsigned long) old_flags);
++ goto fail;
++ }
++ else if (EF_IS_RISCV_EXT_Xcustom(new_flags))
++ EF_SET_RISCV_EXT (elf_elfheader (obfd)->e_flags,
++ EF_GET_RISCV_EXT (old_flags));
++ }
++
++ return TRUE;
++
++fail:
++ bfd_set_error (bfd_error_bad_value);
++ return FALSE;
++}
++
++/* Delete some bytes from a section while relaxing. */
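++
++/* Everything encoding an offset into SEC beyond ADDR must shift down by
++   COUNT: the section bytes themselves, the r_offset of each reloc, and
++   the values (and sometimes sizes) of local and global symbols defined
++   in SEC. */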
++
++static bfd_boolean
++riscv_relax_delete_bytes (bfd *abfd, asection *sec, bfd_vma addr, size_t count)
++{
++ unsigned int i, symcount;
++ bfd_vma toaddr = sec->size;
++ struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (abfd);
++ Elf_Internal_Shdr *symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
++ unsigned int sec_shndx = _bfd_elf_section_from_bfd_section (abfd, sec);
++ struct bfd_elf_section_data *data = elf_section_data (sec);
++ bfd_byte *contents = data->this_hdr.contents;
++
++ /* Actually delete the bytes. */
++ sec->size -= count;
++ memmove (contents + addr, contents + addr + count, toaddr - addr - count);
++
++ /* Adjust the location of all of the relocs. Note that we need not
++ adjust the addends, since all PC-relative references must be against
++ symbols, which we will adjust below. */
++ for (i = 0; i < sec->reloc_count; i++)
++ if (data->relocs[i].r_offset > addr && data->relocs[i].r_offset < toaddr)
++ data->relocs[i].r_offset -= count;
++
++ /* Adjust the local symbols defined in this section. */
++ for (i = 0; i < symtab_hdr->sh_info; i++)
++ {
++ Elf_Internal_Sym *sym = (Elf_Internal_Sym *) symtab_hdr->contents + i;
++ if (sym->st_shndx == sec_shndx)
++ {
++ /* If the symbol is in the range of memory we just moved, we
++ have to adjust its value. */
++ if (sym->st_value > addr && sym->st_value <= toaddr)
++ sym->st_value -= count;
++
++ /* If the symbol *spans* the bytes we just deleted (i.e. its
++ *end* is in the moved bytes but its *start* isn't), then we
++ must adjust its size. */
++ if (sym->st_value <= addr
++ && sym->st_value + sym->st_size > addr
++ && sym->st_value + sym->st_size <= toaddr)
++ sym->st_size -= count;
++ }
++ }
++
++ /* Now adjust the global symbols defined in this section. */
++ symcount = ((symtab_hdr->sh_size / sizeof(ElfNN_External_Sym))
++ - symtab_hdr->sh_info);
++
++ for (i = 0; i < symcount; i++)
++ {
++ struct elf_link_hash_entry *sym_hash = sym_hashes[i];
++
++ if ((sym_hash->root.type == bfd_link_hash_defined
++ || sym_hash->root.type == bfd_link_hash_defweak)
++ && sym_hash->root.u.def.section == sec)
++ {
++ /* As above, adjust the value if needed. */
++ if (sym_hash->root.u.def.value > addr
++ && sym_hash->root.u.def.value <= toaddr)
++ sym_hash->root.u.def.value -= count;
++
++ /* As above, adjust the size if needed. */
++ if (sym_hash->root.u.def.value <= addr
++ && sym_hash->root.u.def.value + sym_hash->size > addr
++ && sym_hash->root.u.def.value + sym_hash->size <= toaddr)
++ sym_hash->size -= count;
++ }
++ }
++
++ return TRUE;
++}
++
++/* Relax AUIPC + JALR into JAL. */
++
++static bfd_boolean
++_bfd_riscv_relax_call (bfd *abfd, asection *sec,
++ struct bfd_link_info *link_info,
++ Elf_Internal_Rela *rel,
++ bfd_vma symval,
++ bfd_boolean *again)
++{
++ bfd_byte *contents = elf_section_data (sec)->this_hdr.contents;
++ bfd_signed_vma foff = symval - (sec_addr (sec) + rel->r_offset);
++ bfd_boolean near_zero = !link_info->shared && symval < RISCV_IMM_REACH/2;
++ bfd_vma auipc, jalr;
++ int r_type;
++
++ /* See if this function call can be shortened. */
++ if (!VALID_UJTYPE_IMM (foff) && !near_zero)
++ return TRUE;
++
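++  /* Sketch of the shortening: the 8-byte sequence
++       auipc t, %hi(offset)
++       jalr  rd, t, %lo(offset)
++     becomes the 4-byte "jal rd, offset" when OFFSET fits a UJ-type
++     immediate, or "jalr rd, x0, addr" when the target lies within
++     2 KiB of address zero in a non-shared link. */
++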
++ /* Shorten the function call. */
++ BFD_ASSERT (rel->r_offset + 8 <= sec->size);
++
++ auipc = bfd_get_32 (abfd, contents + rel->r_offset);
++ jalr = bfd_get_32 (abfd, contents + rel->r_offset + 4);
++
++ if (VALID_UJTYPE_IMM (foff))
++ {
++ /* Relax to JAL rd, addr. */
++ r_type = R_RISCV_JAL;
++ auipc = (jalr & (OP_MASK_RD << OP_SH_RD)) | MATCH_JAL;
++ }
++ else /* near_zero */
++ {
++ /* Relax to JALR rd, x0, addr. */
++ r_type = R_RISCV_LO12_I;
++ auipc = (jalr & (OP_MASK_RD << OP_SH_RD)) | MATCH_JALR;
++ }
++
++ /* Replace the R_RISCV_CALL reloc. */
++ rel->r_info = ELFNN_R_INFO (ELFNN_R_SYM (rel->r_info), r_type);
++ /* Replace the AUIPC. */
++ bfd_put_32 (abfd, auipc, contents + rel->r_offset);
++
++ /* Delete unnecessary JALR. */
++ *again = TRUE;
++ return riscv_relax_delete_bytes (abfd, sec, rel->r_offset + 4, 4);
++}
++
++/* Relax non-PIC global variable references. */
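++
++/* Sketch: in "lui t0, %hi(sym); lw a0, %lo(sym)(t0)", the LUI can be
++   deleted when SYM lies within 2 KiB of absolute zero or within
++   +/-2 KiB of the global pointer; the surviving LO12 reloc is then
++   rebased onto x0 or gp by the R_RISCV_LO12_I/S case in
++   riscv_elf_relocate_section. */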
++
++static bfd_boolean
++_bfd_riscv_relax_lui (bfd *abfd, asection *sec,
++ struct bfd_link_info *link_info,
++ Elf_Internal_Rela *rel,
++ bfd_vma symval,
++ bfd_boolean *again)
++{
++ bfd_vma gp = riscv_global_pointer_value (link_info);
++
++ /* Bail out if this symbol isn't in range of either gp or x0. */
++ if (!VALID_ITYPE_IMM (symval - gp) && !(symval < RISCV_IMM_REACH/2))
++ return TRUE;
++
++ /* We can delete the unnecessary AUIPC. The corresponding LO12 reloc
++ will be converted to GPREL during relocation. */
++ BFD_ASSERT (rel->r_offset + 4 <= sec->size);
++ rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
++
++ *again = TRUE;
++ return riscv_relax_delete_bytes (abfd, sec, rel->r_offset, 4);
++}
++
++/* Relax non-PIC TLS references. */
++
++static bfd_boolean
++_bfd_riscv_relax_tls_le (bfd *abfd, asection *sec,
++ struct bfd_link_info *link_info,
++ Elf_Internal_Rela *rel,
++ bfd_vma symval,
++ bfd_boolean *again)
++{
++ /* See if this symbol is in range of tp. */
++ if (RISCV_CONST_HIGH_PART (tpoff (link_info, symval)) != 0)
++ return TRUE;
++
++ /* We can delete the unnecessary LUI and tp add. The LO12 reloc will be
++ made directly tp-relative. */
++ BFD_ASSERT (rel->r_offset + 4 <= sec->size);
++ rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
++
++ *again = TRUE;
++ return riscv_relax_delete_bytes (abfd, sec, rel->r_offset, 4);
++}
++
++/* Implement R_RISCV_ALIGN by deleting excess alignment NOPs. */
++
++static bfd_boolean
++_bfd_riscv_relax_align (bfd *abfd, asection *sec,
++ struct bfd_link_info *link_info ATTRIBUTE_UNUSED,
++ Elf_Internal_Rela *rel,
++ bfd_vma symval,
++ bfd_boolean *again ATTRIBUTE_UNUSED)
++{
++ bfd_vma alignment = 1;
++ while (alignment <= rel->r_addend)
++ alignment *= 2;
++
++ symval -= rel->r_addend;
++ bfd_vma aligned_addr = ((symval - 1) & ~(alignment - 1)) + alignment;
++ bfd_vma nop_bytes_needed = aligned_addr - symval;
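++
++  /* Worked example: an addend of 4 yields alignment = 8 above.  If the
++     padding starts at 0x1004, aligned_addr is 0x1008 and all 4 NOP
++     bytes are needed; if it starts at 0x1008, none are, and all 4 are
++     deleted below. */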
++
++ /* Make sure there are enough NOPs to actually achieve the alignment. */
++ if (rel->r_addend < nop_bytes_needed)
++ return FALSE;
++
++ /* Delete the reloc. */
++ rel->r_info = ELFNN_R_INFO (0, R_RISCV_NONE);
++
++ /* If the number of NOPs is already correct, there's nothing to do. */
++ if (nop_bytes_needed == rel->r_addend)
++ return TRUE;
++
++ /* Delete the excess NOPs. */
++ return riscv_relax_delete_bytes (abfd, sec, rel->r_offset,
++ rel->r_addend - nop_bytes_needed);
++}
++
++/* Relax a section. Pass 0 shortens code sequences unless disabled.
++ Pass 1, which cannot be disabled, handles code alignment directives. */
++
++static bfd_boolean
++_bfd_riscv_relax_section (bfd *abfd, asection *sec,
++ struct bfd_link_info *info, bfd_boolean *again)
++{
++ Elf_Internal_Shdr *symtab_hdr = &elf_symtab_hdr (abfd);
++ struct riscv_elf_link_hash_table *htab = riscv_elf_hash_table (info);
++ struct bfd_elf_section_data *data = elf_section_data (sec);
++ Elf_Internal_Rela *relocs;
++ bfd_boolean ret = FALSE;
++ unsigned int i;
++
++ *again = FALSE;
++
++ if (info->relocatable
++ || (sec->flags & SEC_RELOC) == 0
++ || sec->reloc_count == 0
++ || (info->disable_target_specific_optimizations
++ && info->relax_pass == 0))
++ return TRUE;
++
++ /* Read this BFD's relocs if we haven't done so already. */
++ if (data->relocs)
++ relocs = data->relocs;
++ else if (!(relocs = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL,
++ info->keep_memory)))
++ goto fail;
++
++ /* Examine and consider relaxing each reloc. */
++ for (i = 0; i < sec->reloc_count; i++)
++ {
++ Elf_Internal_Rela *rel = data->relocs + i;
++ typeof(&_bfd_riscv_relax_call) relax_func = NULL;
++ int type = ELFNN_R_TYPE (rel->r_info);
++ bfd_vma symval;
++
++ if (info->relax_pass == 0)
++ {
++ if (type == R_RISCV_CALL || type == R_RISCV_CALL_PLT)
++ relax_func = _bfd_riscv_relax_call;
++ else if (type == R_RISCV_HI20)
++ relax_func = _bfd_riscv_relax_lui;
++ else if (type == R_RISCV_TPREL_HI20 || type == R_RISCV_TPREL_ADD)
++ relax_func = _bfd_riscv_relax_tls_le;
++ }
++ else if (type == R_RISCV_ALIGN)
++ relax_func = _bfd_riscv_relax_align;
++
++ if (!relax_func)
++ continue;
++
++ data->relocs = relocs;
++
++ /* Read this BFD's contents if we haven't done so already. */
++ if (!data->this_hdr.contents
++ && !bfd_malloc_and_get_section (abfd, sec, &data->this_hdr.contents))
++ goto fail;
++
++ /* Read this BFD's symbols if we haven't done so already. */
++ if (symtab_hdr->sh_info != 0
++ && !symtab_hdr->contents
++ && !(symtab_hdr->contents =
++ (unsigned char *) bfd_elf_get_elf_syms (abfd, symtab_hdr,
++ symtab_hdr->sh_info,
++ 0, NULL, NULL, NULL)))
++ goto fail;
++
++ /* Get the value of the symbol referred to by the reloc. */
++ if (ELFNN_R_SYM (rel->r_info) < symtab_hdr->sh_info)
++ {
++ /* A local symbol. */
++ Elf_Internal_Sym *isym = ((Elf_Internal_Sym *) symtab_hdr->contents
++ + ELFNN_R_SYM (rel->r_info));
++
++ if (isym->st_shndx == SHN_UNDEF)
++ symval = sec_addr (sec) + rel->r_offset;
++ else
++ {
++ asection *isec;
++ BFD_ASSERT (isym->st_shndx < elf_numsections (abfd));
++ isec = elf_elfsections (abfd)[isym->st_shndx]->bfd_section;
++ if (sec_addr (isec) == 0)
++ continue;
++ symval = sec_addr (isec) + isym->st_value;
++ }
++ }
++ else
++ {
++ unsigned long indx;
++ struct elf_link_hash_entry *h;
++
++ indx = ELFNN_R_SYM (rel->r_info) - symtab_hdr->sh_info;
++ h = elf_sym_hashes (abfd)[indx];
++
++ while (h->root.type == bfd_link_hash_indirect
++ || h->root.type == bfd_link_hash_warning)
++ h = (struct elf_link_hash_entry *) h->root.u.i.link;
++
++ if (h->plt.offset != MINUS_ONE)
++ symval = sec_addr (htab->elf.splt) + h->plt.offset;
++ else if (h->root.type == bfd_link_hash_undefweak)
++ symval = 0;
++ else if (h->root.u.def.section->output_section == NULL
++ || (h->root.type != bfd_link_hash_defined
++ && h->root.type != bfd_link_hash_defweak))
++ continue;
++ else
++ symval = sec_addr (h->root.u.def.section) + h->root.u.def.value;
++ }
++
++ symval += rel->r_addend;
++
++ if (!relax_func (abfd, sec, info, rel, symval, again))
++ goto fail;
++ }
++
++ ret = TRUE;
++
++fail:
++ if (relocs != data->relocs)
++ free (relocs);
++
++ return ret;
++}
++
++#define ELF_ARCH bfd_arch_riscv
++#define ELF_TARGET_ID RISCV_ELF_DATA
++#define ELF_MACHINE_CODE EM_RISCV
++#define ELF_MAXPAGESIZE 0x2000
++#define ELF_COMMONPAGESIZE 0x2000
++
++#define TARGET_LITTLE_SYM riscv_elfNN_vec
++#define TARGET_LITTLE_NAME "elfNN-littleriscv"
++
++#define elf_backend_reloc_type_class riscv_reloc_type_class
++
++#define bfd_elfNN_bfd_reloc_name_lookup riscv_reloc_name_lookup
++#define bfd_elfNN_bfd_link_hash_table_create riscv_elf_link_hash_table_create
++#define bfd_elfNN_bfd_reloc_type_lookup riscv_reloc_type_lookup
++#define bfd_elfNN_bfd_merge_private_bfd_data \
++ _bfd_riscv_elf_merge_private_bfd_data
++
++#define elf_backend_copy_indirect_symbol riscv_elf_copy_indirect_symbol
++#define elf_backend_create_dynamic_sections riscv_elf_create_dynamic_sections
++#define elf_backend_check_relocs riscv_elf_check_relocs
++#define elf_backend_adjust_dynamic_symbol riscv_elf_adjust_dynamic_symbol
++#define elf_backend_size_dynamic_sections riscv_elf_size_dynamic_sections
++#define elf_backend_relocate_section riscv_elf_relocate_section
++#define elf_backend_finish_dynamic_symbol riscv_elf_finish_dynamic_symbol
++#define elf_backend_finish_dynamic_sections riscv_elf_finish_dynamic_sections
++#define elf_backend_gc_mark_hook riscv_elf_gc_mark_hook
++#define elf_backend_gc_sweep_hook riscv_elf_gc_sweep_hook
++#define elf_backend_plt_sym_val riscv_elf_plt_sym_val
++#define elf_info_to_howto_rel NULL
++#define elf_info_to_howto riscv_info_to_howto_rela
++#define bfd_elfNN_bfd_relax_section _bfd_riscv_relax_section
++
++#define elf_backend_init_index_section _bfd_elf_init_1_index_section
++
++#define elf_backend_can_gc_sections 1
++#define elf_backend_can_refcount 1
++#define elf_backend_want_got_plt 1
++#define elf_backend_plt_readonly 1
++#define elf_backend_plt_alignment 4
++#define elf_backend_want_plt_sym 1
++#define elf_backend_got_header_size (ARCH_SIZE / 8)
++#define elf_backend_rela_normal 1
++#define elf_backend_default_execstack 0
++
++#include "elfNN-target.h"
+diff -urN original-binutils/bfd/elfxx-riscv.c binutils/bfd/elfxx-riscv.c
+--- original-binutils/bfd/elfxx-riscv.c 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/bfd/elfxx-riscv.c 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,730 @@
++/* RISC-V-specific support for ELF.
++ Copyright 2011-2014 Free Software Foundation, Inc.
++
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on TILE-Gx and MIPS targets.
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++#include "sysdep.h"
++#include "bfd.h"
++#include "libbfd.h"
++#include "elf-bfd.h"
++#include "elf/riscv.h"
++#include "opcode/riscv.h"
++#include "libiberty.h"
++#include "elfxx-riscv.h"
++#include <stdint.h>
++
++#define MINUS_ONE ((bfd_vma)0 - 1)
++
++/* The relocation table used for SHT_RELA sections. */
++
++static reloc_howto_type howto_table[] =
++{
++ /* No relocation. */
++ HOWTO (R_RISCV_NONE, /* type */
++ 0, /* rightshift */
++ 0, /* size (0 = byte, 1 = short, 2 = long) */
++ 0, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_NONE", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* 32 bit relocation. */
++ HOWTO (R_RISCV_32, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_32", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ 0xffffffff, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* 64 bit relocation. */
++ HOWTO (R_RISCV_64, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 64, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_64", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* Relocation against a local symbol in a shared object. */
++ HOWTO (R_RISCV_RELATIVE, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_RELATIVE", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ 0xffffffff, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ HOWTO (R_RISCV_COPY, /* type */
++ 0, /* rightshift */
++ 0, /* this one is variable size */
++ 0, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_COPY", /* name */
++ FALSE, /* partial_inplace */
++ 0x0, /* src_mask */
++ 0x0, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ HOWTO (R_RISCV_JUMP_SLOT, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 64, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_bitfield, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_JUMP_SLOT", /* name */
++ FALSE, /* partial_inplace */
++ 0x0, /* src_mask */
++ 0x0, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* Dynamic TLS relocations. */
++ HOWTO (R_RISCV_TLS_DTPMOD32, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TLS_DTPMOD32", /* name */
++ FALSE, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ HOWTO (R_RISCV_TLS_DTPMOD64, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 64, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TLS_DTPMOD64", /* name */
++ FALSE, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ HOWTO (R_RISCV_TLS_DTPREL32, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TLS_DTPREL32", /* name */
++ TRUE, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ HOWTO (R_RISCV_TLS_DTPREL64, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 64, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TLS_DTPREL64", /* name */
++ TRUE, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ HOWTO (R_RISCV_TLS_TPREL32, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TLS_TPREL32", /* name */
++ FALSE, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ HOWTO (R_RISCV_TLS_TPREL64, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 64, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TLS_TPREL64", /* name */
++ FALSE, /* partial_inplace */
++ MINUS_ONE, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ EMPTY_HOWTO (12),
++ EMPTY_HOWTO (13),
++ EMPTY_HOWTO (14),
++ EMPTY_HOWTO (15),
++
++ /* 12-bit PC-relative branch offset. */
++ HOWTO (R_RISCV_BRANCH, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ TRUE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_BRANCH", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_SBTYPE_IMM(-1U),/* dst_mask */
++ TRUE), /* pcrel_offset */
++
++ /* 20-bit PC-relative jump offset. */
++ HOWTO (R_RISCV_JAL, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ TRUE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++				/* This needs complex overflow
++				   detection, because the target of
++				   a JAL must lie within +/-1 MiB
++				   of the PC. */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_JAL", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_UJTYPE_IMM(-1U), /* dst_mask */
++ TRUE), /* pcrel_offset */
++
++ /* 32-bit PC-relative function call (AUIPC/JALR). */
++ HOWTO (R_RISCV_CALL, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 64, /* bitsize */
++ TRUE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_CALL", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_UTYPE_IMM(-1U) | ((bfd_vma) ENCODE_ITYPE_IMM(-1U) << 32), /* dst_mask */
++ TRUE), /* pcrel_offset */
++
++ /* 32-bit PC-relative function call (AUIPC/JALR). */
++ HOWTO (R_RISCV_CALL_PLT, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 64, /* bitsize */
++ TRUE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_CALL_PLT", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_UTYPE_IMM(-1U) | ((bfd_vma) ENCODE_ITYPE_IMM(-1U) << 32), /* dst_mask */
++ TRUE), /* pcrel_offset */
++
++ /* High 20 bits of 32-bit PC-relative GOT access. */
++ HOWTO (R_RISCV_GOT_HI20, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ TRUE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_GOT_HI20", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_UTYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* High 20 bits of 32-bit PC-relative TLS IE GOT access. */
++ HOWTO (R_RISCV_TLS_GOT_HI20, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ TRUE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TLS_GOT_HI20", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_UTYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* High 20 bits of 32-bit PC-relative TLS GD GOT reference. */
++ HOWTO (R_RISCV_TLS_GD_HI20, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ TRUE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TLS_GD_HI20", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_UTYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* High 20 bits of 32-bit PC-relative reference. */
++ HOWTO (R_RISCV_PCREL_HI20, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ TRUE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_PCREL_HI20", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_UTYPE_IMM(-1U), /* dst_mask */
++ TRUE), /* pcrel_offset */
++
++ /* Low 12 bits of a 32-bit PC-relative load or add. */
++ HOWTO (R_RISCV_PCREL_LO12_I, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_PCREL_LO12_I",/* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_ITYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* Low 12 bits of a 32-bit PC-relative store. */
++ HOWTO (R_RISCV_PCREL_LO12_S, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_PCREL_LO12_S",/* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_STYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* High 20 bits of 32-bit absolute address. */
++ HOWTO (R_RISCV_HI20, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_HI20", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_UTYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++  /* Low 12 bits of 32-bit load or add. */
++ HOWTO (R_RISCV_LO12_I, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_LO12_I", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_ITYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++  /* Low 12 bits of 32-bit store. */
++ HOWTO (R_RISCV_LO12_S, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_LO12_S", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_STYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* High 20 bits of TLS LE thread pointer offset. */
++ HOWTO (R_RISCV_TPREL_HI20, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TPREL_HI20", /* name */
++ TRUE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_UTYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* Low 12 bits of TLS LE thread pointer offset for loads and adds. */
++ HOWTO (R_RISCV_TPREL_LO12_I, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TPREL_LO12_I",/* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_ITYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* Low 12 bits of TLS LE thread pointer offset for stores. */
++ HOWTO (R_RISCV_TPREL_LO12_S, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_signed, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TPREL_LO12_S",/* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ ENCODE_STYPE_IMM(-1U), /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* TLS LE thread pointer usage. */
++ HOWTO (R_RISCV_TPREL_ADD, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont,/* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_TPREL_ADD", /* name */
++ TRUE, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* 8-bit in-place addition, for local label subtraction. */
++ HOWTO (R_RISCV_ADD8, /* type */
++ 0, /* rightshift */
++ 0, /* size (0 = byte, 1 = short, 2 = long) */
++	 8,			/* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_ADD8", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* 16-bit in-place addition, for local label subtraction. */
++ HOWTO (R_RISCV_ADD16, /* type */
++ 0, /* rightshift */
++ 1, /* size (0 = byte, 1 = short, 2 = long) */
++ 16, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_ADD16", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* 32-bit in-place addition, for local label subtraction. */
++ HOWTO (R_RISCV_ADD32, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_ADD32", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* 64-bit in-place addition, for local label subtraction. */
++ HOWTO (R_RISCV_ADD64, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 64, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_ADD64", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++  /* 8-bit in-place subtraction, for local label subtraction. */
++ HOWTO (R_RISCV_SUB8, /* type */
++ 0, /* rightshift */
++ 0, /* size (0 = byte, 1 = short, 2 = long) */
++ 8, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_SUB8", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++  /* 16-bit in-place subtraction, for local label subtraction. */
++ HOWTO (R_RISCV_SUB16, /* type */
++ 0, /* rightshift */
++ 1, /* size (0 = byte, 1 = short, 2 = long) */
++ 16, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_SUB16", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++  /* 32-bit in-place subtraction, for local label subtraction. */
++ HOWTO (R_RISCV_SUB32, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 32, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_SUB32", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++  /* 64-bit in-place subtraction, for local label subtraction. */
++ HOWTO (R_RISCV_SUB64, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 64, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_SUB64", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ MINUS_ONE, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* GNU extension to record C++ vtable hierarchy */
++ HOWTO (R_RISCV_GNU_VTINHERIT, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 0, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont,/* complain_on_overflow */
++ NULL, /* special_function */
++ "R_RISCV_GNU_VTINHERIT", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* GNU extension to record C++ vtable member usage */
++ HOWTO (R_RISCV_GNU_VTENTRY, /* type */
++ 0, /* rightshift */
++ 4, /* size (0 = byte, 1 = short, 2 = long) */
++ 0, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont,/* complain_on_overflow */
++ _bfd_elf_rel_vtable_reloc_fn, /* special_function */
++ "R_RISCV_GNU_VTENTRY", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ FALSE), /* pcrel_offset */
++
++ /* Indicates an alignment statement. The addend field encodes how many
++ bytes of NOPs follow the statement. The desired alignment is the
++ addend rounded up to the next power of two. */
++ HOWTO (R_RISCV_ALIGN, /* type */
++ 0, /* rightshift */
++ 2, /* size (0 = byte, 1 = short, 2 = long) */
++ 0, /* bitsize */
++ FALSE, /* pc_relative */
++ 0, /* bitpos */
++ complain_overflow_dont, /* complain_on_overflow */
++ bfd_elf_generic_reloc, /* special_function */
++ "R_RISCV_ALIGN", /* name */
++ FALSE, /* partial_inplace */
++ 0, /* src_mask */
++ 0, /* dst_mask */
++ TRUE), /* pcrel_offset */
++};
++
++/* A mapping from BFD reloc types to RISC-V ELF reloc types. */
++
++struct elf_reloc_map {
++ bfd_reloc_code_real_type bfd_val;
++ enum elf_riscv_reloc_type elf_val;
++};
++
++static const struct elf_reloc_map riscv_reloc_map[] =
++{
++ { BFD_RELOC_NONE, R_RISCV_NONE },
++ { BFD_RELOC_32, R_RISCV_32 },
++ { BFD_RELOC_64, R_RISCV_64 },
++ { BFD_RELOC_RISCV_ADD8, R_RISCV_ADD8 },
++ { BFD_RELOC_RISCV_ADD16, R_RISCV_ADD16 },
++ { BFD_RELOC_RISCV_ADD32, R_RISCV_ADD32 },
++ { BFD_RELOC_RISCV_ADD64, R_RISCV_ADD64 },
++ { BFD_RELOC_RISCV_SUB8, R_RISCV_SUB8 },
++ { BFD_RELOC_RISCV_SUB16, R_RISCV_SUB16 },
++ { BFD_RELOC_RISCV_SUB32, R_RISCV_SUB32 },
++ { BFD_RELOC_RISCV_SUB64, R_RISCV_SUB64 },
++ { BFD_RELOC_CTOR, R_RISCV_64 },
++ { BFD_RELOC_12_PCREL, R_RISCV_BRANCH },
++ { BFD_RELOC_RISCV_HI20, R_RISCV_HI20 },
++ { BFD_RELOC_RISCV_LO12_I, R_RISCV_LO12_I },
++ { BFD_RELOC_RISCV_LO12_S, R_RISCV_LO12_S },
++ { BFD_RELOC_RISCV_PCREL_LO12_I, R_RISCV_PCREL_LO12_I },
++ { BFD_RELOC_RISCV_PCREL_LO12_S, R_RISCV_PCREL_LO12_S },
++ { BFD_RELOC_RISCV_CALL, R_RISCV_CALL },
++ { BFD_RELOC_RISCV_CALL_PLT, R_RISCV_CALL_PLT },
++ { BFD_RELOC_RISCV_PCREL_HI20, R_RISCV_PCREL_HI20 },
++ { BFD_RELOC_RISCV_JMP, R_RISCV_JAL },
++ { BFD_RELOC_RISCV_GOT_HI20, R_RISCV_GOT_HI20 },
++ { BFD_RELOC_RISCV_TLS_DTPMOD32, R_RISCV_TLS_DTPMOD32 },
++ { BFD_RELOC_RISCV_TLS_DTPREL32, R_RISCV_TLS_DTPREL32 },
++ { BFD_RELOC_RISCV_TLS_DTPMOD64, R_RISCV_TLS_DTPMOD64 },
++ { BFD_RELOC_RISCV_TLS_DTPREL64, R_RISCV_TLS_DTPREL64 },
++ { BFD_RELOC_RISCV_TLS_TPREL32, R_RISCV_TLS_TPREL32 },
++ { BFD_RELOC_RISCV_TLS_TPREL64, R_RISCV_TLS_TPREL64 },
++ { BFD_RELOC_RISCV_TPREL_HI20, R_RISCV_TPREL_HI20 },
++ { BFD_RELOC_RISCV_TPREL_ADD, R_RISCV_TPREL_ADD },
++ { BFD_RELOC_RISCV_TPREL_LO12_S, R_RISCV_TPREL_LO12_S },
++ { BFD_RELOC_RISCV_TPREL_LO12_I, R_RISCV_TPREL_LO12_I },
++ { BFD_RELOC_RISCV_TLS_GOT_HI20, R_RISCV_TLS_GOT_HI20 },
++ { BFD_RELOC_RISCV_TLS_GD_HI20, R_RISCV_TLS_GD_HI20 },
++ { BFD_RELOC_RISCV_ALIGN, R_RISCV_ALIGN },
++};
++
++/* Given a BFD reloc type, return a howto structure. */
++
++reloc_howto_type *
++riscv_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
++ bfd_reloc_code_real_type code)
++{
++ unsigned int i;
++
++ for (i = 0; i < ARRAY_SIZE (riscv_reloc_map); i++)
++ if (riscv_reloc_map[i].bfd_val == code)
++ return &howto_table[(int) riscv_reloc_map[i].elf_val];
++
++ bfd_set_error (bfd_error_bad_value);
++ return NULL;
++}
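++
++/* Illustrative usage (not part of the original patch):
++   riscv_reloc_type_lookup (abfd, BFD_RELOC_RISCV_HI20) scans
++   riscv_reloc_map above and returns &howto_table[R_RISCV_HI20].  */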
++
++reloc_howto_type *
++riscv_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
++ const char *r_name)
++{
++ unsigned int i;
++
++ for (i = 0; i < ARRAY_SIZE (howto_table); i++)
++ if (howto_table[i].name && strcasecmp (howto_table[i].name, r_name) == 0)
++ return &howto_table[i];
++
++ return NULL;
++}
++
++reloc_howto_type *
++riscv_elf_rtype_to_howto (unsigned int r_type)
++{
++  if (r_type >= ARRAY_SIZE (howto_table))
++ {
++ (*_bfd_error_handler)(_("unrecognized relocation (0x%x)"), r_type);
++ bfd_set_error (bfd_error_bad_value);
++ return NULL;
++ }
++ return &howto_table[r_type];
++}
+diff -urN original-binutils/bfd/elfxx-riscv.h binutils/bfd/elfxx-riscv.h
+--- original-binutils/bfd/elfxx-riscv.h 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/bfd/elfxx-riscv.h 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,34 @@
++/* RISC-V ELF specific backend routines.
++ Copyright 2011-2014 Free Software Foundation, Inc.
++
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target.
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++#include "elf/common.h"
++#include "elf/internal.h"
++
++extern reloc_howto_type *
++riscv_reloc_name_lookup (bfd *, const char *);
++
++extern reloc_howto_type *
++riscv_reloc_type_lookup (bfd *, bfd_reloc_code_real_type);
++
++extern reloc_howto_type *
++riscv_elf_rtype_to_howto (unsigned int r_type);
+diff -urN original-binutils/bfd/Makefile.am binutils/bfd/Makefile.am
+--- original-binutils/bfd/Makefile.am 2014-10-14 09:32:02.000000000 +0200
++++ binutils-2.25/bfd/Makefile.am 2015-03-07 09:55:02.371135671 +0100
+@@ -931,6 +931,18 @@
+ sed -e s/NN/64/g < $(srcdir)/elfnn-ia64.c > elf64-ia64.new
+ mv -f elf64-ia64.new elf64-ia64.c
+
++elf32-riscv.c : elfnn-riscv.c
++ rm -f elf32-riscv.c
++ echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf32-riscv.new
++ sed -e s/NN/32/g < $(srcdir)/elfnn-riscv.c >> elf32-riscv.new
++ mv -f elf32-riscv.new elf32-riscv.c
++
++elf64-riscv.c : elfnn-riscv.c
++ rm -f elf64-riscv.c
++ echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf64-riscv.new
++ sed -e s/NN/64/g < $(srcdir)/elfnn-riscv.c >> elf64-riscv.new
++ mv -f elf64-riscv.new elf64-riscv.c
++
+ peigen.c : peXXigen.c
+ rm -f peigen.c
+ sed -e s/XX/pe/g < $(srcdir)/peXXigen.c > peigen.new
+diff -urN original-binutils/bfd/Makefile.in binutils/bfd/Makefile.in
+--- original-binutils/bfd/Makefile.in 2014-10-14 09:32:02.000000000 +0200
++++ binutils-2.25/bfd/Makefile.in 2015-03-07 09:55:02.371135671 +0100
+@@ -2009,6 +2009,18 @@
+ sed -e s/NN/64/g < $(srcdir)/elfnn-ia64.c > elf64-ia64.new
+ mv -f elf64-ia64.new elf64-ia64.c
+
++elf32-riscv.c : elfnn-riscv.c
++ rm -f elf32-riscv.c
++ echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf32-riscv.new
++ sed -e s/NN/32/g < $(srcdir)/elfnn-riscv.c >> elf32-riscv.new
++ mv -f elf32-riscv.new elf32-riscv.c
++
++elf64-riscv.c : elfnn-riscv.c
++ rm -f elf64-riscv.c
++ echo "#line 1 \"$(srcdir)/elfnn-riscv.c\"" > elf64-riscv.new
++ sed -e s/NN/64/g < $(srcdir)/elfnn-riscv.c >> elf64-riscv.new
++ mv -f elf64-riscv.new elf64-riscv.c
++
+ peigen.c : peXXigen.c
+ rm -f peigen.c
+ sed -e s/XX/pe/g < $(srcdir)/peXXigen.c > peigen.new
+diff -urN original-binutils/bfd/targets.c binutils/bfd/targets.c
+--- original-binutils/bfd/targets.c 2014-11-04 10:54:41.000000000 +0100
++++ binutils-2.25/bfd/targets.c 2015-03-07 09:55:02.371135671 +0100
+@@ -784,6 +784,8 @@
+ extern const bfd_target powerpc_pei_vec;
+ extern const bfd_target powerpc_pei_le_vec;
+ extern const bfd_target powerpc_xcoff_vec;
++extern const bfd_target riscv_elf32_vec;
++extern const bfd_target riscv_elf64_vec;
+ extern const bfd_target rl78_elf32_vec;
+ extern const bfd_target rs6000_xcoff64_vec;
+ extern const bfd_target rs6000_xcoff64_aix_vec;
+diff -urN original-binutils/binutils/readelf.c binutils/binutils/readelf.c
+--- original-binutils/binutils/readelf.c 2014-12-23 09:47:10.000000000 +0100
++++ binutils-2.25/binutils/readelf.c 2015-03-07 09:55:02.375135671 +0100
+@@ -125,6 +125,7 @@
+ #include "elf/metag.h"
+ #include "elf/microblaze.h"
+ #include "elf/mips.h"
++#include "elf/riscv.h"
+ #include "elf/mmix.h"
+ #include "elf/mn10200.h"
+ #include "elf/mn10300.h"
+@@ -720,6 +721,7 @@
+ case EM_OR1K:
+ case EM_PPC64:
+ case EM_PPC:
++ case EM_RISCV:
+ case EM_RL78:
+ case EM_RX:
+ case EM_S390:
+@@ -1252,6 +1254,10 @@
+ rtype = elf_mips_reloc_type (type);
+ break;
+
++ case EM_RISCV:
++ rtype = elf_riscv_reloc_type (type);
++ break;
++
+ case EM_ALPHA:
+ rtype = elf_alpha_reloc_type (type);
+ break;
+@@ -2164,6 +2170,7 @@
+ case EM_CR16:
+ case EM_MICROBLAZE:
+ case EM_MICROBLAZE_OLD: return "Xilinx MicroBlaze";
++ case EM_RISCV: return "RISC-V";
+ case EM_RL78: return "Renesas RL78";
+ case EM_RX: return "Renesas RX";
+ case EM_METAG: return "Imagination Technologies Meta processor architecture";
+@@ -2951,6 +2958,14 @@
+ decode_NDS32_machine_flags (e_flags, buf, sizeof buf);
+ break;
+
++ case EM_RISCV:
++ {
++ unsigned int riscv_extension = EF_GET_RISCV_EXT(e_flags);
++ strcat (buf, ", ");
++ strcat (buf, riscv_elf_flag_to_name (riscv_extension));
++ }
++ break;
++
+ case EM_SH:
+ switch ((e_flags & EF_SH_MACH_MASK))
+ {
+@@ -10789,6 +10804,8 @@
+ return reloc_type == 1; /* R_PPC64_ADDR32. */
+ case EM_PPC:
+ return reloc_type == 1; /* R_PPC_ADDR32. */
++ case EM_RISCV:
++ return reloc_type == 1; /* R_RISCV_32. */
+ case EM_RL78:
+ return reloc_type == 1; /* R_RL78_DIR32. */
+ case EM_RX:
+@@ -10924,6 +10941,8 @@
+ return reloc_type == 80; /* R_PARISC_DIR64. */
+ case EM_PPC64:
+ return reloc_type == 38; /* R_PPC64_ADDR64. */
++ case EM_RISCV:
++ return reloc_type == 2; /* R_RISCV_64. */
+ case EM_SPARC32PLUS:
+ case EM_SPARCV9:
+ case EM_SPARC:
+@@ -11072,6 +11091,7 @@
+ case EM_ADAPTEVA_EPIPHANY:
+ case EM_PPC: /* R_PPC_NONE. */
+ case EM_PPC64: /* R_PPC64_NONE. */
++ case EM_RISCV: /* R_RISCV_NONE. */
+ case EM_ARM: /* R_ARM_NONE. */
+ case EM_IA_64: /* R_IA64_NONE. */
+ case EM_SH: /* R_SH_NONE. */
+diff -urN original-binutils/config.sub binutils/config.sub
+--- original-binutils/config.sub 2014-10-14 09:32:02.000000000 +0200
++++ binutils-2.25/config.sub 2015-03-07 09:55:02.375135671 +0100
+@@ -335,6 +335,9 @@
+ ms1)
+ basic_machine=mt-unknown
+ ;;
++ riscv)
++ basic_machine=riscv-ucb
++ ;;
+
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+diff -urN original-binutils/gas/config/tc-riscv.c binutils/gas/config/tc-riscv.c
+--- original-binutils/gas/config/tc-riscv.c 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/gas/config/tc-riscv.c 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,2225 @@
++/* tc-riscv.c -- RISC-V assembler
++ Copyright 2011-2014 Free Software Foundation, Inc.
++
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target.
++
++ This file is part of GAS.
++
++ GAS is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GAS is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GAS; see the file COPYING. If not, write to the Free
++ Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
++ 02110-1301, USA. */
++
++#include "as.h"
++#include "config.h"
++#include "subsegs.h"
++#include "safe-ctype.h"
++
++#include "itbl-ops.h"
++#include "dwarf2dbg.h"
++#include "dw2gencfi.h"
++
++#include "elf/riscv.h"
++#include "opcode/riscv.h"
++
++#include <execinfo.h>
++#include <stdint.h>
++
++/* Information about an instruction, including its format, operands
++ and fixups. */
++struct riscv_cl_insn
++{
++ /* The opcode's entry in riscv_opcodes. */
++ const struct riscv_opcode *insn_mo;
++
++ /* The encoded instruction bits. */
++ insn_t insn_opcode;
++
++ /* The frag that contains the instruction. */
++ struct frag *frag;
++
++ /* The offset into FRAG of the first instruction byte. */
++ long where;
++
++ /* The relocs associated with the instruction, if any. */
++ fixS *fixp;
++};
++
++bfd_boolean rv64 = TRUE; /* RV64 (true) or RV32 (false) */
++#define LOAD_ADDRESS_INSN (rv64 ? "ld" : "lw")
++#define ADD32_INSN (rv64 ? "addiw" : "addi")
++
++struct riscv_subset
++{
++ const char* name;
++ int version_major;
++ int version_minor;
++
++ struct riscv_subset* next;
++};
++
++static struct riscv_subset* riscv_subsets;
++
++static int
++riscv_subset_supports(const char* feature)
++{
++ struct riscv_subset* s;
++ bfd_boolean rv64_insn;
++
++ if ((rv64_insn = !strncmp(feature, "64", 2)) || !strncmp(feature, "32", 2))
++ {
++ if (rv64 != rv64_insn)
++ return 0;
++ feature += 2;
++ }
++
++ for (s = riscv_subsets; s != NULL; s = s->next)
++ if (strcmp(s->name, feature) == 0)
++ /* FIXME: once we support version numbers:
++ return major == s->version_major && minor <= s->version_minor; */
++ return 1;
++
++ return 0;
++}
++
++static void
++riscv_add_subset(const char* subset)
++{
++ struct riscv_subset* s = xmalloc(sizeof(struct riscv_subset));
++ s->name = xstrdup(subset);
++ s->version_major = 1;
++ s->version_minor = 0;
++ s->next = riscv_subsets;
++ riscv_subsets = s;
++}
++
++static void
++riscv_set_arch(const char* arg)
++{
++ /* Formally, ISA subset names begin with RV, RV32, or RV64, but we allow the
++ prefix to be omitted. We also allow all-lowercase names if version
++ numbers and eXtensions are omitted (i.e. only some combination of imafd
++ is supported in this case).
++
++ FIXME: Version numbers are not supported yet. */
++ const char* subsets = "IMAFD";
++ const char* p;
++
++ for (p = arg; *p; p++)
++ if (!ISLOWER(*p) || strchr(subsets, TOUPPER(*p)) == NULL)
++ break;
++
++ if (!*p)
++ {
++ /* Legal all-lowercase name. */
++ for (p = arg; *p; p++)
++ {
++ char subset[2] = {TOUPPER(*p), 0};
++ riscv_add_subset(subset);
++ }
++ return;
++ }
++
++ if (strncmp(arg, "RV32", 4) == 0)
++ {
++ rv64 = FALSE;
++ arg += 4;
++ }
++ else if (strncmp(arg, "RV64", 4) == 0)
++ {
++ rv64 = TRUE;
++ arg += 4;
++ }
++ else if (strncmp(arg, "RV", 2) == 0)
++ arg += 2;
++
++ if (*arg && *arg != 'I')
++ as_fatal("`I' must be the first ISA subset name specified (got %c)", *arg);
++
++ for (p = arg; *p; p++)
++ {
++ if (*p == 'X')
++ {
++ const char* q = p+1;
++ while (ISLOWER(*q))
++ q++;
++
++ char subset[q-p+1];
++ memcpy(subset, p, q-p);
++ subset[q-p] = 0;
++
++ riscv_add_subset(subset);
++ p = q-1;
++ }
++ else if (strchr(subsets, *p) != NULL)
++ {
++ char subset[2] = {*p, 0};
++ riscv_add_subset(subset);
++ }
++ else
++ as_fatal("unsupported ISA subset %c", *p);
++ }
++}
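++
++/* Examples this parser accepts (illustrative, not exhaustive):
++   "RV64IMAFD", "RV32I", "imafd", and "RV64IXhwacha", where an
++   uppercase X introduces a named eXtension.  */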
++
++/* This is the set of options which may be modified by the .set
++ pseudo-op. We use a struct so that .set push and .set pop are more
++ reliable. */
++
++struct riscv_set_options
++{
++ /* Generate position-independent code. */
++ int pic;
++ /* Generate RVC code. */
++ int rvc;
++};
++
++static struct riscv_set_options riscv_opts =
++{
++ 0, /* pic */
++ 0, /* rvc */
++};
++
++/* handle of the OPCODE hash table */
++static struct hash_control *op_hash = NULL;
++
++/* This array holds the chars that always start a comment. If the
++ pre-processor is disabled, these aren't very useful */
++const char comment_chars[] = "#";
++
++/* This array holds the chars that only start a comment at the beginning of
++ a line. If the line seems to have the form '# 123 filename'
++ .line and .file directives will appear in the pre-processed output */
++/* Note that input_file.c hand checks for '#' at the beginning of the
++ first line of the input file. This is because the compiler outputs
++ #NO_APP at the beginning of its output. */
++/* Also note that C style comments are always supported. */
++const char line_comment_chars[] = "#";
++
++/* This array holds machine specific line separator characters. */
++const char line_separator_chars[] = ";";
++
++/* Chars that can be used to separate mant from exp in floating point nums */
++const char EXP_CHARS[] = "eE";
++
++/* Chars that mean this number is a floating point constant */
++/* As in 0f12.456 */
++/* or 0d1.2345e12 */
++const char FLT_CHARS[] = "rRsSfFdDxXpP";
++
++/* Also be aware that MAXIMUM_NUMBER_OF_CHARS_FOR_FLOAT may have to be
++ changed in read.c . Ideally it shouldn't have to know about it at all,
++ but nothing is ideal around here.
++ */
++
++static char *insn_error;
++
++#define RELAX_BRANCH_ENCODE(uncond, toofar) \
++ ((relax_substateT) \
++ (0xc0000000 \
++ | ((toofar) ? 1 : 0) \
++ | ((uncond) ? 2 : 0)))
++#define RELAX_BRANCH_P(i) (((i) & 0xf0000000) == 0xc0000000)
++#define RELAX_BRANCH_TOOFAR(i) (((i) & 1) != 0)
++#define RELAX_BRANCH_UNCOND(i) (((i) & 2) != 0)
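++
++/* Note (illustrative): relaxed_branch_length below sizes a branch frag
++   at 4 bytes while its target is in range, and at 8 bytes once
++   RELAX_BRANCH_TOOFAR is set.  */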
++
++/* Is the given value a sign-extended 32-bit value? */
++#define IS_SEXT_32BIT_NUM(x) \
++ (((x) &~ (offsetT) 0x7fffffff) == 0 \
++ || (((x) &~ (offsetT) 0x7fffffff) == ~ (offsetT) 0x7fffffff))
++
++#define IS_SEXT_NBIT_NUM(x,n) \
++ ({ int64_t __tmp = (x); \
++ __tmp = (__tmp << (64-(n))) >> (64-(n)); \
++ __tmp == (x); })
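++
++/* Illustrative: IS_SEXT_NBIT_NUM (0x7ff, 12) is true, while
++   IS_SEXT_NBIT_NUM (0x800, 12) is false, since 0x800 sign-extends
++   to -2048.  */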
++
++/* Is the given value a zero-extended 32-bit value? Or a negated one? */
++#define IS_ZEXT_32BIT_NUM(x) \
++ (((x) &~ (offsetT) 0xffffffff) == 0 \
++ || (((x) &~ (offsetT) 0xffffffff) == ~ (offsetT) 0xffffffff))
++
++/* Replace bits MASK << SHIFT of STRUCT with the equivalent bits in
++ VALUE << SHIFT. VALUE is evaluated exactly once. */
++#define INSERT_BITS(STRUCT, VALUE, MASK, SHIFT) \
++ (STRUCT) = (((STRUCT) & ~((insn_t)(MASK) << (SHIFT))) \
++ | ((insn_t)((VALUE) & (MASK)) << (SHIFT)))
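++
++/* For instance (illustrative only), INSERT_BITS (insn, 5, OP_MASK_RD,
++   OP_SH_RD) clears the rd field of INSN and ors in register number 5.  */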
++
++/* Extract bits MASK << SHIFT from STRUCT and shift them right
++ SHIFT places. */
++#define EXTRACT_BITS(STRUCT, MASK, SHIFT) \
++ (((STRUCT) >> (SHIFT)) & (MASK))
++
++/* Change INSN's opcode so that the operand given by FIELD has value VALUE.
++ INSN is a riscv_cl_insn structure and VALUE is evaluated exactly once. */
++#define INSERT_OPERAND(FIELD, INSN, VALUE) \
++ INSERT_BITS ((INSN).insn_opcode, VALUE, OP_MASK_##FIELD, OP_SH_##FIELD)
++
++/* Extract the operand given by FIELD from riscv_cl_insn INSN. */
++#define EXTRACT_OPERAND(FIELD, INSN) \
++ EXTRACT_BITS ((INSN).insn_opcode, OP_MASK_##FIELD, OP_SH_##FIELD)
++
++/* Determine if an instruction matches an opcode. */
++#define OPCODE_MATCHES(OPCODE, OP) \
++ (((OPCODE) & MASK_##OP) == MATCH_##OP)
++
++#define INSN_MATCHES(INSN, OP) \
++ (((INSN).insn_opcode & MASK_##OP) == MATCH_##OP)
++
++/* Prototypes for static functions. */
++
++#define internalError() \
++  as_fatal (_("internal error, line %d, %s"), __LINE__, __FILE__)
++
++static char *expr_end;
++
++/* Expressions which appear in instructions. These are set by
++ riscv_ip. */
++
++static expressionS imm_expr;
++static expressionS offset_expr;
++
++/* Relocs associated with imm_expr and offset_expr. */
++
++static bfd_reloc_code_real_type imm_reloc = BFD_RELOC_UNUSED;
++static bfd_reloc_code_real_type offset_reloc = BFD_RELOC_UNUSED;
++
++/* The default target format to use. */
++
++const char *
++riscv_target_format (void)
++{
++ return rv64 ? "elf64-littleriscv" : "elf32-littleriscv";
++}
++
++/* Return the length of instruction INSN. */
++
++static inline unsigned int
++insn_length (const struct riscv_cl_insn *insn)
++{
++ return riscv_insn_length (insn->insn_opcode);
++}
++
++/* Initialise INSN from opcode entry MO. Leave its position unspecified. */
++
++static void
++create_insn (struct riscv_cl_insn *insn, const struct riscv_opcode *mo)
++{
++ insn->insn_mo = mo;
++ insn->insn_opcode = mo->match;
++ insn->frag = NULL;
++ insn->where = 0;
++ insn->fixp = NULL;
++}
++
++/* Install INSN at the location specified by its "frag" and "where" fields. */
++
++static void
++install_insn (const struct riscv_cl_insn *insn)
++{
++ char *f = insn->frag->fr_literal + insn->where;
++ md_number_to_chars (f, insn->insn_opcode, insn_length(insn));
++}
++
++/* Move INSN to offset WHERE in FRAG. Adjust the fixups accordingly
++ and install the opcode in the new location. */
++
++static void
++move_insn (struct riscv_cl_insn *insn, fragS *frag, long where)
++{
++ insn->frag = frag;
++ insn->where = where;
++ if (insn->fixp != NULL)
++ {
++ insn->fixp->fx_frag = frag;
++ insn->fixp->fx_where = where;
++ }
++ install_insn (insn);
++}
++
++/* Add INSN to the end of the output. */
++
++static void
++add_fixed_insn (struct riscv_cl_insn *insn)
++{
++ char *f = frag_more (insn_length (insn));
++ move_insn (insn, frag_now, f - frag_now->fr_literal);
++}
++
++static void
++add_relaxed_insn (struct riscv_cl_insn *insn, int max_chars, int var,
++ relax_substateT subtype, symbolS *symbol, offsetT offset)
++{
++ frag_grow (max_chars);
++ move_insn (insn, frag_now, frag_more (0) - frag_now->fr_literal);
++ frag_var (rs_machine_dependent, max_chars, var,
++ subtype, symbol, offset, NULL);
++}
++
++/* Compute the length of a branch sequence, and adjust the
++ RELAX_BRANCH_TOOFAR bit accordingly. If FRAGP is NULL, the
++ worst-case length is computed. */
++static int
++relaxed_branch_length (fragS *fragp, asection *sec, int update)
++{
++ bfd_boolean toofar = TRUE;
++
++ if (fragp)
++ {
++ bfd_boolean uncond = RELAX_BRANCH_UNCOND (fragp->fr_subtype);
++
++ if (S_IS_DEFINED (fragp->fr_symbol)
++ && sec == S_GET_SEGMENT (fragp->fr_symbol))
++ {
++ offsetT val = S_GET_VALUE (fragp->fr_symbol) + fragp->fr_offset;
++ bfd_vma range;
++ val -= fragp->fr_address + fragp->fr_fix;
++
++ if (uncond)
++ range = RISCV_JUMP_REACH;
++ else
++ range = RISCV_BRANCH_REACH;
++ toofar = (bfd_vma)(val + range/2) >= range;
++ }
++
++ if (update && toofar != RELAX_BRANCH_TOOFAR (fragp->fr_subtype))
++ fragp->fr_subtype = RELAX_BRANCH_ENCODE (uncond, toofar);
++ }
++
++ return toofar ? 8 : 4;
++}
++
++struct regname {
++ const char *name;
++ unsigned int num;
++};
++
++enum reg_class {
++ RCLASS_GPR,
++ RCLASS_FPR,
++ RCLASS_CSR,
++ RCLASS_VEC_GPR,
++ RCLASS_VEC_FPR,
++ RCLASS_MAX
++};
++
++static struct hash_control *reg_names_hash = NULL;
++
++#define ENCODE_REG_HASH(cls, n) (void*)(uintptr_t)((n)*RCLASS_MAX + (cls) + 1)
++#define DECODE_REG_CLASS(hash) (((uintptr_t)(hash) - 1) % RCLASS_MAX)
++#define DECODE_REG_NUM(hash) (((uintptr_t)(hash) - 1) / RCLASS_MAX)
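++
++/* The encoding packs (class, number) into one non-NULL pointer; e.g.
++   ENCODE_REG_HASH (RCLASS_GPR, 3) decodes back to RCLASS_GPR and 3
++   (illustrative only).  */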
++
++static void
++hash_reg_name (enum reg_class class, const char *name, unsigned n)
++{
++ void *hash = ENCODE_REG_HASH (class, n);
++ const char *retval = hash_insert (reg_names_hash, name, hash);
++ if (retval != NULL)
++ as_fatal (_("internal error: can't hash `%s': %s"), name, retval);
++}
++
++static void
++hash_reg_names (enum reg_class class, const char * const names[], unsigned n)
++{
++ unsigned i;
++ for (i = 0; i < n; i++)
++ hash_reg_name (class, names[i], i);
++}
++
++static unsigned int
++reg_lookup_internal (const char *s, enum reg_class class)
++{
++ struct regname *r = (struct regname *) hash_find (reg_names_hash, s);
++ if (r == NULL || DECODE_REG_CLASS (r) != class)
++ return -1;
++ return DECODE_REG_NUM (r);
++}
++
++static int
++reg_lookup (char **s, enum reg_class class, unsigned int *regnop)
++{
++ char *e;
++ char save_c;
++ int reg = -1;
++
++ /* Find end of name. */
++ e = *s;
++ if (is_name_beginner (*e))
++ ++e;
++ while (is_part_of_name (*e))
++ ++e;
++
++ /* Terminate name. */
++ save_c = *e;
++ *e = '\0';
++
++ /* Look for the register. Advance to next token if one was recognized. */
++ if ((reg = reg_lookup_internal (*s, class)) >= 0)
++ *s = e;
++
++ *e = save_c;
++ if (regnop)
++ *regnop = reg;
++ return reg >= 0;
++}
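++
++/* E.g. (illustrative) with *s pointing at "a0, a1",
++   reg_lookup (&s, RCLASS_GPR, &regno) sets regno to 10 (a0 is x10)
++   and advances *s to ", a1".  */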
++
++static int
++arg_lookup(char **s, const char* const* array, size_t size, unsigned *regnop)
++{
++ const char *p = strchr(*s, ',');
++ size_t i, len = p ? (size_t)(p - *s) : strlen(*s);
++
++ for (i = 0; i < size; i++)
++ if (array[i] != NULL && strncmp(array[i], *s, len) == 0)
++ {
++ *regnop = i;
++ *s += len;
++ return 1;
++ }
++
++ return 0;
++}
++
++/* For consistency checking, verify that all bits are specified either
++ by the match/mask part of the instruction definition, or by the
++ operand list. */
++static int
++validate_riscv_insn (const struct riscv_opcode *opc)
++{
++ const char *p = opc->args;
++ char c;
++ insn_t required_bits, used_bits = opc->mask;
++
++ if ((used_bits & opc->match) != opc->match)
++ {
++ as_bad (_("internal: bad RISC-V opcode (mask error): %s %s"),
++ opc->name, opc->args);
++ return 0;
++ }
++ required_bits = ((insn_t)1 << (8 * riscv_insn_length (opc->match))) - 1;
++  /* Work around the undefined behavior of a 64-bit value shifted left
++     by 64 bits. */
++  if (riscv_insn_length (opc->match) == 8)
++    required_bits = 0xffffffffffffffffULL;
++
++#define USE_BITS(mask,shift) (used_bits |= ((insn_t)(mask) << (shift)))
++ while (*p)
++ switch (c = *p++)
++ {
++ /* Xcustom */
++ case '^':
++ switch (c = *p++)
++ {
++ case 'd': USE_BITS (OP_MASK_RD, OP_SH_RD); break;
++ case 's': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
++ case 't': USE_BITS (OP_MASK_RS2, OP_SH_RS2); break;
++ case 'j': USE_BITS (OP_MASK_CUSTOM_IMM, OP_SH_CUSTOM_IMM); break;
++ }
++ break;
++ /* Xhwacha */
++ case '#':
++ switch (c = *p++)
++ {
++ case 'g': USE_BITS (OP_MASK_IMMNGPR, OP_SH_IMMNGPR); break;
++ case 'f': USE_BITS (OP_MASK_IMMNFPR, OP_SH_IMMNFPR); break;
++ case 'n': USE_BITS (OP_MASK_IMMSEGNELM, OP_SH_IMMSEGNELM); break;
++ case 'd': USE_BITS (OP_MASK_VRD, OP_SH_VRD); break;
++ case 's': USE_BITS (OP_MASK_VRS, OP_SH_VRS); break;
++ case 't': USE_BITS (OP_MASK_VRT, OP_SH_VRT); break;
++ case 'r': USE_BITS (OP_MASK_VRR, OP_SH_VRR); break;
++ case 'D': USE_BITS (OP_MASK_VFD, OP_SH_VFD); break;
++ case 'S': USE_BITS (OP_MASK_VFS, OP_SH_VFS); break;
++ case 'T': USE_BITS (OP_MASK_VFT, OP_SH_VFT); break;
++ case 'R': USE_BITS (OP_MASK_VFR, OP_SH_VFR); break;
++
++ default:
++ as_bad (_("internal: bad RISC-V opcode (unknown extension operand type `#%c'): %s %s"),
++ c, opc->name, opc->args);
++ return 0;
++ }
++ break;
++ case ',': break;
++ case '(': break;
++ case ')': break;
++ case '<': USE_BITS (OP_MASK_SHAMTW, OP_SH_SHAMTW); break;
++ case '>': USE_BITS (OP_MASK_SHAMT, OP_SH_SHAMT); break;
++ case 'A': break;
++ case 'D': USE_BITS (OP_MASK_RD, OP_SH_RD); break;
++ case 'Z': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
++ case 'E': USE_BITS (OP_MASK_CSR, OP_SH_CSR); break;
++ case 'I': break;
++ case 'R': USE_BITS (OP_MASK_RS3, OP_SH_RS3); break;
++ case 'S': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
++ case 'U': USE_BITS (OP_MASK_RS1, OP_SH_RS1); /* fallthru */
++ case 'T': USE_BITS (OP_MASK_RS2, OP_SH_RS2); break;
++ case 'd': USE_BITS (OP_MASK_RD, OP_SH_RD); break;
++ case 'm': USE_BITS (OP_MASK_RM, OP_SH_RM); break;
++ case 's': USE_BITS (OP_MASK_RS1, OP_SH_RS1); break;
++ case 't': USE_BITS (OP_MASK_RS2, OP_SH_RS2); break;
++ case 'P': USE_BITS (OP_MASK_PRED, OP_SH_PRED); break;
++ case 'Q': USE_BITS (OP_MASK_SUCC, OP_SH_SUCC); break;
++ case 'o':
++ case 'j': used_bits |= ENCODE_ITYPE_IMM(-1U); break;
++ case 'a': used_bits |= ENCODE_UJTYPE_IMM(-1U); break;
++ case 'p': used_bits |= ENCODE_SBTYPE_IMM(-1U); break;
++ case 'q': used_bits |= ENCODE_STYPE_IMM(-1U); break;
++ case 'u': used_bits |= ENCODE_UTYPE_IMM(-1U); break;
++ case '[': break;
++ case ']': break;
++ case '0': break;
++ default:
++ as_bad (_("internal: bad RISC-V opcode (unknown operand type `%c'): %s %s"),
++ c, opc->name, opc->args);
++ return 0;
++ }
++#undef USE_BITS
++ if (used_bits != required_bits)
++ {
++      as_bad (_("internal: bad RISC-V opcode (bits 0x%lx undefined): %s %s"),
++	      (unsigned long)(required_bits & ~used_bits), opc->name, opc->args);
++ return 0;
++ }
++ return 1;
++}
++
++struct percent_op_match
++{
++ const char *str;
++ bfd_reloc_code_real_type reloc;
++};
++
++/* This function is called once, at assembler startup time. It should set up
++ all the tables, etc. that the MD part of the assembler will need. */
++
++void
++md_begin (void)
++{
++ const char *retval = NULL;
++ int i = 0;
++
++ if (! bfd_set_arch_mach (stdoutput, bfd_arch_riscv, 0))
++ as_warn (_("Could not set architecture and machine"));
++
++ op_hash = hash_new ();
++
++ for (i = 0; i < NUMOPCODES;)
++ {
++ const char *name = riscv_opcodes[i].name;
++
++ if (riscv_subset_supports(riscv_opcodes[i].subset))
++ retval = hash_insert (op_hash, name, (void *) &riscv_opcodes[i]);
++
++ if (retval != NULL)
++ {
++ fprintf (stderr, _("internal error: can't hash `%s': %s\n"),
++ riscv_opcodes[i].name, retval);
++ /* Probably a memory allocation problem? Give up now. */
++ as_fatal (_("Broken assembler. No assembly attempted."));
++ }
++ do
++ {
++ if (riscv_opcodes[i].pinfo != INSN_MACRO)
++ {
++ if (!validate_riscv_insn (&riscv_opcodes[i]))
++ as_fatal (_("Broken assembler. No assembly attempted."));
++ }
++ ++i;
++ }
++ while ((i < NUMOPCODES) && !strcmp (riscv_opcodes[i].name, name));
++ }
++
++ reg_names_hash = hash_new ();
++ hash_reg_names (RCLASS_GPR, riscv_gpr_names_numeric, NGPR);
++ hash_reg_names (RCLASS_GPR, riscv_gpr_names_abi, NGPR);
++ hash_reg_names (RCLASS_FPR, riscv_fpr_names_numeric, NFPR);
++ hash_reg_names (RCLASS_FPR, riscv_fpr_names_abi, NFPR);
++ hash_reg_names (RCLASS_VEC_GPR, riscv_vec_gpr_names, NVGPR);
++ hash_reg_names (RCLASS_VEC_FPR, riscv_vec_fpr_names, NVFPR);
++
++#define DECLARE_CSR(name, num) hash_reg_name (RCLASS_CSR, #name, num);
++#include "opcode/riscv-opc.h"
++#undef DECLARE_CSR
++
++  /* Set the default alignment for the text section (2**2, i.e. 4 bytes). */
++ record_alignment (text_section, 2);
++}
++
++/* Output an instruction. IP is the instruction information.
++ ADDRESS_EXPR is an operand of the instruction to be used with
++ RELOC_TYPE. */
++
++static void
++append_insn (struct riscv_cl_insn *ip, expressionS *address_expr,
++ bfd_reloc_code_real_type reloc_type)
++{
++#ifdef OBJ_ELF
++ dwarf2_emit_insn (0);
++#endif
++
++ gas_assert(reloc_type <= BFD_RELOC_UNUSED);
++
++ if (address_expr != NULL)
++ {
++ if (address_expr->X_op == O_constant)
++ {
++ switch (reloc_type)
++ {
++ case BFD_RELOC_32:
++ ip->insn_opcode |= address_expr->X_add_number;
++ break;
++
++ case BFD_RELOC_RISCV_HI20:
++ ip->insn_opcode |= ENCODE_UTYPE_IMM (
++ RISCV_CONST_HIGH_PART (address_expr->X_add_number));
++ break;
++
++ case BFD_RELOC_RISCV_LO12_S:
++ ip->insn_opcode |= ENCODE_STYPE_IMM (address_expr->X_add_number);
++ break;
++
++ case BFD_RELOC_UNUSED:
++ case BFD_RELOC_RISCV_LO12_I:
++ ip->insn_opcode |= ENCODE_ITYPE_IMM (address_expr->X_add_number);
++ break;
++
++ default:
++ internalError ();
++ }
++ reloc_type = BFD_RELOC_UNUSED;
++ }
++ else if (reloc_type == BFD_RELOC_12_PCREL)
++ {
++ add_relaxed_insn (ip, relaxed_branch_length (NULL, NULL, 0), 4,
++ RELAX_BRANCH_ENCODE (0, 0),
++ address_expr->X_add_symbol,
++ address_expr->X_add_number);
++ return;
++ }
++ else if (reloc_type < BFD_RELOC_UNUSED)
++ {
++ reloc_howto_type *howto;
++
++ howto = bfd_reloc_type_lookup (stdoutput, reloc_type);
++ if (howto == NULL)
++ as_bad (_("Unsupported RISC-V relocation number %d"), reloc_type);
++
++ ip->fixp = fix_new_exp (ip->frag, ip->where,
++ bfd_get_reloc_size (howto),
++ address_expr,
++ reloc_type == BFD_RELOC_12_PCREL ||
++ reloc_type == BFD_RELOC_RISCV_CALL ||
++ reloc_type == BFD_RELOC_RISCV_JMP,
++ reloc_type);
++
++	  /* These relocations can have an addend that won't fit in
++	     4 octets for 64-bit assembly. */
++ if (rv64
++ && ! howto->partial_inplace
++ && (reloc_type == BFD_RELOC_32
++ || reloc_type == BFD_RELOC_64
++ || reloc_type == BFD_RELOC_CTOR
++ || reloc_type == BFD_RELOC_RISCV_HI20
++ || reloc_type == BFD_RELOC_RISCV_LO12_I
++ || reloc_type == BFD_RELOC_RISCV_LO12_S))
++ ip->fixp->fx_no_overflow = 1;
++ }
++ }
++
++ add_fixed_insn (ip);
++
++ install_insn (ip);
++}
++
++/* Build an instruction created by a macro expansion. This is passed
++ a pointer to the count of instructions created so far, an
++ expression, the name of the instruction to build, an operand format
++ string, and corresponding arguments. */
++
++static void
++macro_build (expressionS *ep, const char *name, const char *fmt, ...)
++{
++ const struct riscv_opcode *mo;
++ struct riscv_cl_insn insn;
++ bfd_reloc_code_real_type r;
++ va_list args;
++
++ va_start (args, fmt);
++
++ r = BFD_RELOC_UNUSED;
++ mo = (struct riscv_opcode *) hash_find (op_hash, name);
++ gas_assert (mo);
++ gas_assert (strcmp (name, mo->name) == 0);
++
++ create_insn (&insn, mo);
++ for (;;)
++ {
++ switch (*fmt++)
++ {
++ case 'd':
++ INSERT_OPERAND (RD, insn, va_arg (args, int));
++ continue;
++
++ case 's':
++ INSERT_OPERAND (RS1, insn, va_arg (args, int));
++ continue;
++
++ case 't':
++ INSERT_OPERAND (RS2, insn, va_arg (args, int));
++ continue;
++
++ case '>':
++ INSERT_OPERAND (SHAMT, insn, va_arg (args, int));
++ continue;
++
++ case 'j':
++ case 'u':
++ case 'q':
++ gas_assert (ep != NULL);
++ r = va_arg (args, int);
++ continue;
++
++ case '\0':
++ break;
++ case ',':
++ continue;
++ default:
++ internalError ();
++ }
++ break;
++ }
++ va_end (args);
++ gas_assert (r == BFD_RELOC_UNUSED ? ep == NULL : ep != NULL);
++
++ append_insn (&insn, ep, r);
++}
++
++/* In 32-bit mode, sign-extend constants that have bit 31 set and all
++   higher bits unset. */
++static void
++normalize_constant_expr (expressionS *ex)
++{
++ if (rv64)
++ return;
++ if ((ex->X_op == O_constant || ex->X_op == O_symbol)
++ && IS_ZEXT_32BIT_NUM (ex->X_add_number))
++ ex->X_add_number = (((ex->X_add_number & 0xffffffff) ^ 0x80000000)
++ - 0x80000000);
++}
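++
++/* E.g. (illustrative) in RV32 mode a value of 0xffffffff is
++   canonicalized to -1, so equal constants compare equal however
++   they were written.  */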
++
++static symbolS *
++make_internal_label (void)
++{
++ return (symbolS *) local_symbol_make (FAKE_LABEL_NAME, now_seg,
++ (valueT) frag_now_fix(), frag_now);
++}
++
++/* Emit a PC-relative access: AUIPC for the high part, then LO_INSN
++   (formatted per LO_PATTERN) for the low part.  GOT and TLS accesses
++   are handled through the relocation arguments. */
++static void
++pcrel_access (int destreg, int tempreg, expressionS *ep,
++ const char* lo_insn, const char* lo_pattern,
++ bfd_reloc_code_real_type hi_reloc,
++ bfd_reloc_code_real_type lo_reloc)
++{
++ expressionS ep2;
++ ep2.X_op = O_symbol;
++ ep2.X_add_symbol = make_internal_label ();
++ ep2.X_add_number = 0;
++
++ macro_build (ep, "auipc", "d,u", tempreg, hi_reloc);
++ macro_build (&ep2, lo_insn, lo_pattern, destreg, tempreg, lo_reloc);
++}
++
++static void
++pcrel_load (int destreg, int tempreg, expressionS *ep, const char* lo_insn,
++ bfd_reloc_code_real_type hi_reloc,
++ bfd_reloc_code_real_type lo_reloc)
++{
++ pcrel_access (destreg, tempreg, ep, lo_insn, "d,s,j", hi_reloc, lo_reloc);
++}
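++
++/* Sketch (illustrative): "la rd, sym" thus expands to
++       auipc rd, %pcrel_hi(sym)
++       addi  rd, rd, %pcrel_lo(<label>)
++   where <label> is the internal label created in pcrel_access.  */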
++
++static void
++pcrel_store (int srcreg, int tempreg, expressionS *ep, const char* lo_insn,
++ bfd_reloc_code_real_type hi_reloc,
++ bfd_reloc_code_real_type lo_reloc)
++{
++ pcrel_access (srcreg, tempreg, ep, lo_insn, "t,s,q", hi_reloc, lo_reloc);
++}
++
++/* PC-relative function call using AUIPC/JALR, relaxed to JAL. */
++static void
++riscv_call (int destreg, int tempreg, expressionS *ep,
++ bfd_reloc_code_real_type reloc)
++{
++ macro_build (ep, "auipc", "d,u", tempreg, reloc);
++ macro_build (NULL, "jalr", "d,s", destreg, tempreg);
++}
++
++/* Warn if an expression is not a constant. */
++
++static void
++check_absolute_expr (struct riscv_cl_insn *ip, expressionS *ex)
++{
++ if (ex->X_op == O_big)
++ as_bad (_("unsupported large constant"));
++ else if (ex->X_op != O_constant)
++ as_bad (_("Instruction %s requires absolute expression"),
++ ip->insn_mo->name);
++ normalize_constant_expr (ex);
++}
++
++/* Load an integer constant into a register. */
++
++static void
++load_const (int reg, expressionS *ep)
++{
++ int shift = RISCV_IMM_BITS;
++ expressionS upper = *ep, lower = *ep;
++ lower.X_add_number = (int32_t) ep->X_add_number << (32-shift) >> (32-shift);
++ upper.X_add_number -= lower.X_add_number;
++
++ gas_assert (ep->X_op == O_constant);
++
++ if (rv64 && !IS_SEXT_32BIT_NUM(ep->X_add_number))
++ {
++ /* Reduce to a signed 32-bit constant using SLLI and ADDI, which
++ is not optimal but also not so bad. */
++ while (((upper.X_add_number >> shift) & 1) == 0)
++ shift++;
++
++ upper.X_add_number = (int64_t) upper.X_add_number >> shift;
++ load_const(reg, &upper);
++
++ macro_build (NULL, "slli", "d,s,>", reg, reg, shift);
++ if (lower.X_add_number != 0)
++ macro_build (&lower, "addi", "d,s,j", reg, reg, BFD_RELOC_RISCV_LO12_I);
++ }
++ else
++ {
++ int hi_reg = 0;
++
++ if (upper.X_add_number != 0)
++ {
++ macro_build (ep, "lui", "d,u", reg, BFD_RELOC_RISCV_HI20);
++ hi_reg = reg;
++ }
++
++ if (lower.X_add_number != 0 || hi_reg == 0)
++ macro_build (ep, ADD32_INSN, "d,s,j", reg, hi_reg,
++ BFD_RELOC_RISCV_LO12_I);
++ }
++}
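++
++/* Sketch (illustrative): "li a0, 0x12345678" becomes
++       lui  a0, %hi(0x12345678)
++       addi a0, a0, %lo(0x12345678)   (addiw on RV64)
++   while wider 64-bit constants recurse through the slli/addi path.  */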
++
++/* Expand RISC-V assembly macros into one or more instructions. */
++static void
++macro (struct riscv_cl_insn *ip)
++{
++ int rd = (ip->insn_opcode >> OP_SH_RD) & OP_MASK_RD;
++ int rs1 = (ip->insn_opcode >> OP_SH_RS1) & OP_MASK_RS1;
++ int rs2 = (ip->insn_opcode >> OP_SH_RS2) & OP_MASK_RS2;
++ int mask = ip->insn_mo->mask;
++
++ switch (mask)
++ {
++ case M_LI:
++ load_const (rd, &imm_expr);
++ break;
++
++ case M_LA:
++ case M_LLA:
++ /* Load the address of a symbol into a register. */
++ if (!IS_SEXT_32BIT_NUM (offset_expr.X_add_number))
++ as_bad(_("offset too large"));
++
++ if (offset_expr.X_op == O_constant)
++ load_const (rd, &offset_expr);
++ else if (riscv_opts.pic && mask == M_LA) /* Global PIC symbol */
++ pcrel_load (rd, rd, &offset_expr, LOAD_ADDRESS_INSN,
++ BFD_RELOC_RISCV_GOT_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ else /* Local PIC symbol, or any non-PIC symbol */
++ pcrel_load (rd, rd, &offset_expr, "addi",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_LA_TLS_GD:
++ pcrel_load (rd, rd, &offset_expr, "addi",
++ BFD_RELOC_RISCV_TLS_GD_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_LA_TLS_IE:
++ pcrel_load (rd, rd, &offset_expr, LOAD_ADDRESS_INSN,
++ BFD_RELOC_RISCV_TLS_GOT_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_LB:
++ pcrel_load (rd, rd, &offset_expr, "lb",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_LBU:
++ pcrel_load (rd, rd, &offset_expr, "lbu",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_LH:
++ pcrel_load (rd, rd, &offset_expr, "lh",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_LHU:
++ pcrel_load (rd, rd, &offset_expr, "lhu",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_LW:
++ pcrel_load (rd, rd, &offset_expr, "lw",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_LWU:
++ pcrel_load (rd, rd, &offset_expr, "lwu",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_LD:
++ pcrel_load (rd, rd, &offset_expr, "ld",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_FLW:
++ pcrel_load (rd, rs1, &offset_expr, "flw",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_FLD:
++ pcrel_load (rd, rs1, &offset_expr, "fld",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_I);
++ break;
++
++ case M_SB:
++ pcrel_store (rs2, rs1, &offset_expr, "sb",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++ break;
++
++ case M_SH:
++ pcrel_store (rs2, rs1, &offset_expr, "sh",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++ break;
++
++ case M_SW:
++ pcrel_store (rs2, rs1, &offset_expr, "sw",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++ break;
++
++ case M_SD:
++ pcrel_store (rs2, rs1, &offset_expr, "sd",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++ break;
++
++ case M_FSW:
++ pcrel_store (rs2, rs1, &offset_expr, "fsw",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++ break;
++
++ case M_FSD:
++ pcrel_store (rs2, rs1, &offset_expr, "fsd",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++ break;
++
++ case M_VF:
++ pcrel_access (0, rs1, &offset_expr, "vf", "s,s,q",
++ BFD_RELOC_RISCV_PCREL_HI20, BFD_RELOC_RISCV_PCREL_LO12_S);
++ break;
++
++ case M_CALL:
++ riscv_call (rd, rs1, &offset_expr, offset_reloc);
++ break;
++
++ default:
++ as_bad (_("Macro %s not implemented"), ip->insn_mo->name);
++ break;
++ }
++}
++
++static const struct percent_op_match percent_op_utype[] =
++{
++ {"%tprel_hi", BFD_RELOC_RISCV_TPREL_HI20},
++ {"%pcrel_hi", BFD_RELOC_RISCV_PCREL_HI20},
++ {"%tls_ie_pcrel_hi", BFD_RELOC_RISCV_TLS_GOT_HI20},
++ {"%tls_gd_pcrel_hi", BFD_RELOC_RISCV_TLS_GD_HI20},
++ {"%hi", BFD_RELOC_RISCV_HI20},
++ {0, 0}
++};
++
++static const struct percent_op_match percent_op_itype[] =
++{
++ {"%lo", BFD_RELOC_RISCV_LO12_I},
++ {"%tprel_lo", BFD_RELOC_RISCV_TPREL_LO12_I},
++ {"%pcrel_lo", BFD_RELOC_RISCV_PCREL_LO12_I},
++ {0, 0}
++};
++
++static const struct percent_op_match percent_op_stype[] =
++{
++ {"%lo", BFD_RELOC_RISCV_LO12_S},
++ {"%tprel_lo", BFD_RELOC_RISCV_TPREL_LO12_S},
++ {"%pcrel_lo", BFD_RELOC_RISCV_PCREL_LO12_S},
++ {0, 0}
++};
++
++static const struct percent_op_match percent_op_rtype[] =
++{
++ {"%tprel_add", BFD_RELOC_RISCV_TPREL_ADD},
++ {0, 0}
++};
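++
++/* These tables drive operand forms such as (illustrative)
++       lui a0, %hi(sym)
++       lw  a0, %lo(sym)(a0)
++   mapping each %-operator to its BFD relocation.  */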
++
++/* Return true if *STR points to a relocation operator. When returning true,
++ move *STR over the operator and store its relocation code in *RELOC.
++ Leave both *STR and *RELOC alone when returning false. */
++
++static bfd_boolean
++parse_relocation (char **str, bfd_reloc_code_real_type *reloc,
++ const struct percent_op_match *percent_op)
++{
++ for ( ; percent_op->str; percent_op++)
++ if (strncasecmp (*str, percent_op->str, strlen (percent_op->str)) == 0)
++ {
++ int len = strlen (percent_op->str);
++
++ if (!ISSPACE ((*str)[len]) && (*str)[len] != '(')
++ continue;
++
++	  *str += len;
++ *reloc = percent_op->reloc;
++
++ /* Check whether the output BFD supports this relocation.
++ If not, issue an error and fall back on something safe. */
++ if (!bfd_reloc_type_lookup (stdoutput, percent_op->reloc))
++ {
++	      as_bad (_("relocation %s isn't supported by the current ABI"),
++		      percent_op->str);
++ *reloc = BFD_RELOC_UNUSED;
++ }
++ return TRUE;
++ }
++ return FALSE;
++}
++
++static void
++my_getExpression (expressionS *ep, char *str)
++{
++ char *save_in;
++
++ save_in = input_line_pointer;
++ input_line_pointer = str;
++ expression (ep);
++ expr_end = input_line_pointer;
++ input_line_pointer = save_in;
++}
++
++/* Parse string STR as a relocatable operand.  Store the
++ expression in *EP and the relocation, if any, in RELOC.
++ Return the number of relocation operators used (0 or 1).
++
++ On exit, EXPR_END points to the first character after the expression. */
++
++static size_t
++my_getSmallExpression (expressionS *ep, bfd_reloc_code_real_type *reloc,
++ char *str, const struct percent_op_match *percent_op)
++{
++ size_t reloc_index;
++ int crux_depth, str_depth;
++ char *crux;
++
++ /* Search for the start of the main expression.
++ End the loop with CRUX pointing to the start
++ of the main expression and with CRUX_DEPTH containing the number
++ of open brackets at that point. */
++ reloc_index = -1;
++ str_depth = 0;
++ do
++ {
++ reloc_index++;
++ crux = str;
++ crux_depth = str_depth;
++
++ /* Skip over whitespace and brackets, keeping count of the number
++ of brackets. */
++ while (*str == ' ' || *str == '\t' || *str == '(')
++ if (*str++ == '(')
++ str_depth++;
++ }
++ while (*str == '%'
++ && reloc_index < 1
++ && parse_relocation (&str, reloc, percent_op));
++
++ my_getExpression (ep, crux);
++ str = expr_end;
++
++ /* Match every open bracket. */
++ while (crux_depth > 0 && (*str == ')' || *str == ' ' || *str == '\t'))
++ if (*str++ == ')')
++ crux_depth--;
++
++ if (crux_depth > 0)
++    as_bad (_("unclosed '('"));
++
++ expr_end = str;
++
++ return reloc_index;
++}
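++
++/* E.g. (illustrative) called with percent_op_itype on "%lo(sym)(a0)",
++   this returns 1, stores BFD_RELOC_RISCV_LO12_I in *RELOC, describes
++   sym in *EP, and leaves EXPR_END at the "(a0)" base-register part.  */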
++
++/* This routine assembles an instruction into its binary format. As a
++ side effect, it sets one of the global variables imm_reloc or
++ offset_reloc to the type of relocation to do if one of the operands
++ is an address expression. */
++
++static void
++riscv_ip (char *str, struct riscv_cl_insn *ip)
++{
++ char *s;
++ const char *args;
++ char c = 0;
++ struct riscv_opcode *insn;
++ char *argsStart;
++ unsigned int regno;
++ char save_c = 0;
++ int argnum;
++ const struct percent_op_match *p;
++
++ insn_error = NULL;
++
++ /* If the instruction contains a '.', we first try to match an instruction
++ including the '.'. Then we try again without the '.'. */
++ insn = NULL;
++ for (s = str; *s != '\0' && !ISSPACE (*s); ++s)
++ continue;
++
++ /* If we stopped on whitespace, then replace the whitespace with null for
++ the call to hash_find. Save the character we replaced just in case we
++ have to re-parse the instruction. */
++ if (ISSPACE (*s))
++ {
++ save_c = *s;
++ *s++ = '\0';
++ }
++
++ insn = (struct riscv_opcode *) hash_find (op_hash, str);
++
++ /* If we didn't find the instruction in the opcode table, try again, but
++ this time with just the instruction up to, but not including the
++ first '.'. */
++ if (insn == NULL)
++ {
++ /* Restore the character we overwrite above (if any). */
++ if (save_c)
++ *(--s) = save_c;
++
++ /* Scan up to the first '.' or whitespace. */
++ for (s = str;
++ *s != '\0' && *s != '.' && !ISSPACE (*s);
++ ++s)
++ continue;
++
++ /* If we did not find a '.', then we can quit now. */
++ if (*s != '.')
++ {
++	  insn_error = _("unrecognized opcode");
++ return;
++ }
++
++ /* Lookup the instruction in the hash table. */
++ *s++ = '\0';
++ if ((insn = (struct riscv_opcode *) hash_find (op_hash, str)) == NULL)
++ {
++	  insn_error = _("unrecognized opcode");
++ return;
++ }
++ }
++
++ argsStart = s;
++ for (;;)
++ {
++ bfd_boolean ok = TRUE;
++ gas_assert (strcmp (insn->name, str) == 0);
++
++ create_insn (ip, insn);
++ insn_error = NULL;
++ argnum = 1;
++ for (args = insn->args;; ++args)
++ {
++ s += strspn (s, " \t");
++ switch (*args)
++ {
++ case '\0': /* end of args */
++ if (*s == '\0')
++ return;
++ break;
++ /* Xcustom */
++ case '^':
++ {
++ unsigned long max = OP_MASK_RD;
++ my_getExpression (&imm_expr, s);
++ check_absolute_expr (ip, &imm_expr);
++ switch (*++args)
++ {
++ case 'j':
++ max = OP_MASK_CUSTOM_IMM;
++ INSERT_OPERAND (CUSTOM_IMM, *ip, imm_expr.X_add_number);
++ break;
++ case 'd':
++ INSERT_OPERAND (RD, *ip, imm_expr.X_add_number);
++ break;
++ case 's':
++ INSERT_OPERAND (RS1, *ip, imm_expr.X_add_number);
++ break;
++ case 't':
++ INSERT_OPERAND (RS2, *ip, imm_expr.X_add_number);
++ break;
++ }
++ imm_expr.X_op = O_absent;
++ s = expr_end;
++ if ((unsigned long) imm_expr.X_add_number > max)
++		as_warn (_("Bad custom immediate (%lu), must be at most %lu"),
++			 (unsigned long)imm_expr.X_add_number, max);
++ continue;
++ }
++
++ /* Xhwacha */
++ case '#':
++	      switch (*++args)
++		{
++		case 'g':
++		  my_getExpression (&imm_expr, s);
++		  /* check_absolute_expr (ip, &imm_expr); */
++		  if ((unsigned long) imm_expr.X_add_number > 32)
++		    as_warn (_("Improper ngpr amount (%lu)"),
++			     (unsigned long) imm_expr.X_add_number);
++		  INSERT_OPERAND (IMMNGPR, *ip, imm_expr.X_add_number);
++		  imm_expr.X_op = O_absent;
++		  s = expr_end;
++		  continue;
++		case 'f':
++		  my_getExpression (&imm_expr, s);
++		  /* check_absolute_expr (ip, &imm_expr); */
++		  if ((unsigned long) imm_expr.X_add_number > 32)
++		    as_warn (_("Improper nfpr amount (%lu)"),
++			     (unsigned long) imm_expr.X_add_number);
++		  INSERT_OPERAND (IMMNFPR, *ip, imm_expr.X_add_number);
++		  imm_expr.X_op = O_absent;
++		  s = expr_end;
++		  continue;
++		case 'n':
++		  my_getExpression (&imm_expr, s);
++		  /* check_absolute_expr (ip, &imm_expr); */
++		  if ((unsigned long) imm_expr.X_add_number > 8)
++		    as_warn (_("Improper nelm amount (%lu)"),
++			     (unsigned long) imm_expr.X_add_number);
++		  INSERT_OPERAND (IMMSEGNELM, *ip, imm_expr.X_add_number - 1);
++		  imm_expr.X_op = O_absent;
++		  s = expr_end;
++		  continue;
++		case 'd':
++		  ok = reg_lookup (&s, RCLASS_VEC_GPR, &regno);
++		  if (!ok)
++		    as_bad (_("Invalid vector register"));
++		  INSERT_OPERAND (VRD, *ip, regno);
++		  continue;
++		case 's':
++		  ok = reg_lookup (&s, RCLASS_VEC_GPR, &regno);
++		  if (!ok)
++		    as_bad (_("Invalid vector register"));
++		  INSERT_OPERAND (VRS, *ip, regno);
++		  continue;
++		case 't':
++		  ok = reg_lookup (&s, RCLASS_VEC_GPR, &regno);
++		  if (!ok)
++		    as_bad (_("Invalid vector register"));
++		  INSERT_OPERAND (VRT, *ip, regno);
++		  continue;
++		case 'r':
++		  ok = reg_lookup (&s, RCLASS_VEC_GPR, &regno);
++		  if (!ok)
++		    as_bad (_("Invalid vector register"));
++		  INSERT_OPERAND (VRR, *ip, regno);
++		  continue;
++		case 'D':
++		  ok = reg_lookup (&s, RCLASS_VEC_FPR, &regno);
++		  if (!ok)
++		    as_bad (_("Invalid vector register"));
++		  INSERT_OPERAND (VFD, *ip, regno);
++		  continue;
++		case 'S':
++		  ok = reg_lookup (&s, RCLASS_VEC_FPR, &regno);
++		  if (!ok)
++		    as_bad (_("Invalid vector register"));
++		  INSERT_OPERAND (VFS, *ip, regno);
++		  continue;
++		case 'T':
++		  ok = reg_lookup (&s, RCLASS_VEC_FPR, &regno);
++		  if (!ok)
++		    as_bad (_("Invalid vector register"));
++		  INSERT_OPERAND (VFT, *ip, regno);
++		  continue;
++		case 'R':
++		  ok = reg_lookup (&s, RCLASS_VEC_FPR, &regno);
++		  if (!ok)
++		    as_bad (_("Invalid vector register"));
++		  INSERT_OPERAND (VFR, *ip, regno);
++		  continue;
++		}
++ break;
++
++ case ',':
++ ++argnum;
++ if (*s++ == *args)
++ continue;
++ s--;
++ break;
++
++ case '(':
++ case ')':
++ case '[':
++ case ']':
++ if (*s++ == *args)
++ continue;
++ break;
++
++ case '<': /* shift amount, 0 - 31 */
++ my_getExpression (&imm_expr, s);
++ check_absolute_expr (ip, &imm_expr);
++ if ((unsigned long) imm_expr.X_add_number > 31)
++ as_warn (_("Improper shift amount (%lu)"),
++ (unsigned long) imm_expr.X_add_number);
++ INSERT_OPERAND (SHAMTW, *ip, imm_expr.X_add_number);
++ imm_expr.X_op = O_absent;
++ s = expr_end;
++ continue;
++
++ case '>': /* shift amount, 0 - (XLEN-1) */
++ my_getExpression (&imm_expr, s);
++ check_absolute_expr (ip, &imm_expr);
++ if ((unsigned long) imm_expr.X_add_number > (rv64 ? 63 : 31))
++ as_warn (_("Improper shift amount (%lu)"),
++ (unsigned long) imm_expr.X_add_number);
++ INSERT_OPERAND (SHAMT, *ip, imm_expr.X_add_number);
++ imm_expr.X_op = O_absent;
++ s = expr_end;
++ continue;
++
++ case 'Z': /* CSRRxI immediate */
++ my_getExpression (&imm_expr, s);
++ check_absolute_expr (ip, &imm_expr);
++ if ((unsigned long) imm_expr.X_add_number > 31)
++	      as_warn (_("Improper CSRRxI immediate (%lu)"),
++ (unsigned long) imm_expr.X_add_number);
++ INSERT_OPERAND (RS1, *ip, imm_expr.X_add_number);
++ imm_expr.X_op = O_absent;
++ s = expr_end;
++ continue;
++
++ case 'E': /* Control register. */
++ ok = reg_lookup (&s, RCLASS_CSR, &regno);
++ if (ok)
++ INSERT_OPERAND (CSR, *ip, regno);
++ else
++ {
++ my_getExpression (&imm_expr, s);
++ check_absolute_expr (ip, &imm_expr);
++ if ((unsigned long) imm_expr.X_add_number > 0xfff)
++ as_warn(_("Improper CSR address (%lu)"),
++ (unsigned long) imm_expr.X_add_number);
++ INSERT_OPERAND (CSR, *ip, imm_expr.X_add_number);
++ imm_expr.X_op = O_absent;
++ s = expr_end;
++ }
++ continue;
++
++ case 'm': /* rounding mode */
++ if (arg_lookup (&s, riscv_rm, ARRAY_SIZE(riscv_rm), &regno))
++ {
++ INSERT_OPERAND (RM, *ip, regno);
++ continue;
++ }
++ break;
++
++ case 'P':
++ case 'Q': /* fence predecessor/successor */
++ if (arg_lookup (&s, riscv_pred_succ, ARRAY_SIZE(riscv_pred_succ), &regno))
++ {
++ if (*args == 'P')
++ INSERT_OPERAND(PRED, *ip, regno);
++ else
++ INSERT_OPERAND(SUCC, *ip, regno);
++ continue;
++ }
++ break;
++
++ case 'd': /* destination register */
++ case 's': /* source register */
++ case 't': /* target register */
++ ok = reg_lookup (&s, RCLASS_GPR, &regno);
++ if (ok)
++ {
++ c = *args;
++ if (*s == ' ')
++ ++s;
++
++	      /* Now that we have assembled one operand, we use the args
++		 string to figure out where it goes in the instruction. */
++ switch (c)
++ {
++ case 's':
++ INSERT_OPERAND (RS1, *ip, regno);
++ break;
++ case 'd':
++ INSERT_OPERAND (RD, *ip, regno);
++ break;
++ case 't':
++ INSERT_OPERAND (RS2, *ip, regno);
++ break;
++ }
++ continue;
++ }
++ break;
++
++ case 'D': /* floating point rd */
++ case 'S': /* floating point rs1 */
++ case 'T': /* floating point rs2 */
++ case 'U': /* floating point rs1 and rs2 */
++ case 'R': /* floating point rs3 */
++ if (reg_lookup (&s, RCLASS_FPR, &regno))
++ {
++ c = *args;
++ if (*s == ' ')
++ ++s;
++ switch (c)
++ {
++ case 'D':
++ INSERT_OPERAND (RD, *ip, regno);
++ break;
++ case 'S':
++ INSERT_OPERAND (RS1, *ip, regno);
++ break;
++ case 'U':
++ INSERT_OPERAND (RS1, *ip, regno);
++ /* fallthru */
++ case 'T':
++ INSERT_OPERAND (RS2, *ip, regno);
++ break;
++ case 'R':
++ INSERT_OPERAND (RS3, *ip, regno);
++ break;
++ }
++ continue;
++ }
++
++ break;
++
++ case 'I':
++ my_getExpression (&imm_expr, s);
++ if (imm_expr.X_op != O_big
++ && imm_expr.X_op != O_constant)
++ insn_error = _("absolute expression required");
++ normalize_constant_expr (&imm_expr);
++ s = expr_end;
++ continue;
++
++ case 'A':
++ my_getExpression (&offset_expr, s);
++ normalize_constant_expr (&offset_expr);
++ imm_reloc = BFD_RELOC_32;
++ s = expr_end;
++ continue;
++
++ case 'j': /* sign-extended immediate */
++ imm_reloc = BFD_RELOC_RISCV_LO12_I;
++ p = percent_op_itype;
++ goto alu_op;
++ case 'q': /* store displacement */
++ p = percent_op_stype;
++ offset_reloc = BFD_RELOC_RISCV_LO12_S;
++ goto load_store;
++ case 'o': /* load displacement */
++ p = percent_op_itype;
++ offset_reloc = BFD_RELOC_RISCV_LO12_I;
++ goto load_store;
++ case '0': /* AMO "displacement," which must be zero */
++ p = percent_op_rtype;
++ offset_reloc = BFD_RELOC_UNUSED;
++load_store:
++ /* Check whether there is only a single bracketed expression
++ left. If so, it must be the base register and the
++ constant must be zero. */
++ offset_expr.X_op = O_constant;
++ offset_expr.X_add_number = 0;
++ if (*s == '(' && strchr (s + 1, '(') == 0)
++ continue;
++alu_op:
++	  /* If this value won't fit into the 12-bit immediate field, then
++	     go find a macro that will generate the 32-bit code
++	     pattern.  */
++ if (!my_getSmallExpression (&offset_expr, &offset_reloc, s, p))
++ {
++ normalize_constant_expr (&offset_expr);
++ if (offset_expr.X_op != O_constant
++ || (*args == '0' && offset_expr.X_add_number != 0)
++ || offset_expr.X_add_number >= (signed)RISCV_IMM_REACH/2
++ || offset_expr.X_add_number < -(signed)RISCV_IMM_REACH/2)
++ break;
++ }
++
++ s = expr_end;
++ continue;
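++	  /* RISCV_IMM_REACH/2 is 2048, so e.g. "lw a0, 2047(a1)" matches
++	     here, while "lw a0, 2048(a1)" fails this entry and the
++	     assembler moves on to the next table entry for "lw" (the
++	     macro variant).  */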
++
++ case 'p': /* pc relative offset */
++ offset_reloc = BFD_RELOC_12_PCREL;
++ my_getExpression (&offset_expr, s);
++ s = expr_end;
++ continue;
++
++ case 'u': /* upper 20 bits */
++ p = percent_op_utype;
++ if (!my_getSmallExpression (&imm_expr, &imm_reloc, s, p)
++ && imm_expr.X_op == O_constant)
++ {
++ if (imm_expr.X_add_number < 0
++ || imm_expr.X_add_number >= (signed)RISCV_BIGIMM_REACH)
++ as_bad (_("lui expression not in range 0..1048575"));
++
++ imm_reloc = BFD_RELOC_RISCV_HI20;
++ imm_expr.X_add_number <<= RISCV_IMM_BITS;
++ }
++ s = expr_end;
++ continue;
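++	  /* For example, "lui a0, 0x12345": the operand is within
++	     0..1048575, so it is shifted left by RISCV_IMM_BITS (12)
++	     to 0x12345000, and the HI20 fixup later deposits bits
++	     31:12 into the instruction.  */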
++
++ case 'a': /* 26 bit address */
++ my_getExpression (&offset_expr, s);
++ s = expr_end;
++ offset_reloc = BFD_RELOC_RISCV_JMP;
++ continue;
++
++ case 'c':
++ my_getExpression (&offset_expr, s);
++ s = expr_end;
++ offset_reloc = BFD_RELOC_RISCV_CALL;
++ if (*s == '@')
++ offset_reloc = BFD_RELOC_RISCV_CALL_PLT, s++;
++ continue;
++
++ default:
++ as_bad (_("bad char = '%c'\n"), *args);
++ internalError ();
++ }
++ break;
++ }
++ /* Args don't match. */
++ if (insn + 1 < &riscv_opcodes[NUMOPCODES] &&
++ !strcmp (insn->name, insn[1].name))
++ {
++ ++insn;
++ s = argsStart;
++ insn_error = _("illegal operands");
++ continue;
++ }
++ if (save_c)
++ *(--argsStart) = save_c;
++ insn_error = _("illegal operands");
++ return;
++ }
++}
++
++void
++md_assemble (char *str)
++{
++ struct riscv_cl_insn insn;
++
++ imm_expr.X_op = O_absent;
++ offset_expr.X_op = O_absent;
++ imm_reloc = BFD_RELOC_UNUSED;
++ offset_reloc = BFD_RELOC_UNUSED;
++
++ riscv_ip (str, &insn);
++
++ if (insn_error)
++ {
++ as_bad ("%s `%s'", insn_error, str);
++ return;
++ }
++
++ if (insn.insn_mo->pinfo == INSN_MACRO)
++ macro (&insn);
++ else
++ {
++ if (imm_expr.X_op != O_absent)
++ append_insn (&insn, &imm_expr, imm_reloc);
++ else if (offset_expr.X_op != O_absent)
++ append_insn (&insn, &offset_expr, offset_reloc);
++ else
++ append_insn (&insn, NULL, BFD_RELOC_UNUSED);
++ }
++}
++
++char *
++md_atof (int type, char *litP, int *sizeP)
++{
++ return ieee_md_atof (type, litP, sizeP, TARGET_BYTES_BIG_ENDIAN);
++}
++
++void
++md_number_to_chars (char *buf, valueT val, int n)
++{
++ number_to_chars_littleendian (buf, val, n);
++}
++
++const char *md_shortopts = "O::g::G:";
++
++enum options
++ {
++ OPTION_M32 = OPTION_MD_BASE,
++ OPTION_M64,
++ OPTION_MARCH,
++ OPTION_PIC,
++ OPTION_NO_PIC,
++ OPTION_MRVC,
++ OPTION_MNO_RVC,
++ OPTION_END_OF_ENUM
++ };
++
++struct option md_longopts[] =
++{
++ {"m32", no_argument, NULL, OPTION_M32},
++ {"m64", no_argument, NULL, OPTION_M64},
++ {"march", required_argument, NULL, OPTION_MARCH},
++ {"fPIC", no_argument, NULL, OPTION_PIC},
++ {"fpic", no_argument, NULL, OPTION_PIC},
++ {"fno-pic", no_argument, NULL, OPTION_NO_PIC},
++ {"mrvc", no_argument, NULL, OPTION_MRVC},
++ {"mno-rvc", no_argument, NULL, OPTION_MNO_RVC},
++
++ {NULL, no_argument, NULL, 0}
++};
++size_t md_longopts_size = sizeof (md_longopts);
++
++int
++md_parse_option (int c, char *arg)
++{
++ switch (c)
++ {
++ case OPTION_MRVC:
++ riscv_opts.rvc = 1;
++ break;
++
++ case OPTION_MNO_RVC:
++ riscv_opts.rvc = 0;
++ break;
++
++ case OPTION_M32:
++ rv64 = FALSE;
++ break;
++
++ case OPTION_M64:
++ rv64 = TRUE;
++ break;
++
++ case OPTION_MARCH:
++      riscv_set_arch (arg);
++      break;
++
++ case OPTION_NO_PIC:
++ riscv_opts.pic = FALSE;
++ break;
++
++ case OPTION_PIC:
++ riscv_opts.pic = TRUE;
++ break;
++
++ default:
++ return 0;
++ }
++
++ return 1;
++}
++
++void
++riscv_after_parse_args (void)
++{
++ if (riscv_subsets == NULL)
++ riscv_set_arch("RVIMAFDXcustom");
++}
++
++void
++riscv_init_after_args (void)
++{
++ /* initialize opcodes */
++ bfd_riscv_num_opcodes = bfd_riscv_num_builtin_opcodes;
++ riscv_opcodes = (struct riscv_opcode *) riscv_builtin_opcodes;
++}
++
++long
++md_pcrel_from (fixS *fixP)
++{
++ return fixP->fx_where + fixP->fx_frag->fr_address;
++}
++
++/* Apply a fixup to the object file. */
++
++void
++md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
++{
++ bfd_byte *buf = (bfd_byte *) (fixP->fx_frag->fr_literal + fixP->fx_where);
++
++ /* Remember value for tc_gen_reloc. */
++ fixP->fx_addnumber = *valP;
++
++ switch (fixP->fx_r_type)
++ {
++ case BFD_RELOC_RISCV_TLS_GOT_HI20:
++ case BFD_RELOC_RISCV_TLS_GD_HI20:
++ case BFD_RELOC_RISCV_TLS_DTPREL32:
++ case BFD_RELOC_RISCV_TLS_DTPREL64:
++ case BFD_RELOC_RISCV_TPREL_HI20:
++ case BFD_RELOC_RISCV_TPREL_LO12_I:
++ case BFD_RELOC_RISCV_TPREL_LO12_S:
++ case BFD_RELOC_RISCV_TPREL_ADD:
++ S_SET_THREAD_LOCAL (fixP->fx_addsy);
++ /* fall through */
++
++ case BFD_RELOC_RISCV_GOT_HI20:
++ case BFD_RELOC_RISCV_PCREL_HI20:
++ case BFD_RELOC_RISCV_HI20:
++ case BFD_RELOC_RISCV_LO12_I:
++ case BFD_RELOC_RISCV_LO12_S:
++ case BFD_RELOC_RISCV_ADD8:
++ case BFD_RELOC_RISCV_ADD16:
++ case BFD_RELOC_RISCV_ADD32:
++ case BFD_RELOC_RISCV_ADD64:
++ case BFD_RELOC_RISCV_SUB8:
++ case BFD_RELOC_RISCV_SUB16:
++ case BFD_RELOC_RISCV_SUB32:
++ case BFD_RELOC_RISCV_SUB64:
++ gas_assert (fixP->fx_addsy != NULL);
++      /* Nothing to do here.  The value comes from the reloc entry.  */
++ break;
++
++ case BFD_RELOC_64:
++ case BFD_RELOC_32:
++ case BFD_RELOC_16:
++ case BFD_RELOC_8:
++ if (fixP->fx_addsy && fixP->fx_subsy)
++ {
++ fixP->fx_next = xmemdup (fixP, sizeof (*fixP), sizeof (*fixP));
++ fixP->fx_next->fx_addsy = fixP->fx_subsy;
++ fixP->fx_next->fx_subsy = NULL;
++ fixP->fx_next->fx_offset = 0;
++ fixP->fx_subsy = NULL;
++
++ if (fixP->fx_r_type == BFD_RELOC_64)
++ fixP->fx_r_type = BFD_RELOC_RISCV_ADD64,
++ fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB64;
++ else if (fixP->fx_r_type == BFD_RELOC_32)
++ fixP->fx_r_type = BFD_RELOC_RISCV_ADD32,
++ fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB32;
++ else if (fixP->fx_r_type == BFD_RELOC_16)
++ fixP->fx_r_type = BFD_RELOC_RISCV_ADD16,
++ fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB16;
++ else
++ fixP->fx_r_type = BFD_RELOC_RISCV_ADD8,
++ fixP->fx_next->fx_r_type = BFD_RELOC_RISCV_SUB8;
++ }
++ /* fall through */
++
++ case BFD_RELOC_RVA:
++ /* If we are deleting this reloc entry, we must fill in the
++ value now. This can happen if we have a .word which is not
++ resolved when it appears but is later defined. */
++ if (fixP->fx_addsy == NULL)
++ {
++ gas_assert (fixP->fx_size <= sizeof (valueT));
++ md_number_to_chars ((char *) buf, *valP, fixP->fx_size);
++ fixP->fx_done = 1;
++ }
++ break;
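++      /* Thus ".word sym2 - sym1", with both symbols still unresolved
++	 at this point, is emitted as an R_RISCV_ADD32 against sym2
++	 paired with an R_RISCV_SUB32 against sym1, so the linker can
++	 compute the difference after relaxation.  */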
++
++ case BFD_RELOC_RISCV_JMP:
++ if (fixP->fx_addsy)
++ {
++ /* Fill in a tentative value to improve objdump readability. */
++ bfd_vma delta = ENCODE_UJTYPE_IMM (S_GET_VALUE (fixP->fx_addsy) + *valP);
++ bfd_putl32 (bfd_getl32 (buf) | delta, buf);
++ }
++ break;
++
++ case BFD_RELOC_12_PCREL:
++ if (fixP->fx_addsy)
++ {
++ /* Fill in a tentative value to improve objdump readability. */
++ bfd_vma delta = ENCODE_SBTYPE_IMM (S_GET_VALUE (fixP->fx_addsy) + *valP);
++ bfd_putl32 (bfd_getl32 (buf) | delta, buf);
++ }
++ break;
++
++ case BFD_RELOC_RISCV_PCREL_LO12_S:
++ case BFD_RELOC_RISCV_PCREL_LO12_I:
++ case BFD_RELOC_RISCV_CALL:
++ case BFD_RELOC_RISCV_CALL_PLT:
++ case BFD_RELOC_RISCV_ALIGN:
++ break;
++
++ default:
++ /* We ignore generic BFD relocations we don't know about. */
++ if (bfd_reloc_type_lookup (stdoutput, fixP->fx_r_type) != NULL)
++ internalError ();
++ }
++}
++
++/* This structure is used to hold a stack of .set values. */
++
++struct riscv_option_stack
++{
++ struct riscv_option_stack *next;
++ struct riscv_set_options options;
++};
++
++static struct riscv_option_stack *riscv_opts_stack;
++
++/* Handle the .set pseudo-op. */
++
++static void
++s_riscv_option (int x ATTRIBUTE_UNUSED)
++{
++ char *name = input_line_pointer, ch;
++
++ while (!is_end_of_line[(unsigned char) *input_line_pointer])
++ ++input_line_pointer;
++ ch = *input_line_pointer;
++ *input_line_pointer = '\0';
++
++ if (strcmp (name, "rvc") == 0)
++ riscv_opts.rvc = 1;
++ else if (strcmp (name, "norvc") == 0)
++ riscv_opts.rvc = 0;
++ else if (strcmp (name, "push") == 0)
++ {
++ struct riscv_option_stack *s;
++
++ s = (struct riscv_option_stack *) xmalloc (sizeof *s);
++ s->next = riscv_opts_stack;
++ s->options = riscv_opts;
++ riscv_opts_stack = s;
++ }
++ else if (strcmp (name, "pop") == 0)
++ {
++ struct riscv_option_stack *s;
++
++ s = riscv_opts_stack;
++ if (s == NULL)
++ as_bad (_(".option pop with no .option push"));
++ else
++ {
++ riscv_opts = s->options;
++ riscv_opts_stack = s->next;
++ free (s);
++ }
++ }
++ else
++ {
++ as_warn (_("Unrecognized .option directive: %s\n"), name);
++ }
++ *input_line_pointer = ch;
++ demand_empty_rest_of_line ();
++}
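++/* Typical use in hand-written assembly:
++
++	.option push
++	.option norvc	# force 4-byte encodings for this sequence
++	...
++	.option pop	# restore the previous RVC setting
++   */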
++
++/* Handle the .dtprelword and .dtpreldword pseudo-ops. They generate
++ a 32-bit or 64-bit DTP-relative relocation (BYTES says which) for
++ use in DWARF debug information. */
++
++static void
++s_dtprel (int bytes)
++{
++ expressionS ex;
++ char *p;
++
++ expression (&ex);
++
++ if (ex.X_op != O_symbol)
++ {
++ as_bad (_("Unsupported use of %s"), (bytes == 8
++ ? ".dtpreldword"
++ : ".dtprelword"));
++ ignore_rest_of_line ();
++ }
++
++ p = frag_more (bytes);
++ md_number_to_chars (p, 0, bytes);
++ fix_new_exp (frag_now, p - frag_now->fr_literal, bytes, &ex, FALSE,
++ (bytes == 8
++ ? BFD_RELOC_RISCV_TLS_DTPREL64
++ : BFD_RELOC_RISCV_TLS_DTPREL32));
++
++ demand_empty_rest_of_line ();
++}
++
++/* Handle the .bss pseudo-op. */
++
++static void
++s_bss (int ignore ATTRIBUTE_UNUSED)
++{
++ subseg_set (bss_section, 0);
++ demand_empty_rest_of_line ();
++}
++
++/* Align to a given power of two. */
++
++static void
++s_align (int x ATTRIBUTE_UNUSED)
++{
++ int alignment, fill_value = 0, fill_value_specified = 0;
++
++ alignment = get_absolute_expression ();
++ if (alignment < 0 || alignment > 31)
++ as_bad (_("unsatisfiable alignment: %d"), alignment);
++
++ if (*input_line_pointer == ',')
++ {
++ ++input_line_pointer;
++ fill_value = get_absolute_expression ();
++ fill_value_specified = 1;
++ }
++
++ if (!fill_value_specified && subseg_text_p (now_seg) && alignment > 2)
++ {
++ /* Emit the worst-case NOP string. The linker will delete any
++ unnecessary NOPs. This allows us to support code alignment
++ in spite of linker relaxations. */
++ bfd_vma i, worst_case_nop_bytes = (1L << alignment) - 4;
++ char *nops = frag_more (worst_case_nop_bytes);
++ for (i = 0; i < worst_case_nop_bytes; i += 4)
++ md_number_to_chars (nops + i, RISCV_NOP, 4);
++
++ expressionS ex;
++ ex.X_op = O_constant;
++ ex.X_add_number = worst_case_nop_bytes;
++
++ fix_new_exp (frag_now, nops - frag_now->fr_literal, 0,
++ &ex, TRUE, BFD_RELOC_RISCV_ALIGN);
++ }
++ else if (alignment)
++ frag_align (alignment, fill_value, 0);
++
++ record_alignment (now_seg, alignment);
++
++ demand_empty_rest_of_line ();
++}
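++/* For instance, ".align 4" in a text section emits
++   (1 << 4) - 4 = 12 bytes of NOPs (three 4-byte NOPs) together with
++   an R_RISCV_ALIGN fixup whose addend is 12; the linker keeps only
++   as many NOPs as the final address requires.  */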
++
++int
++md_estimate_size_before_relax (fragS *fragp, asection *segtype)
++{
++ return (fragp->fr_var = relaxed_branch_length (fragp, segtype, FALSE));
++}
++
++/* Translate internal representation of relocation info to BFD target
++ format. */
++
++arelent *
++tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
++{
++ arelent *reloc = (arelent *) xmalloc (sizeof (arelent));
++
++ reloc->sym_ptr_ptr = (asymbol **) xmalloc (sizeof (asymbol *));
++ *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
++ reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
++
++ if (fixp->fx_pcrel)
++ /* At this point, fx_addnumber is "symbol offset - pcrel address".
++ Relocations want only the symbol offset. */
++ reloc->addend = fixp->fx_addnumber + reloc->address;
++ else
++ reloc->addend = fixp->fx_addnumber;
++
++ reloc->howto = bfd_reloc_type_lookup (stdoutput, fixp->fx_r_type);
++ if (reloc->howto == NULL)
++ {
++ if ((fixp->fx_r_type == BFD_RELOC_16 || fixp->fx_r_type == BFD_RELOC_8)
++ && fixp->fx_addsy != NULL && fixp->fx_subsy != NULL)
++ {
++ /* We don't have R_RISCV_8/16, but for this special case,
++ we can use R_RISCV_ADD8/16 with R_RISCV_SUB8/16. */
++ return reloc;
++ }
++
++ as_bad_where (fixp->fx_file, fixp->fx_line,
++ _("cannot represent %s relocation in object file"),
++ bfd_get_reloc_code_name (fixp->fx_r_type));
++ return NULL;
++ }
++
++ return reloc;
++}
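++/* Concretely: for a pc-relative fixup at address 0x40 against a
++   symbol at offset 0x100, fx_addnumber arrives as 0x100 - 0x40 = 0xc0,
++   and the addend above restores 0xc0 + 0x40 = 0x100.  */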
++
++int
++riscv_relax_frag (asection *sec, fragS *fragp, long stretch ATTRIBUTE_UNUSED)
++{
++ if (RELAX_BRANCH_P (fragp->fr_subtype))
++ {
++ offsetT old_var = fragp->fr_var;
++ fragp->fr_var = relaxed_branch_length (fragp, sec, TRUE);
++ return fragp->fr_var - old_var;
++ }
++
++ return 0;
++}
++
++/* Convert a machine dependent frag. */
++
++static void
++md_convert_frag_branch (fragS *fragp)
++{
++ bfd_byte *buf;
++ insn_t insn;
++ expressionS exp;
++ fixS *fixp;
++
++ buf = (bfd_byte *)fragp->fr_literal + fragp->fr_fix;
++
++ exp.X_op = O_symbol;
++ exp.X_add_symbol = fragp->fr_symbol;
++ exp.X_add_number = fragp->fr_offset;
++
++ if (RELAX_BRANCH_TOOFAR (fragp->fr_subtype))
++ {
++ gas_assert (fragp->fr_var == 8);
++ /* We could relax JAL to AUIPC/JALR, but we don't do this yet. */
++ gas_assert (!RELAX_BRANCH_UNCOND (fragp->fr_subtype));
++
++ /* Invert the branch condition. Branch over the jump. */
++ insn = bfd_getl32 (buf);
++ insn ^= MATCH_BEQ ^ MATCH_BNE;
++ insn |= ENCODE_SBTYPE_IMM (8);
++ md_number_to_chars ((char *) buf, insn, 4);
++ buf += 4;
++
++ /* Jump to the target. */
++ fixp = fix_new_exp (fragp, buf - (bfd_byte *)fragp->fr_literal,
++ 4, &exp, FALSE, BFD_RELOC_RISCV_JMP);
++ md_number_to_chars ((char *) buf, MATCH_JAL, 4);
++ buf += 4;
++ }
++ else
++ {
++ fixp = fix_new_exp (fragp, buf - (bfd_byte *)fragp->fr_literal,
++ 4, &exp, FALSE, BFD_RELOC_12_PCREL);
++ buf += 4;
++ }
++
++ fixp->fx_file = fragp->fr_file;
++ fixp->fx_line = fragp->fr_line;
++ fixp->fx_pcrel = 1;
++
++ gas_assert (buf == (bfd_byte *)fragp->fr_literal
++ + fragp->fr_fix + fragp->fr_var);
++
++ fragp->fr_fix += fragp->fr_var;
++}
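++/* The inversion above works because MATCH_BEQ ^ MATCH_BNE == 0x1000,
++   the low bit of the funct3 field, and the RISC-V branches pair up as
++   beq/bne, blt/bge, bltu/bgeu, so XORing any conditional branch with
++   0x1000 negates its condition.  */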
++
++/* Convert a machine dependent frag to its final form now that
++   relaxation is complete.  */
++
++void
++md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT asec ATTRIBUTE_UNUSED,
++ fragS *fragp)
++{
++ gas_assert (RELAX_BRANCH_P (fragp->fr_subtype));
++ md_convert_frag_branch (fragp);
++}
++
++void
++md_show_usage (FILE *stream)
++{
++ fprintf (stream, _("\
++RISC-V options:\n\
++ -m32 assemble RV32 code\n\
++ -m64 assemble RV64 code (default)\n\
++ -fpic generate position-independent code\n\
++ -fno-pic don't generate position-independent code (default)\n\
++"));
++}
++
++/* Standard calling conventions leave the CFA at SP on entry. */
++void
++riscv_cfi_frame_initial_instructions (void)
++{
++ cfi_add_CFA_def_cfa_register (X_SP);
++}
++
++int
++tc_riscv_regname_to_dw2regnum (char *regname)
++{
++ int reg;
++
++ if ((reg = reg_lookup_internal (regname, RCLASS_GPR)) >= 0)
++ return reg;
++
++ if ((reg = reg_lookup_internal (regname, RCLASS_FPR)) >= 0)
++ return reg + 32;
++
++ as_bad (_("unknown register `%s'"), regname);
++ return -1;
++}
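++/* For example, "a0" (GPR 10) maps to DWARF register 10, while "fa0"
++   (FPR 10) maps to 10 + 32 = 42.  */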
++
++void
++riscv_elf_final_processing (void)
++{
++ struct riscv_subset* s;
++
++ unsigned int Xlen = 0;
++ for (s = riscv_subsets; s != NULL; s = s->next)
++ if (s->name[0] == 'X')
++ Xlen += strlen(s->name);
++
++  char extension[Xlen + 1];
++ extension[0] = 0;
++ for (s = riscv_subsets; s != NULL; s = s->next)
++ if (s->name[0] == 'X')
++ strcat(extension, s->name);
++
++ EF_SET_RISCV_EXT(elf_elfheader (stdoutput)->e_flags,
++ riscv_elf_name_to_flag (extension));
++}
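++/* For example, with an "RVIMAFDXhwacha" arch string the extension
++   string built above is "Xhwacha", riscv_elf_name_to_flag() returns
++   E_RISCV_EXT_Xhwacha, and the e_flags extension field becomes
++   0x0001 << EF_RISCV_EXT_SH.  */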
++
++/* Pseudo-op table. */
++
++static const pseudo_typeS riscv_pseudo_table[] =
++{
++ /* RISC-V-specific pseudo-ops. */
++ {"option", s_riscv_option, 0},
++ {"half", cons, 2},
++ {"word", cons, 4},
++ {"dword", cons, 8},
++ {"dtprelword", s_dtprel, 4},
++ {"dtpreldword", s_dtprel, 8},
++ {"bss", s_bss, 0},
++ {"align", s_align, 0},
++
++ /* leb128 doesn't work with relaxation; disallow it */
++ {"uleb128", s_err, 0},
++ {"sleb128", s_err, 0},
++
++ { NULL, NULL, 0 },
++};
++
++void
++riscv_pop_insert (void)
++{
++ extern void pop_insert (const pseudo_typeS *);
++
++ pop_insert (riscv_pseudo_table);
++}
+diff -urN original-binutils/gas/config/tc-riscv.h binutils/gas/config/tc-riscv.h
+--- original-binutils/gas/config/tc-riscv.h 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/gas/config/tc-riscv.h 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,102 @@
++/* tc-riscv.h -- header file for tc-riscv.c.
++ Copyright 2011-2014 Free Software Foundation, Inc.
++
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target.
++
++ This file is part of GAS.
++
++ GAS is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ GAS is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with GAS; see the file COPYING. If not, write to the Free
++ Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
++ 02110-1301, USA. */
++
++#ifndef TC_RISCV
++#define TC_RISCV
++
++#include "opcode/riscv.h"
++
++struct frag;
++struct expressionS;
++
++#define TARGET_BYTES_BIG_ENDIAN 0
++
++#define TARGET_ARCH bfd_arch_riscv
++
++#define WORKING_DOT_WORD 1
++#define OLD_FLOAT_READS
++#define REPEAT_CONS_EXPRESSIONS
++#define LOCAL_LABELS_FB 1
++#define FAKE_LABEL_NAME ".L0 "
++
++#define md_relax_frag(segment, fragp, stretch) \
++ riscv_relax_frag(segment, fragp, stretch)
++extern int riscv_relax_frag (asection *, struct frag *, long);
++
++#define md_section_align(seg,size) (size)
++#define md_undefined_symbol(name) (0)
++#define md_operand(x)
++
++#define MAX_MEM_FOR_RS_ALIGN_CODE (1 + 2)
++
++#define TC_SYMFIELD_TYPE int
++
++/* The ISA of the target may change based on command-line arguments. */
++#define TARGET_FORMAT riscv_target_format()
++extern const char *riscv_target_format (void);
++
++#define md_after_parse_args() riscv_after_parse_args()
++extern void riscv_after_parse_args (void);
++
++#define tc_init_after_args() riscv_init_after_args()
++extern void riscv_init_after_args (void);
++
++#define md_parse_long_option(arg) riscv_parse_long_option (arg)
++extern int riscv_parse_long_option (const char *);
++
++/* Let the linker resolve all the relocs due to relaxation. */
++#define tc_fix_adjustable(fixp) 0
++#define md_allow_local_subtract(l,r,s) 0
++
++/* Values passed to md_apply_fix don't include symbol values. */
++#define MD_APPLY_SYM_VALUE(FIX) 0
++
++/* Global syms must not be resolved, to support ELF shared libraries. */
++#define EXTERN_FORCE_RELOC \
++ (OUTPUT_FLAVOR == bfd_target_elf_flavour)
++
++#define TC_FORCE_RELOCATION_SUB_SAME(FIX, SEG) ((SEG)->flags & SEC_CODE)
++#define TC_FORCE_RELOCATION_SUB_LOCAL(FIX, SEG) 1
++#define TC_VALIDATE_FIX_SUB(FIX, SEG) 1
++#define TC_FORCE_RELOCATION_LOCAL(FIX) 1
++#define DIFF_EXPR_OK 1
++
++extern void riscv_pop_insert (void);
++#define md_pop_insert() riscv_pop_insert()
++
++#define TARGET_USE_CFIPOP 1
++
++#define tc_cfi_frame_initial_instructions riscv_cfi_frame_initial_instructions
++extern void riscv_cfi_frame_initial_instructions (void);
++
++#define tc_regname_to_dw2regnum tc_riscv_regname_to_dw2regnum
++extern int tc_riscv_regname_to_dw2regnum (char *regname);
++
++extern bfd_boolean rv64;
++#define DWARF2_DEFAULT_RETURN_COLUMN X_RA
++#define DWARF2_CIE_DATA_ALIGNMENT (rv64 ? 8 : 4)
++
++#define elf_tc_final_processing riscv_elf_final_processing
++extern void riscv_elf_final_processing (void);
++
++#endif /* TC_RISCV */
+diff -urN original-binutils/gas/configure.tgt binutils/gas/configure.tgt
+--- original-binutils/gas/configure.tgt 2014-10-14 09:32:03.000000000 +0200
++++ binutils-2.25/gas/configure.tgt 2015-03-07 09:55:02.379135671 +0100
+@@ -86,6 +86,7 @@
+ pj*) cpu_type=pj endian=big ;;
+ powerpc*le*) cpu_type=ppc endian=little ;;
+ powerpc*) cpu_type=ppc endian=big ;;
++ riscv*) cpu_type=riscv endian=little ;;
+ rs6000*) cpu_type=ppc ;;
+ rl78*) cpu_type=rl78 ;;
+ rx) cpu_type=rx ;;
+@@ -384,6 +385,8 @@
+ ppc-*-kaos*) fmt=elf ;;
+ ppc-*-lynxos*) fmt=elf em=lynx ;;
+
++ riscv*-*-*) fmt=elf endian=little em=linux bfd_gas=yes ;;
++
+ s390-*-linux-*) fmt=elf em=linux ;;
+ s390-*-tpf*) fmt=elf ;;
+
+diff -urN original-binutils/gas/Makefile.am binutils/gas/Makefile.am
+--- original-binutils/gas/Makefile.am 2014-10-14 09:32:02.000000000 +0200
++++ binutils-2.25/gas/Makefile.am 2015-03-07 09:55:02.379135671 +0100
+@@ -171,6 +171,7 @@
+ config/tc-pdp11.c \
+ config/tc-pj.c \
+ config/tc-ppc.c \
++ config/tc-riscv.c \
+ config/tc-rl78.c \
+ config/tc-rx.c \
+ config/tc-s390.c \
+@@ -242,6 +243,7 @@
+ config/tc-pdp11.h \
+ config/tc-pj.h \
+ config/tc-ppc.h \
++ config/tc-riscv.h \
+ config/tc-rl78.h \
+ config/tc-rx.h \
+ config/tc-s390.h \
+diff -urN original-binutils/gas/Makefile.in binutils/gas/Makefile.in
+--- original-binutils/gas/Makefile.in 2014-10-14 09:32:02.000000000 +0200
++++ binutils-2.25/gas/Makefile.in 2015-03-07 09:55:02.379135671 +0100
+@@ -440,6 +440,7 @@
+ config/tc-pdp11.c \
+ config/tc-pj.c \
+ config/tc-ppc.c \
++ config/tc-riscv.c \
+ config/tc-rl78.c \
+ config/tc-rx.c \
+ config/tc-s390.c \
+@@ -511,6 +512,7 @@
+ config/tc-pdp11.h \
+ config/tc-pj.h \
+ config/tc-ppc.h \
++ config/tc-riscv.h \
+ config/tc-rl78.h \
+ config/tc-rx.h \
+ config/tc-s390.h \
+@@ -866,6 +868,7 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-pdp11.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-pj.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-ppc.Po@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-riscv.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-rl78.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-rx.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tc-s390.Po@am__quote@
+@@ -1571,6 +1574,20 @@
+ @AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+ @am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-ppc.obj `if test -f 'config/tc-ppc.c'; then $(CYGPATH_W) 'config/tc-ppc.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-ppc.c'; fi`
+
++tc-riscv.o: config/tc-riscv.c
++@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-riscv.o -MD -MP -MF $(DEPDIR)/tc-riscv.Tpo -c -o tc-riscv.o `test -f 'config/tc-riscv.c' || echo '$(srcdir)/'`config/tc-riscv.c
++@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/tc-riscv.Tpo $(DEPDIR)/tc-riscv.Po
++@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='config/tc-riscv.c' object='tc-riscv.o' libtool=no @AMDEPBACKSLASH@
++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
++@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-riscv.o `test -f 'config/tc-riscv.c' || echo '$(srcdir)/'`config/tc-riscv.c
++
++tc-riscv.obj: config/tc-riscv.c
++@am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-riscv.obj -MD -MP -MF $(DEPDIR)/tc-riscv.Tpo -c -o tc-riscv.obj `if test -f 'config/tc-riscv.c'; then $(CYGPATH_W) 'config/tc-riscv.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-riscv.c'; fi`
++@am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/tc-riscv.Tpo $(DEPDIR)/tc-riscv.Po
++@AMDEP_TRUE@@am__fastdepCC_FALSE@ source='config/tc-riscv.c' object='tc-riscv.obj' libtool=no @AMDEPBACKSLASH@
++@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
++@am__fastdepCC_FALSE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o tc-riscv.obj `if test -f 'config/tc-riscv.c'; then $(CYGPATH_W) 'config/tc-riscv.c'; else $(CYGPATH_W) '$(srcdir)/config/tc-riscv.c'; fi`
++
+ tc-rl78.o: config/tc-rl78.c
+ @am__fastdepCC_TRUE@ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT tc-rl78.o -MD -MP -MF $(DEPDIR)/tc-rl78.Tpo -c -o tc-rl78.o `test -f 'config/tc-rl78.c' || echo '$(srcdir)/'`config/tc-rl78.c
+ @am__fastdepCC_TRUE@ $(am__mv) $(DEPDIR)/tc-rl78.Tpo $(DEPDIR)/tc-rl78.Po
+diff -urN original-binutils/include/dis-asm.h binutils/include/dis-asm.h
+--- original-binutils/include/dis-asm.h 2014-10-14 09:32:04.000000000 +0200
++++ binutils-2.25/include/dis-asm.h 2015-03-07 09:55:02.379135671 +0100
+@@ -254,6 +254,7 @@
+ extern int print_insn_little_mips (bfd_vma, disassemble_info *);
+ extern int print_insn_little_nios2 (bfd_vma, disassemble_info *);
+ extern int print_insn_little_powerpc (bfd_vma, disassemble_info *);
++extern int print_insn_riscv (bfd_vma, disassemble_info *);
+ extern int print_insn_little_score (bfd_vma, disassemble_info *);
+ extern int print_insn_lm32 (bfd_vma, disassemble_info *);
+ extern int print_insn_m32c (bfd_vma, disassemble_info *);
+@@ -313,6 +314,7 @@
+ extern void print_i386_disassembler_options (FILE *);
+ extern void print_mips_disassembler_options (FILE *);
+ extern void print_ppc_disassembler_options (FILE *);
++extern void print_riscv_disassembler_options (FILE *);
+ extern void print_arm_disassembler_options (FILE *);
+ extern void parse_arm_disassembler_option (char *);
+ extern void print_s390_disassembler_options (FILE *);
+diff -urN original-binutils/include/elf/common.h binutils/include/elf/common.h
+--- original-binutils/include/elf/common.h 2014-10-14 09:32:04.000000000 +0200
++++ binutils-2.25/include/elf/common.h 2015-03-07 09:55:02.383135671 +0100
+@@ -301,6 +301,7 @@
+ #define EM_INTEL207 207 /* Reserved by Intel */
+ #define EM_INTEL208 208 /* Reserved by Intel */
+ #define EM_INTEL209 209 /* Reserved by Intel */
++#define EM_RISCV	  243	/* RISC-V */
+
+ /* If it is necessary to assign new unofficial EM_* values, please pick large
+ random numbers (0x8523, 0xa7f2, etc.) to minimize the chances of collision
+diff -urN original-binutils/include/elf/riscv.h binutils/include/elf/riscv.h
+--- original-binutils/include/elf/riscv.h 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/include/elf/riscv.h 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,138 @@
++/* RISC-V ELF support for BFD.
++ Copyright 2011-2014 Free Software Foundation, Inc.
++
++   Contributed by Andrew Waterman <waterman@cs.berkeley.edu> at UC Berkeley.
++ Based on MIPS ELF support for BFD, by Ian Lance Taylor.
++
++ This file is part of BFD, the Binary File Descriptor library.
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++/* This file holds definitions specific to the RISC-V ELF ABI.  Note
++ that most of this is not actually implemented by BFD. */
++
++#ifndef _ELF_RISCV_H
++#define _ELF_RISCV_H
++
++#include "elf/reloc-macros.h"
++
++/* Relocation types. */
++START_RELOC_NUMBERS (elf_riscv_reloc_type)
++ /* Relocation types used by the dynamic linker. */
++ RELOC_NUMBER (R_RISCV_NONE, 0)
++ RELOC_NUMBER (R_RISCV_32, 1)
++ RELOC_NUMBER (R_RISCV_64, 2)
++ RELOC_NUMBER (R_RISCV_RELATIVE, 3)
++ RELOC_NUMBER (R_RISCV_COPY, 4)
++ RELOC_NUMBER (R_RISCV_JUMP_SLOT, 5)
++ RELOC_NUMBER (R_RISCV_TLS_DTPMOD32, 6)
++ RELOC_NUMBER (R_RISCV_TLS_DTPMOD64, 7)
++ RELOC_NUMBER (R_RISCV_TLS_DTPREL32, 8)
++ RELOC_NUMBER (R_RISCV_TLS_DTPREL64, 9)
++ RELOC_NUMBER (R_RISCV_TLS_TPREL32, 10)
++ RELOC_NUMBER (R_RISCV_TLS_TPREL64, 11)
++
++ /* Relocation types not used by the dynamic linker. */
++ RELOC_NUMBER (R_RISCV_BRANCH, 16)
++ RELOC_NUMBER (R_RISCV_JAL, 17)
++ RELOC_NUMBER (R_RISCV_CALL, 18)
++ RELOC_NUMBER (R_RISCV_CALL_PLT, 19)
++ RELOC_NUMBER (R_RISCV_GOT_HI20, 20)
++ RELOC_NUMBER (R_RISCV_TLS_GOT_HI20, 21)
++ RELOC_NUMBER (R_RISCV_TLS_GD_HI20, 22)
++ RELOC_NUMBER (R_RISCV_PCREL_HI20, 23)
++ RELOC_NUMBER (R_RISCV_PCREL_LO12_I, 24)
++ RELOC_NUMBER (R_RISCV_PCREL_LO12_S, 25)
++ RELOC_NUMBER (R_RISCV_HI20, 26)
++ RELOC_NUMBER (R_RISCV_LO12_I, 27)
++ RELOC_NUMBER (R_RISCV_LO12_S, 28)
++ RELOC_NUMBER (R_RISCV_TPREL_HI20, 29)
++ RELOC_NUMBER (R_RISCV_TPREL_LO12_I, 30)
++ RELOC_NUMBER (R_RISCV_TPREL_LO12_S, 31)
++ RELOC_NUMBER (R_RISCV_TPREL_ADD, 32)
++ RELOC_NUMBER (R_RISCV_ADD8, 33)
++ RELOC_NUMBER (R_RISCV_ADD16, 34)
++ RELOC_NUMBER (R_RISCV_ADD32, 35)
++ RELOC_NUMBER (R_RISCV_ADD64, 36)
++ RELOC_NUMBER (R_RISCV_SUB8, 37)
++ RELOC_NUMBER (R_RISCV_SUB16, 38)
++ RELOC_NUMBER (R_RISCV_SUB32, 39)
++ RELOC_NUMBER (R_RISCV_SUB64, 40)
++ RELOC_NUMBER (R_RISCV_GNU_VTINHERIT, 41)
++ RELOC_NUMBER (R_RISCV_GNU_VTENTRY, 42)
++ RELOC_NUMBER (R_RISCV_ALIGN, 43)
++END_RELOC_NUMBERS (R_RISCV_max)
++
++/* Processor specific flags for the ELF header e_flags field. */
++
++/* Custom flag definitions. */
++
++#define EF_RISCV_EXT_MASK 0xffff
++#define EF_RISCV_EXT_SH 16
++#define E_RISCV_EXT_Xcustom 0x0000
++#define E_RISCV_EXT_Xhwacha 0x0001
++#define E_RISCV_EXT_RESERVED 0xffff
++
++#define EF_GET_RISCV_EXT(x) \
++ ((x >> EF_RISCV_EXT_SH) & EF_RISCV_EXT_MASK)
++
++#define EF_SET_RISCV_EXT(x, ext) \
++ do { x |= ((ext & EF_RISCV_EXT_MASK) << EF_RISCV_EXT_SH); } while (0)
++
++#define EF_IS_RISCV_EXT_Xcustom(x) \
++ (EF_GET_RISCV_EXT(x) == E_RISCV_EXT_Xcustom)
++
++/* A mapping from extension names to elf flags */
++
++struct riscv_extension_entry
++{
++ const char* name;
++ unsigned int flag;
++};
++
++static const struct riscv_extension_entry riscv_extension_map[] =
++{
++ {"Xcustom", E_RISCV_EXT_Xcustom},
++ {"Xhwacha", E_RISCV_EXT_Xhwacha},
++};
++
++/* Given an elf flag, return an extension name.  */
++
++static inline const char* riscv_elf_flag_to_name(unsigned int flag)
++{
++ unsigned int i;
++
++ for (i=0; i<sizeof(riscv_extension_map)/sizeof(riscv_extension_map[0]); i++)
++ if (riscv_extension_map[i].flag == flag)
++ return riscv_extension_map[i].name;
++
++ return NULL;
++}
++
++/* Given an extension name, return an elf flag.  */
++
++static inline unsigned int riscv_elf_name_to_flag(const char* name)
++{
++ unsigned int i;
++
++ for (i=0; i<sizeof(riscv_extension_map)/sizeof(riscv_extension_map[0]); i++)
++ if (strcmp(riscv_extension_map[i].name, name) == 0)
++ return riscv_extension_map[i].flag;
++
++ return E_RISCV_EXT_Xcustom;
++}
++
++#endif /* _ELF_RISCV_H */
+diff -urN original-binutils/include/opcode/riscv.h binutils/include/opcode/riscv.h
+--- original-binutils/include/opcode/riscv.h 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/include/opcode/riscv.h 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,320 @@
++/* riscv.h. RISC-V opcode list for GDB, the GNU debugger.
++ Copyright 2011
++ Free Software Foundation, Inc.
++ Contributed by Andrew Waterman
++
++This file is part of GDB, GAS, and the GNU binutils.
++
++GDB, GAS, and the GNU binutils are free software; you can redistribute
++them and/or modify them under the terms of the GNU General Public
++License as published by the Free Software Foundation; either version
++1, or (at your option) any later version.
++
++GDB, GAS, and the GNU binutils are distributed in the hope that they
++will be useful, but WITHOUT ANY WARRANTY; without even the implied
++warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
++the GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this file; see the file COPYING. If not, write to the Free
++Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
++
++#ifndef _RISCV_H_
++#define _RISCV_H_
++
++#include "riscv-opc.h"
++#include <stdlib.h>
++#include <stdint.h>
++
++/* RVC fields */
++
++#define OP_MASK_CRD 0x1f
++#define OP_SH_CRD 5
++#define OP_MASK_CRS2 0x1f
++#define OP_SH_CRS2 5
++#define OP_MASK_CRS1 0x1f
++#define OP_SH_CRS1 10
++#define OP_MASK_CRDS 0x7
++#define OP_SH_CRDS 13
++#define OP_MASK_CRS2S 0x7
++#define OP_SH_CRS2S 13
++#define OP_MASK_CRS2BS 0x7
++#define OP_SH_CRS2BS 5
++#define OP_MASK_CRS1S 0x7
++#define OP_SH_CRS1S 10
++#define OP_MASK_CIMM6 0x3f
++#define OP_SH_CIMM6 10
++#define OP_MASK_CIMM5 0x1f
++#define OP_SH_CIMM5 5
++#define OP_MASK_CIMM10 0x3ff
++#define OP_SH_CIMM10 5
++
++static const char rvc_rs1_regmap[8] = { 20, 21, 2, 3, 4, 5, 6, 7 };
++#define rvc_rd_regmap rvc_rs1_regmap
++#define rvc_rs2b_regmap rvc_rs1_regmap
++static const char rvc_rs2_regmap[8] = { 20, 21, 2, 3, 4, 5, 6, 0 };
++
++typedef uint64_t insn_t;
++
++static inline unsigned int riscv_insn_length (insn_t insn)
++{
++ if ((insn & 0x3) != 3) /* RVC */
++ return 2;
++ if ((insn & 0x1f) != 0x1f) /* base ISA and extensions in 32-bit space */
++ return 4;
++ if ((insn & 0x3f) == 0x1f) /* 48-bit extensions */
++ return 6;
++ if ((insn & 0x7f) == 0x3f) /* 64-bit extensions */
++ return 8;
++ /* longer instructions not supported at the moment */
++ return 2;
++}
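++/* A minimal usage sketch: a disassembler can fetch two bytes, compute
++   the full length, then read the remainder, e.g.
++
++     insn_t insn = buf[0] | (buf[1] << 8);
++     unsigned int len = riscv_insn_length (insn);
++
++   assuming BUF is a little-endian byte buffer with at least two bytes
++   available.  */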
++
++static const char * const riscv_rm[8] = {
++ "rne", "rtz", "rdn", "rup", "rmm", 0, 0, "dyn"
++};
++static const char* const riscv_pred_succ[16] = {
++ 0, "w", "r", "rw", "o", "ow", "or", "orw",
++ "i", "iw", "ir", "irw", "io", "iow", "ior", "iorw",
++};
++
++#define RVC_JUMP_BITS 10
++#define RVC_JUMP_ALIGN_BITS 1
++#define RVC_JUMP_ALIGN (1 << RVC_JUMP_ALIGN_BITS)
++#define RVC_JUMP_REACH ((1ULL<<RVC_JUMP_BITS)*RVC_JUMP_ALIGN)
++
++#define RVC_BRANCH_BITS 5
++#define RVC_BRANCH_ALIGN_BITS RVC_JUMP_ALIGN_BITS
++#define RVC_BRANCH_ALIGN (1 << RVC_BRANCH_ALIGN_BITS)
++#define RVC_BRANCH_REACH ((1ULL<<RVC_BRANCH_BITS)*RVC_BRANCH_ALIGN)
++
++#define RV_X(x, s, n) (((x) >> (s)) & ((1<<(n))-1))
++#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
++
++#define EXTRACT_ITYPE_IMM(x) \
++ (RV_X(x, 20, 12) | (RV_IMM_SIGN(x) << 12))
++#define EXTRACT_STYPE_IMM(x) \
++ (RV_X(x, 7, 5) | (RV_X(x, 25, 7) << 5) | (RV_IMM_SIGN(x) << 12))
++#define EXTRACT_SBTYPE_IMM(x) \
++ ((RV_X(x, 8, 4) << 1) | (RV_X(x, 25, 6) << 5) | (RV_X(x, 7, 1) << 11) | (RV_IMM_SIGN(x) << 12))
++#define EXTRACT_UTYPE_IMM(x) \
++ ((RV_X(x, 12, 20) << 12) | (RV_IMM_SIGN(x) << 32))
++#define EXTRACT_UJTYPE_IMM(x) \
++ ((RV_X(x, 21, 10) << 1) | (RV_X(x, 20, 1) << 11) | (RV_X(x, 12, 8) << 12) | (RV_IMM_SIGN(x) << 20))
++
++#define ENCODE_ITYPE_IMM(x) \
++ (RV_X(x, 0, 12) << 20)
++#define ENCODE_STYPE_IMM(x) \
++ ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25))
++#define ENCODE_SBTYPE_IMM(x) \
++ ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
++#define ENCODE_UTYPE_IMM(x) \
++ (RV_X(x, 12, 20) << 12)
++#define ENCODE_UJTYPE_IMM(x) \
++ ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
++
++#define VALID_ITYPE_IMM(x) (EXTRACT_ITYPE_IMM(ENCODE_ITYPE_IMM(x)) == (x))
++#define VALID_STYPE_IMM(x) (EXTRACT_STYPE_IMM(ENCODE_STYPE_IMM(x)) == (x))
++#define VALID_SBTYPE_IMM(x) (EXTRACT_SBTYPE_IMM(ENCODE_SBTYPE_IMM(x)) == (x))
++#define VALID_UTYPE_IMM(x) (EXTRACT_UTYPE_IMM(ENCODE_UTYPE_IMM(x)) == (x))
++#define VALID_UJTYPE_IMM(x) (EXTRACT_UJTYPE_IMM(ENCODE_UJTYPE_IMM(x)) == (x))
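++/* For example, a branch displacement of +8 encodes as
++   ENCODE_SBTYPE_IMM (8) == 0x400 (imm[4:1] == 4 in bits 11:8), and
++   EXTRACT_SBTYPE_IMM (0x400) recovers 8, so VALID_SBTYPE_IMM (8)
++   holds.  */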
++
++#define RISCV_RTYPE(insn, rd, rs1, rs2) \
++ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2))
++#define RISCV_ITYPE(insn, rd, rs1, imm) \
++ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ENCODE_ITYPE_IMM(imm))
++#define RISCV_STYPE(insn, rs1, rs2, imm) \
++ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_STYPE_IMM(imm))
++#define RISCV_SBTYPE(insn, rs1, rs2, target) \
++ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_SBTYPE_IMM(target))
++#define RISCV_UTYPE(insn, rd, bigimm) \
++ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UTYPE_IMM(bigimm))
++#define RISCV_UJTYPE(insn, rd, target) \
++ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UJTYPE_IMM(target))
++
++#define RISCV_NOP RISCV_ITYPE(ADDI, 0, 0, 0)
++
++#define RISCV_CONST_HIGH_PART(VALUE) \
++ (((VALUE) + (RISCV_IMM_REACH/2)) & ~(RISCV_IMM_REACH-1))
++#define RISCV_CONST_LOW_PART(VALUE) ((VALUE) - RISCV_CONST_HIGH_PART (VALUE))
++#define RISCV_PCREL_HIGH_PART(VALUE, PC) RISCV_CONST_HIGH_PART((VALUE) - (PC))
++#define RISCV_PCREL_LOW_PART(VALUE, PC) RISCV_CONST_LOW_PART((VALUE) - (PC))
++
++/* RV fields */
++
++#define OP_MASK_OP 0x7f
++#define OP_SH_OP 0
++#define OP_MASK_RS2 0x1f
++#define OP_SH_RS2 20
++#define OP_MASK_RS1 0x1f
++#define OP_SH_RS1 15
++#define OP_MASK_RS3 0x1f
++#define OP_SH_RS3 27
++#define OP_MASK_RD 0x1f
++#define OP_SH_RD 7
++#define OP_MASK_SHAMT 0x3f
++#define OP_SH_SHAMT 20
++#define OP_MASK_SHAMTW 0x1f
++#define OP_SH_SHAMTW 20
++#define OP_MASK_RM 0x7
++#define OP_SH_RM 12
++#define OP_MASK_PRED 0xf
++#define OP_SH_PRED 24
++#define OP_MASK_SUCC 0xf
++#define OP_SH_SUCC 20
++#define OP_MASK_AQ 0x1
++#define OP_SH_AQ 26
++#define OP_MASK_RL 0x1
++#define OP_SH_RL 25
++
++#define OP_MASK_VRD 0x1f
++#define OP_SH_VRD 7
++#define OP_MASK_VRS 0x1f
++#define OP_SH_VRS 15
++#define OP_MASK_VRT 0x1f
++#define OP_SH_VRT 20
++#define OP_MASK_VRR 0x1f
++#define OP_SH_VRR 27
++
++#define OP_MASK_VFD 0x1f
++#define OP_SH_VFD 7
++#define OP_MASK_VFS 0x1f
++#define OP_SH_VFS 15
++#define OP_MASK_VFT 0x1f
++#define OP_SH_VFT 20
++#define OP_MASK_VFR 0x1f
++#define OP_SH_VFR 27
++
++#define OP_MASK_IMMNGPR 0x3f
++#define OP_SH_IMMNGPR 20
++#define OP_MASK_IMMNFPR 0x3f
++#define OP_SH_IMMNFPR 26
++#define OP_MASK_IMMSEGNELM 0x7
++#define OP_SH_IMMSEGNELM 29
++#define OP_MASK_CUSTOM_IMM 0x7f
++#define OP_SH_CUSTOM_IMM 25
++#define OP_MASK_CSR 0xfff
++#define OP_SH_CSR 20
++
++#define X_RA 1
++#define X_SP 2
++#define X_GP 3
++#define X_TP 4
++#define X_T0 5
++#define X_T1 6
++#define X_T2 7
++#define X_T3 28
++
++#define NGPR 32
++#define NFPR 32
++#define NVGPR 32
++#define NVFPR 32
++
++#define RISCV_JUMP_BITS RISCV_BIGIMM_BITS
++#define RISCV_JUMP_ALIGN_BITS 1
++#define RISCV_JUMP_ALIGN (1 << RISCV_JUMP_ALIGN_BITS)
++#define RISCV_JUMP_REACH ((1ULL<<RISCV_JUMP_BITS)*RISCV_JUMP_ALIGN)
++
++#define RISCV_IMM_BITS 12
++#define RISCV_BIGIMM_BITS (32-RISCV_IMM_BITS)
++#define RISCV_IMM_REACH (1LL<<RISCV_IMM_BITS)
++#define RISCV_BIGIMM_REACH (1LL<<RISCV_BIGIMM_BITS)
++#define RISCV_BRANCH_BITS RISCV_IMM_BITS
++#define RISCV_BRANCH_ALIGN_BITS RISCV_JUMP_ALIGN_BITS
++#define RISCV_BRANCH_ALIGN (1 << RISCV_BRANCH_ALIGN_BITS)
++#define RISCV_BRANCH_REACH (RISCV_IMM_REACH*RISCV_BRANCH_ALIGN)
++
++/* This structure holds information for a particular instruction. */
++
++struct riscv_opcode
++{
++ /* The name of the instruction. */
++ const char *name;
++ /* The ISA subset name (I, M, A, F, D, Xextension). */
++ const char *subset;
++ /* A string describing the arguments for this instruction. */
++ const char *args;
++ /* The basic opcode for the instruction. When assembling, this
++ opcode is modified by the arguments to produce the actual opcode
++ that is used. If pinfo is INSN_MACRO, then this is 0. */
++ insn_t match;
++ /* If pinfo is not INSN_MACRO, then this is a bit mask for the
++ relevant portions of the opcode when disassembling. If the
++ actual opcode anded with the match field equals the opcode field,
++ then we have found the correct instruction. If pinfo is
++ INSN_MACRO, then this field is the macro identifier. */
++ insn_t mask;
++ /* A function to determine if a word corresponds to this instruction.
++ Usually, this computes ((word & mask) == match). */
++ int (*match_func)(const struct riscv_opcode *op, insn_t word);
++ /* For a macro, this is INSN_MACRO. Otherwise, it is a collection
++ of bits describing the instruction, notably any relevant hazard
++ information. */
++ unsigned long pinfo;
++};
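++/* A typical match_func simply tests the masked bits, e.g.
++
++     static int match_opcode (const struct riscv_opcode *op, insn_t insn)
++     {
++       return ((insn ^ op->match) & op->mask) == 0;
++     }
++
++   which is equivalent to (insn & op->mask) == op->match whenever the
++   match bits lie within the mask.  */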
++
++#define INSN_WRITE_GPR_D 0x00000001
++#define INSN_WRITE_GPR_RA 0x00000004
++#define INSN_WRITE_FPR_D 0x00000008
++#define INSN_READ_GPR_S 0x00000040
++#define INSN_READ_GPR_T 0x00000080
++#define INSN_READ_FPR_S 0x00000100
++#define INSN_READ_FPR_T 0x00000200
++#define INSN_READ_FPR_R 0x00000400
++/* Instruction is a simple alias (e.g. "mv" for "addi rd, rs1, 0").  */
++#define INSN_ALIAS 0x00001000
++/* Instruction is actually a macro. It should be ignored by the
++ disassembler, and requires special treatment by the assembler. */
++#define INSN_MACRO 0xffffffff
++
++/* This is a list of macro expanded instructions.
++
++ _I appended means immediate
++ _A appended means address
++ _AB appended means address with base register
++ _D appended means 64 bit floating point constant
++ _S appended means 32 bit floating point constant. */
++
++enum
++{
++ M_LA,
++ M_LLA,
++ M_LA_TLS_GD,
++ M_LA_TLS_IE,
++ M_LB,
++ M_LBU,
++ M_LH,
++ M_LHU,
++ M_LW,
++ M_LWU,
++ M_LD,
++ M_SB,
++ M_SH,
++ M_SW,
++ M_SD,
++ M_FLW,
++ M_FLD,
++ M_FSW,
++ M_FSD,
++ M_CALL,
++ M_J,
++ M_LI,
++ M_VF,
++ M_NUM_MACROS
++};
++
++
++extern const char * const riscv_gpr_names_numeric[NGPR];
++extern const char * const riscv_gpr_names_abi[NGPR];
++extern const char * const riscv_fpr_names_numeric[NFPR];
++extern const char * const riscv_fpr_names_abi[NFPR];
++extern const char * const riscv_vec_gpr_names[NVGPR];
++extern const char * const riscv_vec_fpr_names[NVFPR];
++
++extern const struct riscv_opcode riscv_builtin_opcodes[];
++extern const int bfd_riscv_num_builtin_opcodes;
++extern struct riscv_opcode *riscv_opcodes;
++extern int bfd_riscv_num_opcodes;
++#define NUMOPCODES bfd_riscv_num_opcodes
++
++#endif /* _RISCV_H_ */
+diff -urN original-binutils/include/opcode/riscv-opc.h binutils/include/opcode/riscv-opc.h
+--- original-binutils/include/opcode/riscv-opc.h 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/include/opcode/riscv-opc.h 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,1216 @@
++/* Automatically generated by parse-opcodes */
++#ifndef RISCV_ENCODING_H
++#define RISCV_ENCODING_H
++#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
++#define MASK_CUSTOM3_RD_RS1_RS2 0x707f
++#define MATCH_VLSEGSTWU 0xc00305b
++#define MASK_VLSEGSTWU 0x1e00707f
++#define MATCH_C_LW0 0x12
++#define MASK_C_LW0 0x801f
++#define MATCH_FMV_D_X 0xf2000053
++#define MASK_FMV_D_X 0xfff0707f
++#define MATCH_VLH 0x200205b
++#define MASK_VLH 0xfff0707f
++#define MATCH_C_LI 0x0
++#define MASK_C_LI 0x1f
++#define MATCH_FADD_D 0x2000053
++#define MASK_FADD_D 0xfe00007f
++#define MATCH_C_LD 0x9
++#define MASK_C_LD 0x1f
++#define MATCH_VLD 0x600205b
++#define MASK_VLD 0xfff0707f
++#define MATCH_FADD_S 0x53
++#define MASK_FADD_S 0xfe00007f
++#define MATCH_C_LW 0xa
++#define MASK_C_LW 0x1f
++#define MATCH_VLW 0x400205b
++#define MASK_VLW 0xfff0707f
++#define MATCH_VSSEGSTW 0x400307b
++#define MASK_VSSEGSTW 0x1e00707f
++#define MATCH_UTIDX 0x6077
++#define MASK_UTIDX 0xfffff07f
++#define MATCH_C_FLW 0x14
++#define MASK_C_FLW 0x1f
++#define MATCH_FSUB_D 0xa000053
++#define MASK_FSUB_D 0xfe00007f
++#define MATCH_VSSEGSTD 0x600307b
++#define MASK_VSSEGSTD 0x1e00707f
++#define MATCH_VSSEGSTB 0x307b
++#define MASK_VSSEGSTB 0x1e00707f
++#define MATCH_DIV 0x2004033
++#define MASK_DIV 0xfe00707f
++#define MATCH_FMV_H_X 0xf4000053
++#define MASK_FMV_H_X 0xfff0707f
++#define MATCH_C_FLD 0x15
++#define MASK_C_FLD 0x1f
++#define MATCH_FRRM 0x202073
++#define MASK_FRRM 0xfffff07f
++#define MATCH_VFMSV_S 0x1000202b
++#define MASK_VFMSV_S 0xfff0707f
++#define MATCH_C_LWSP 0x5
++#define MASK_C_LWSP 0x1f
++#define MATCH_FENCE 0xf
++#define MASK_FENCE 0x707f
++#define MATCH_FNMSUB_S 0x4b
++#define MASK_FNMSUB_S 0x600007f
++#define MATCH_FLE_S 0xa0000053
++#define MASK_FLE_S 0xfe00707f
++#define MATCH_FNMSUB_H 0x400004b
++#define MASK_FNMSUB_H 0x600007f
++#define MATCH_FLE_H 0xbc000053
++#define MASK_FLE_H 0xfe00707f
++#define MATCH_FLW 0x2007
++#define MASK_FLW 0x707f
++#define MATCH_VSETVL 0x600b
++#define MASK_VSETVL 0xfff0707f
++#define MATCH_VFMSV_D 0x1200202b
++#define MASK_VFMSV_D 0xfff0707f
++#define MATCH_FLE_D 0xa2000053
++#define MASK_FLE_D 0xfe00707f
++#define MATCH_FENCE_I 0x100f
++#define MASK_FENCE_I 0x707f
++#define MATCH_FNMSUB_D 0x200004b
++#define MASK_FNMSUB_D 0x600007f
++#define MATCH_ADDW 0x3b
++#define MASK_ADDW 0xfe00707f
++#define MATCH_XOR 0x4033
++#define MASK_XOR 0xfe00707f
++#define MATCH_SUB 0x40000033
++#define MASK_SUB 0xfe00707f
++#define MATCH_VSSTW 0x400307b
++#define MASK_VSSTW 0xfe00707f
++#define MATCH_VSSTH 0x200307b
++#define MASK_VSSTH 0xfe00707f
++#define MATCH_SC_W 0x1800202f
++#define MASK_SC_W 0xf800707f
++#define MATCH_VSSTB 0x307b
++#define MASK_VSSTB 0xfe00707f
++#define MATCH_VSSTD 0x600307b
++#define MASK_VSSTD 0xfe00707f
++#define MATCH_ADDI 0x13
++#define MASK_ADDI 0x707f
++#define MATCH_RDTIMEH 0xc8102073
++#define MASK_RDTIMEH 0xfffff07f
++#define MATCH_MULH 0x2001033
++#define MASK_MULH 0xfe00707f
++#define MATCH_CSRRSI 0x6073
++#define MASK_CSRRSI 0x707f
++#define MATCH_FCVT_D_WU 0xd2100053
++#define MASK_FCVT_D_WU 0xfff0007f
++#define MATCH_MULW 0x200003b
++#define MASK_MULW 0xfe00707f
++#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
++#define MASK_CUSTOM1_RD_RS1_RS2 0x707f
++#define MATCH_VENQIMM1 0xc00302b
++#define MASK_VENQIMM1 0xfe007fff
++#define MATCH_VENQIMM2 0xe00302b
++#define MASK_VENQIMM2 0xfe007fff
++#define MATCH_RDINSTRET 0xc0202073
++#define MASK_RDINSTRET 0xfffff07f
++#define MATCH_C_SWSP 0x8
++#define MASK_C_SWSP 0x1f
++#define MATCH_VLSTW 0x400305b
++#define MASK_VLSTW 0xfe00707f
++#define MATCH_VLSTH 0x200305b
++#define MASK_VLSTH 0xfe00707f
++#define MATCH_VLSTB 0x305b
++#define MASK_VLSTB 0xfe00707f
++#define MATCH_VLSTD 0x600305b
++#define MASK_VLSTD 0xfe00707f
++#define MATCH_ANDI 0x7013
++#define MASK_ANDI 0x707f
++#define MATCH_FMV_X_S 0xe0000053
++#define MASK_FMV_X_S 0xfff0707f
++#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
++#define MASK_CUSTOM0_RD_RS1_RS2 0x707f
++#define MATCH_FNMADD_S 0x4f
++#define MASK_FNMADD_S 0x600007f
++#define MATCH_LWU 0x6003
++#define MASK_LWU 0x707f
++#define MATCH_CUSTOM0_RS1 0x200b
++#define MASK_CUSTOM0_RS1 0x707f
++#define MATCH_VLSEGSTBU 0x800305b
++#define MASK_VLSEGSTBU 0x1e00707f
++#define MATCH_FNMADD_D 0x200004f
++#define MASK_FNMADD_D 0x600007f
++#define MATCH_FCVT_W_S 0xc0000053
++#define MASK_FCVT_W_S 0xfff0007f
++#define MATCH_C_SRAI 0x1019
++#define MASK_C_SRAI 0x1c1f
++#define MATCH_MULHSU 0x2002033
++#define MASK_MULHSU 0xfe00707f
++#define MATCH_FCVT_D_LU 0xd2300053
++#define MASK_FCVT_D_LU 0xfff0007f
++#define MATCH_FCVT_W_D 0xc2000053
++#define MASK_FCVT_W_D 0xfff0007f
++#define MATCH_FSUB_H 0xc000053
++#define MASK_FSUB_H 0xfe00007f
++#define MATCH_DIVUW 0x200503b
++#define MASK_DIVUW 0xfe00707f
++#define MATCH_SLTI 0x2013
++#define MASK_SLTI 0x707f
++#define MATCH_VLSTBU 0x800305b
++#define MASK_VLSTBU 0xfe00707f
++#define MATCH_SLTU 0x3033
++#define MASK_SLTU 0xfe00707f
++#define MATCH_FLH 0x1007
++#define MASK_FLH 0x707f
++#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
++#define MASK_CUSTOM2_RD_RS1_RS2 0x707f
++#define MATCH_FLD 0x3007
++#define MASK_FLD 0x707f
++#define MATCH_FSUB_S 0x8000053
++#define MASK_FSUB_S 0xfe00007f
++#define MATCH_FCVT_H_LU 0x6c000053
++#define MASK_FCVT_H_LU 0xfff0007f
++#define MATCH_CUSTOM0 0xb
++#define MASK_CUSTOM0 0x707f
++#define MATCH_CUSTOM1 0x2b
++#define MASK_CUSTOM1 0x707f
++#define MATCH_CUSTOM2 0x5b
++#define MASK_CUSTOM2 0x707f
++#define MATCH_CUSTOM3 0x7b
++#define MASK_CUSTOM3 0x707f
++#define MATCH_VXCPTSAVE 0x302b
++#define MASK_VXCPTSAVE 0xfff07fff
++#define MATCH_VMSV 0x200202b
++#define MASK_VMSV 0xfff0707f
++#define MATCH_FCVT_LU_S 0xc0300053
++#define MASK_FCVT_LU_S 0xfff0007f
++#define MATCH_AUIPC 0x17
++#define MASK_AUIPC 0x7f
++#define MATCH_FRFLAGS 0x102073
++#define MASK_FRFLAGS 0xfffff07f
++#define MATCH_FCVT_LU_D 0xc2300053
++#define MASK_FCVT_LU_D 0xfff0007f
++#define MATCH_CSRRWI 0x5073
++#define MASK_CSRRWI 0x707f
++#define MATCH_FADD_H 0x4000053
++#define MASK_FADD_H 0xfe00007f
++#define MATCH_FSQRT_S 0x58000053
++#define MASK_FSQRT_S 0xfff0007f
++#define MATCH_VXCPTKILL 0x400302b
++#define MASK_VXCPTKILL 0xffffffff
++#define MATCH_STOP 0x5077
++#define MASK_STOP 0xffffffff
++#define MATCH_FSGNJN_S 0x20001053
++#define MASK_FSGNJN_S 0xfe00707f
++#define MATCH_FSGNJN_H 0x34000053
++#define MASK_FSGNJN_H 0xfe00707f
++#define MATCH_FSQRT_D 0x5a000053
++#define MASK_FSQRT_D 0xfff0007f
++#define MATCH_XORI 0x4013
++#define MASK_XORI 0x707f
++#define MATCH_DIVU 0x2005033
++#define MASK_DIVU 0xfe00707f
++#define MATCH_FSGNJN_D 0x22001053
++#define MASK_FSGNJN_D 0xfe00707f
++#define MATCH_FSQRT_H 0x24000053
++#define MASK_FSQRT_H 0xfff0007f
++#define MATCH_VSSEGSTH 0x200307b
++#define MASK_VSSEGSTH 0x1e00707f
++#define MATCH_SW 0x2023
++#define MASK_SW 0x707f
++#define MATCH_VLSTWU 0xc00305b
++#define MASK_VLSTWU 0xfe00707f
++#define MATCH_VFSSEGW 0x1400207b
++#define MASK_VFSSEGW 0x1ff0707f
++#define MATCH_LHU 0x5003
++#define MASK_LHU 0x707f
++#define MATCH_SH 0x1023
++#define MASK_SH 0x707f
++#define MATCH_FMSUB_H 0x4000047
++#define MASK_FMSUB_H 0x600007f
++#define MATCH_VXCPTAUX 0x200402b
++#define MASK_VXCPTAUX 0xfffff07f
++#define MATCH_FMSUB_D 0x2000047
++#define MASK_FMSUB_D 0x600007f
++#define MATCH_VFSSEGD 0x1600207b
++#define MASK_VFSSEGD 0x1ff0707f
++#define MATCH_VLSEGHU 0xa00205b
++#define MASK_VLSEGHU 0x1ff0707f
++#define MATCH_MOVN 0x2007077
++#define MASK_MOVN 0xfe00707f
++#define MATCH_CUSTOM1_RS1 0x202b
++#define MASK_CUSTOM1_RS1 0x707f
++#define MATCH_VLSTHU 0xa00305b
++#define MASK_VLSTHU 0xfe00707f
++#define MATCH_MOVZ 0x7077
++#define MASK_MOVZ 0xfe00707f
++#define MATCH_CSRRW 0x1073
++#define MASK_CSRRW 0x707f
++#define MATCH_LD 0x3003
++#define MASK_LD 0x707f
++#define MATCH_LB 0x3
++#define MASK_LB 0x707f
++#define MATCH_VLWU 0xc00205b
++#define MASK_VLWU 0xfff0707f
++#define MATCH_LH 0x1003
++#define MASK_LH 0x707f
++#define MATCH_LW 0x2003
++#define MASK_LW 0x707f
++#define MATCH_CSRRC 0x3073
++#define MASK_CSRRC 0x707f
++#define MATCH_FCVT_LU_H 0x4c000053
++#define MASK_FCVT_LU_H 0xfff0007f
++#define MATCH_FCVT_S_D 0x40100053
++#define MASK_FCVT_S_D 0xfff0007f
++#define MATCH_BGEU 0x7063
++#define MASK_BGEU 0x707f
++#define MATCH_VFLSTD 0x1600305b
++#define MASK_VFLSTD 0xfe00707f
++#define MATCH_FCVT_S_L 0xd0200053
++#define MASK_FCVT_S_L 0xfff0007f
++#define MATCH_FCVT_S_H 0x84000053
++#define MASK_FCVT_S_H 0xfff0007f
++#define MATCH_FSCSR 0x301073
++#define MASK_FSCSR 0xfff0707f
++#define MATCH_FCVT_S_W 0xd0000053
++#define MASK_FCVT_S_W 0xfff0007f
++#define MATCH_VFLSTW 0x1400305b
++#define MASK_VFLSTW 0xfe00707f
++#define MATCH_VXCPTEVAC 0x600302b
++#define MASK_VXCPTEVAC 0xfff07fff
++#define MATCH_AMOMINU_D 0xc000302f
++#define MASK_AMOMINU_D 0xf800707f
++#define MATCH_FSFLAGS 0x101073
++#define MASK_FSFLAGS 0xfff0707f
++#define MATCH_SRLI 0x5013
++#define MASK_SRLI 0xfc00707f
++#define MATCH_C_SRLI 0x819
++#define MASK_C_SRLI 0x1c1f
++#define MATCH_AMOMINU_W 0xc000202f
++#define MASK_AMOMINU_W 0xf800707f
++#define MATCH_SRLW 0x503b
++#define MASK_SRLW 0xfe00707f
++#define MATCH_VFLSEGW 0x1400205b
++#define MASK_VFLSEGW 0x1ff0707f
++#define MATCH_C_LD0 0x8012
++#define MASK_C_LD0 0x801f
++#define MATCH_VLSEGBU 0x800205b
++#define MASK_VLSEGBU 0x1ff0707f
++#define MATCH_JALR 0x67
++#define MASK_JALR 0x707f
++#define MATCH_BLT 0x4063
++#define MASK_BLT 0x707f
++#define MATCH_CUSTOM2_RD_RS1 0x605b
++#define MASK_CUSTOM2_RD_RS1 0x707f
++#define MATCH_FCLASS_S 0xe0001053
++#define MASK_FCLASS_S 0xfff0707f
++#define MATCH_REM 0x2006033
++#define MASK_REM 0xfe00707f
++#define MATCH_FCLASS_D 0xe2001053
++#define MASK_FCLASS_D 0xfff0707f
++#define MATCH_FMUL_S 0x10000053
++#define MASK_FMUL_S 0xfe00007f
++#define MATCH_RDCYCLEH 0xc8002073
++#define MASK_RDCYCLEH 0xfffff07f
++#define MATCH_VLSEGSTHU 0xa00305b
++#define MASK_VLSEGSTHU 0x1e00707f
++#define MATCH_FMUL_D 0x12000053
++#define MASK_FMUL_D 0xfe00007f
++#define MATCH_ORI 0x6013
++#define MASK_ORI 0x707f
++#define MATCH_FMUL_H 0x14000053
++#define MASK_FMUL_H 0xfe00007f
++#define MATCH_VFLSEGD 0x1600205b
++#define MASK_VFLSEGD 0x1ff0707f
++#define MATCH_FEQ_S 0xa0002053
++#define MASK_FEQ_S 0xfe00707f
++#define MATCH_FSGNJX_D 0x22002053
++#define MASK_FSGNJX_D 0xfe00707f
++#define MATCH_SRAIW 0x4000501b
++#define MASK_SRAIW 0xfe00707f
++#define MATCH_FSGNJX_H 0x3c000053
++#define MASK_FSGNJX_H 0xfe00707f
++#define MATCH_FSGNJX_S 0x20002053
++#define MASK_FSGNJX_S 0xfe00707f
++#define MATCH_FEQ_D 0xa2002053
++#define MASK_FEQ_D 0xfe00707f
++#define MATCH_CUSTOM1_RD_RS1 0x602b
++#define MASK_CUSTOM1_RD_RS1 0x707f
++#define MATCH_FEQ_H 0xac000053
++#define MASK_FEQ_H 0xfe00707f
++#define MATCH_AMOMAXU_D 0xe000302f
++#define MASK_AMOMAXU_D 0xf800707f
++#define MATCH_DIVW 0x200403b
++#define MASK_DIVW 0xfe00707f
++#define MATCH_AMOMAXU_W 0xe000202f
++#define MASK_AMOMAXU_W 0xf800707f
++#define MATCH_SRAI_RV32 0x40005013
++#define MASK_SRAI_RV32 0xfe00707f
++#define MATCH_C_SRLI32 0xc19
++#define MASK_C_SRLI32 0x1c1f
++#define MATCH_VFSSTW 0x1400307b
++#define MASK_VFSSTW 0xfe00707f
++#define MATCH_CUSTOM0_RD 0x400b
++#define MASK_CUSTOM0_RD 0x707f
++#define MATCH_C_BEQ 0x10
++#define MASK_C_BEQ 0x1f
++#define MATCH_VFSSTD 0x1600307b
++#define MASK_VFSSTD 0xfe00707f
++#define MATCH_CUSTOM3_RD_RS1 0x607b
++#define MASK_CUSTOM3_RD_RS1 0x707f
++#define MATCH_LR_D 0x1000302f
++#define MASK_LR_D 0xf9f0707f
++#define MATCH_LR_W 0x1000202f
++#define MASK_LR_W 0xf9f0707f
++#define MATCH_FCVT_H_WU 0x7c000053
++#define MASK_FCVT_H_WU 0xfff0007f
++#define MATCH_VMVV 0x200002b
++#define MASK_VMVV 0xfff0707f
++#define MATCH_SLLW 0x103b
++#define MASK_SLLW 0xfe00707f
++#define MATCH_SLLI 0x1013
++#define MASK_SLLI 0xfc00707f
++#define MATCH_BEQ 0x63
++#define MASK_BEQ 0x707f
++#define MATCH_AND 0x7033
++#define MASK_AND 0xfe00707f
++#define MATCH_LBU 0x4003
++#define MASK_LBU 0x707f
++#define MATCH_FSGNJ_S 0x20000053
++#define MASK_FSGNJ_S 0xfe00707f
++#define MATCH_FMSUB_S 0x47
++#define MASK_FMSUB_S 0x600007f
++#define MATCH_C_SUB3 0x11c
++#define MASK_C_SUB3 0x31f
++#define MATCH_FSGNJ_H 0x2c000053
++#define MASK_FSGNJ_H 0xfe00707f
++#define MATCH_VLB 0x205b
++#define MASK_VLB 0xfff0707f
++#define MATCH_C_ADDIW 0x1d
++#define MASK_C_ADDIW 0x1f
++#define MATCH_CUSTOM3_RS1_RS2 0x307b
++#define MASK_CUSTOM3_RS1_RS2 0x707f
++#define MATCH_FSGNJ_D 0x22000053
++#define MASK_FSGNJ_D 0xfe00707f
++#define MATCH_VLSEGWU 0xc00205b
++#define MASK_VLSEGWU 0x1ff0707f
++#define MATCH_FCVT_S_WU 0xd0100053
++#define MASK_FCVT_S_WU 0xfff0007f
++#define MATCH_CUSTOM3_RS1 0x207b
++#define MASK_CUSTOM3_RS1 0x707f
++#define MATCH_SC_D 0x1800302f
++#define MASK_SC_D 0xf800707f
++#define MATCH_VFSW 0x1400207b
++#define MASK_VFSW 0xfff0707f
++#define MATCH_AMOSWAP_D 0x800302f
++#define MASK_AMOSWAP_D 0xf800707f
++#define MATCH_SB 0x23
++#define MASK_SB 0x707f
++#define MATCH_AMOSWAP_W 0x800202f
++#define MASK_AMOSWAP_W 0xf800707f
++#define MATCH_VFSD 0x1600207b
++#define MASK_VFSD 0xfff0707f
++#define MATCH_CUSTOM2_RS1 0x205b
++#define MASK_CUSTOM2_RS1 0x707f
++#define MATCH_SD 0x3023
++#define MASK_SD 0x707f
++#define MATCH_FMV_S_X 0xf0000053
++#define MASK_FMV_S_X 0xfff0707f
++#define MATCH_REMUW 0x200703b
++#define MASK_REMUW 0xfe00707f
++#define MATCH_JAL 0x6f
++#define MASK_JAL 0x7f
++#define MATCH_C_FSD 0x18
++#define MASK_C_FSD 0x1f
++#define MATCH_RDCYCLE 0xc0002073
++#define MASK_RDCYCLE 0xfffff07f
++#define MATCH_C_BNE 0x11
++#define MASK_C_BNE 0x1f
++#define MATCH_C_ADD 0x1a
++#define MASK_C_ADD 0x801f
++#define MATCH_VXCPTCAUSE 0x402b
++#define MASK_VXCPTCAUSE 0xfffff07f
++#define MATCH_VGETCFG 0x400b
++#define MASK_VGETCFG 0xfffff07f
++#define MATCH_LUI 0x37
++#define MASK_LUI 0x7f
++#define MATCH_VSETCFG 0x200b
++#define MASK_VSETCFG 0x7fff
++#define MATCH_C_SDSP 0x6
++#define MASK_C_SDSP 0x1f
++#define MATCH_C_LDSP 0x4
++#define MASK_C_LDSP 0x1f
++#define MATCH_FNMADD_H 0x400004f
++#define MASK_FNMADD_H 0x600007f
++#define MATCH_CUSTOM0_RS1_RS2 0x300b
++#define MASK_CUSTOM0_RS1_RS2 0x707f
++#define MATCH_SLLI_RV32 0x1013
++#define MASK_SLLI_RV32 0xfe00707f
++#define MATCH_MUL 0x2000033
++#define MASK_MUL 0xfe00707f
++#define MATCH_CSRRCI 0x7073
++#define MASK_CSRRCI 0x707f
++#define MATCH_C_SRAI32 0x1419
++#define MASK_C_SRAI32 0x1c1f
++#define MATCH_FLT_H 0xb4000053
++#define MASK_FLT_H 0xfe00707f
++#define MATCH_SRAI 0x40005013
++#define MASK_SRAI 0xfc00707f
++#define MATCH_AMOAND_D 0x6000302f
++#define MASK_AMOAND_D 0xf800707f
++#define MATCH_FLT_D 0xa2001053
++#define MASK_FLT_D 0xfe00707f
++#define MATCH_SRAW 0x4000503b
++#define MASK_SRAW 0xfe00707f
++#define MATCH_CSRRS 0x2073
++#define MASK_CSRRS 0x707f
++#define MATCH_FLT_S 0xa0001053
++#define MASK_FLT_S 0xfe00707f
++#define MATCH_ADDIW 0x1b
++#define MASK_ADDIW 0x707f
++#define MATCH_AMOAND_W 0x6000202f
++#define MASK_AMOAND_W 0xf800707f
++#define MATCH_CUSTOM2_RD 0x405b
++#define MASK_CUSTOM2_RD 0x707f
++#define MATCH_FCVT_WU_D 0xc2100053
++#define MASK_FCVT_WU_D 0xfff0007f
++#define MATCH_AMOXOR_W 0x2000202f
++#define MASK_AMOXOR_W 0xf800707f
++#define MATCH_FCVT_D_L 0xd2200053
++#define MASK_FCVT_D_L 0xfff0007f
++#define MATCH_FCVT_WU_H 0x5c000053
++#define MASK_FCVT_WU_H 0xfff0007f
++#define MATCH_C_SLLI 0x19
++#define MASK_C_SLLI 0x1c1f
++#define MATCH_AMOXOR_D 0x2000302f
++#define MASK_AMOXOR_D 0xf800707f
++#define MATCH_FCVT_WU_S 0xc0100053
++#define MASK_FCVT_WU_S 0xfff0007f
++#define MATCH_CUSTOM3_RD 0x407b
++#define MASK_CUSTOM3_RD 0x707f
++#define MATCH_FMAX_H 0xcc000053
++#define MASK_FMAX_H 0xfe00707f
++#define MATCH_VENQCNT 0x1000302b
++#define MASK_VENQCNT 0xfe007fff
++#define MATCH_VLBU 0x800205b
++#define MASK_VLBU 0xfff0707f
++#define MATCH_VLHU 0xa00205b
++#define MASK_VLHU 0xfff0707f
++#define MATCH_C_SW 0xd
++#define MASK_C_SW 0x1f
++#define MATCH_C_SD 0xc
++#define MASK_C_SD 0x1f
++#define MATCH_C_OR3 0x21c
++#define MASK_C_OR3 0x31f
++#define MATCH_C_AND3 0x31c
++#define MASK_C_AND3 0x31f
++#define MATCH_VFSSEGSTW 0x1400307b
++#define MASK_VFSSEGSTW 0x1e00707f
++#define MATCH_SLT 0x2033
++#define MASK_SLT 0xfe00707f
++#define MATCH_AMOOR_D 0x4000302f
++#define MASK_AMOOR_D 0xf800707f
++#define MATCH_REMU 0x2007033
++#define MASK_REMU 0xfe00707f
++#define MATCH_REMW 0x200603b
++#define MASK_REMW 0xfe00707f
++#define MATCH_SLL 0x1033
++#define MASK_SLL 0xfe00707f
++#define MATCH_VFSSEGSTD 0x1600307b
++#define MASK_VFSSEGSTD 0x1e00707f
++#define MATCH_AMOOR_W 0x4000202f
++#define MASK_AMOOR_W 0xf800707f
++#define MATCH_CUSTOM2_RS1_RS2 0x305b
++#define MASK_CUSTOM2_RS1_RS2 0x707f
++#define MATCH_VF 0x10202b
++#define MASK_VF 0x1f0707f
++#define MATCH_VFMVV 0x1000002b
++#define MASK_VFMVV 0xfff0707f
++#define MATCH_VFLSEGSTW 0x1400305b
++#define MASK_VFLSEGSTW 0x1e00707f
++#define MATCH_VXCPTRESTORE 0x200302b
++#define MASK_VXCPTRESTORE 0xfff07fff
++#define MATCH_VXCPTHOLD 0x800302b
++#define MASK_VXCPTHOLD 0xffffffff
++#define MATCH_SLTIU 0x3013
++#define MASK_SLTIU 0x707f
++#define MATCH_VFLSEGSTD 0x1600305b
++#define MASK_VFLSEGSTD 0x1e00707f
++#define MATCH_VFLD 0x1600205b
++#define MASK_VFLD 0xfff0707f
++#define MATCH_FMADD_S 0x43
++#define MASK_FMADD_S 0x600007f
++#define MATCH_VFLW 0x1400205b
++#define MASK_VFLW 0xfff0707f
++#define MATCH_FMADD_D 0x2000043
++#define MASK_FMADD_D 0x600007f
++#define MATCH_FMADD_H 0x4000043
++#define MASK_FMADD_H 0x600007f
++#define MATCH_SRET 0x80000073
++#define MASK_SRET 0xffffffff
++#define MATCH_VSSEGW 0x400207b
++#define MASK_VSSEGW 0x1ff0707f
++#define MATCH_CUSTOM0_RD_RS1 0x600b
++#define MASK_CUSTOM0_RD_RS1 0x707f
++#define MATCH_VSSEGH 0x200207b
++#define MASK_VSSEGH 0x1ff0707f
++#define MATCH_FRCSR 0x302073
++#define MASK_FRCSR 0xfffff07f
++#define MATCH_VSSEGD 0x600207b
++#define MASK_VSSEGD 0x1ff0707f
++#define MATCH_VSSEGB 0x207b
++#define MASK_VSSEGB 0x1ff0707f
++#define MATCH_FMIN_H 0xc4000053
++#define MASK_FMIN_H 0xfe00707f
++#define MATCH_FMIN_D 0x2a000053
++#define MASK_FMIN_D 0xfe00707f
++#define MATCH_BLTU 0x6063
++#define MASK_BLTU 0x707f
++#define MATCH_FMIN_S 0x28000053
++#define MASK_FMIN_S 0xfe00707f
++#define MATCH_SRLI_RV32 0x5013
++#define MASK_SRLI_RV32 0xfe00707f
++#define MATCH_SLLIW 0x101b
++#define MASK_SLLIW 0xfe00707f
++#define MATCH_FMAX_S 0x28001053
++#define MASK_FMAX_S 0xfe00707f
++#define MATCH_FCVT_D_H 0x8c000053
++#define MASK_FCVT_D_H 0xfff0007f
++#define MATCH_FCVT_D_W 0xd2000053
++#define MASK_FCVT_D_W 0xfff0007f
++#define MATCH_ADD 0x33
++#define MASK_ADD 0xfe00707f
++#define MATCH_FCVT_D_S 0x42000053
++#define MASK_FCVT_D_S 0xfff0007f
++#define MATCH_FMAX_D 0x2a001053
++#define MASK_FMAX_D 0xfe00707f
++#define MATCH_BNE 0x1063
++#define MASK_BNE 0x707f
++#define MATCH_CUSTOM1_RD 0x402b
++#define MASK_CUSTOM1_RD 0x707f
++#define MATCH_FSRM 0x201073
++#define MASK_FSRM 0xfff0707f
++#define MATCH_FDIV_D 0x1a000053
++#define MASK_FDIV_D 0xfe00007f
++#define MATCH_VSW 0x400207b
++#define MASK_VSW 0xfff0707f
++#define MATCH_FCVT_L_S 0xc0200053
++#define MASK_FCVT_L_S 0xfff0007f
++#define MATCH_FDIV_H 0x1c000053
++#define MASK_FDIV_H 0xfe00007f
++#define MATCH_VSB 0x207b
++#define MASK_VSB 0xfff0707f
++#define MATCH_FDIV_S 0x18000053
++#define MASK_FDIV_S 0xfe00007f
++#define MATCH_FSRMI 0x205073
++#define MASK_FSRMI 0xfff0707f
++#define MATCH_FCVT_L_H 0x44000053
++#define MASK_FCVT_L_H 0xfff0007f
++#define MATCH_VSH 0x200207b
++#define MASK_VSH 0xfff0707f
++#define MATCH_FCVT_L_D 0xc2200053
++#define MASK_FCVT_L_D 0xfff0007f
++#define MATCH_FCVT_H_S 0x90000053
++#define MASK_FCVT_H_S 0xfff0007f
++#define MATCH_SCALL 0x73
++#define MASK_SCALL 0xffffffff
++#define MATCH_FSFLAGSI 0x105073
++#define MASK_FSFLAGSI 0xfff0707f
++#define MATCH_FCVT_H_W 0x74000053
++#define MASK_FCVT_H_W 0xfff0007f
++#define MATCH_FCVT_H_L 0x64000053
++#define MASK_FCVT_H_L 0xfff0007f
++#define MATCH_SRLIW 0x501b
++#define MASK_SRLIW 0xfe00707f
++#define MATCH_FCVT_S_LU 0xd0300053
++#define MASK_FCVT_S_LU 0xfff0007f
++#define MATCH_FCVT_H_D 0x92000053
++#define MASK_FCVT_H_D 0xfff0007f
++#define MATCH_SBREAK 0x100073
++#define MASK_SBREAK 0xffffffff
++#define MATCH_RDINSTRETH 0xc8202073
++#define MASK_RDINSTRETH 0xfffff07f
++#define MATCH_SRA 0x40005033
++#define MASK_SRA 0xfe00707f
++#define MATCH_BGE 0x5063
++#define MASK_BGE 0x707f
++#define MATCH_SRL 0x5033
++#define MASK_SRL 0xfe00707f
++#define MATCH_VENQCMD 0xa00302b
++#define MASK_VENQCMD 0xfe007fff
++#define MATCH_OR 0x6033
++#define MASK_OR 0xfe00707f
++#define MATCH_SUBW 0x4000003b
++#define MASK_SUBW 0xfe00707f
++#define MATCH_FMV_X_D 0xe2000053
++#define MASK_FMV_X_D 0xfff0707f
++#define MATCH_RDTIME 0xc0102073
++#define MASK_RDTIME 0xfffff07f
++#define MATCH_AMOADD_D 0x302f
++#define MASK_AMOADD_D 0xf800707f
++#define MATCH_AMOMAX_W 0xa000202f
++#define MASK_AMOMAX_W 0xf800707f
++#define MATCH_C_MOVE 0x2
++#define MASK_C_MOVE 0x801f
++#define MATCH_FMOVN 0x6007077
++#define MASK_FMOVN 0xfe00707f
++#define MATCH_C_FSW 0x16
++#define MASK_C_FSW 0x1f
++#define MATCH_AMOADD_W 0x202f
++#define MASK_AMOADD_W 0xf800707f
++#define MATCH_AMOMAX_D 0xa000302f
++#define MASK_AMOMAX_D 0xf800707f
++#define MATCH_FMOVZ 0x4007077
++#define MASK_FMOVZ 0xfe00707f
++#define MATCH_CUSTOM1_RS1_RS2 0x302b
++#define MASK_CUSTOM1_RS1_RS2 0x707f
++#define MATCH_FMV_X_H 0xe4000053
++#define MASK_FMV_X_H 0xfff0707f
++#define MATCH_VSD 0x600207b
++#define MASK_VSD 0xfff0707f
++#define MATCH_VLSEGSTW 0x400305b
++#define MASK_VLSEGSTW 0x1e00707f
++#define MATCH_C_ADDI 0x1
++#define MASK_C_ADDI 0x1f
++#define MATCH_C_SLLIW 0x1819
++#define MASK_C_SLLIW 0x1c1f
++#define MATCH_VLSEGSTB 0x305b
++#define MASK_VLSEGSTB 0x1e00707f
++#define MATCH_VLSEGSTD 0x600305b
++#define MASK_VLSEGSTD 0x1e00707f
++#define MATCH_VLSEGSTH 0x200305b
++#define MASK_VLSEGSTH 0x1e00707f
++#define MATCH_MULHU 0x2003033
++#define MASK_MULHU 0xfe00707f
++#define MATCH_AMOMIN_W 0x8000202f
++#define MASK_AMOMIN_W 0xf800707f
++#define MATCH_C_SLLI32 0x419
++#define MASK_C_SLLI32 0x1c1f
++#define MATCH_C_ADD3 0x1c
++#define MASK_C_ADD3 0x31f
++#define MATCH_VGETVL 0x200400b
++#define MASK_VGETVL 0xfffff07f
++#define MATCH_AMOMIN_D 0x8000302f
++#define MASK_AMOMIN_D 0xf800707f
++#define MATCH_FCVT_W_H 0x54000053
++#define MASK_FCVT_W_H 0xfff0007f
++#define MATCH_VLSEGB 0x205b
++#define MASK_VLSEGB 0x1ff0707f
++#define MATCH_FSD 0x3027
++#define MASK_FSD 0x707f
++#define MATCH_VLSEGD 0x600205b
++#define MASK_VLSEGD 0x1ff0707f
++#define MATCH_FSH 0x1027
++#define MASK_FSH 0x707f
++#define MATCH_VLSEGH 0x200205b
++#define MASK_VLSEGH 0x1ff0707f
++#define MATCH_C_SUB 0x801a
++#define MASK_C_SUB 0x801f
++#define MATCH_VLSEGW 0x400205b
++#define MASK_VLSEGW 0x1ff0707f
++#define MATCH_FSW 0x2027
++#define MASK_FSW 0x707f
++#define MATCH_C_J 0x8002
++#define MASK_C_J 0x801f
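++/* Illustrative use of the MATCH/MASK pairs above: an instruction word W
++   decodes as instruction X exactly when (W & MASK_X) == MATCH_X.  A
++   minimal sketch; insn_is_add is a hypothetical helper shown only for
++   illustration, not defined by this header:
++
++     static inline int insn_is_add (uint32_t w)
++     {
++       return (w & MASK_ADD) == MATCH_ADD;
++     }
++*/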
++#define CSR_FFLAGS 0x1
++#define CSR_FRM 0x2
++#define CSR_FCSR 0x3
++#define CSR_STATS 0xc0
++#define CSR_SUP0 0x500
++#define CSR_SUP1 0x501
++#define CSR_EPC 0x502
++#define CSR_BADVADDR 0x503
++#define CSR_PTBR 0x504
++#define CSR_ASID 0x505
++#define CSR_COUNT 0x506
++#define CSR_COMPARE 0x507
++#define CSR_EVEC 0x508
++#define CSR_CAUSE 0x509
++#define CSR_STATUS 0x50a
++#define CSR_HARTID 0x50b
++#define CSR_IMPL 0x50c
++#define CSR_FATC 0x50d
++#define CSR_SEND_IPI 0x50e
++#define CSR_CLEAR_IPI 0x50f
++#define CSR_RESET 0x51d
++#define CSR_TOHOST 0x51e
++#define CSR_FROMHOST 0x51f
++#define CSR_CYCLE 0xc00
++#define CSR_TIME 0xc01
++#define CSR_INSTRET 0xc02
++#define CSR_UARCH0 0xcc0
++#define CSR_UARCH1 0xcc1
++#define CSR_UARCH2 0xcc2
++#define CSR_UARCH3 0xcc3
++#define CSR_UARCH4 0xcc4
++#define CSR_UARCH5 0xcc5
++#define CSR_UARCH6 0xcc6
++#define CSR_UARCH7 0xcc7
++#define CSR_UARCH8 0xcc8
++#define CSR_UARCH9 0xcc9
++#define CSR_UARCH10 0xcca
++#define CSR_UARCH11 0xccb
++#define CSR_UARCH12 0xccc
++#define CSR_UARCH13 0xccd
++#define CSR_UARCH14 0xcce
++#define CSR_UARCH15 0xccf
++#define CSR_COUNTH 0x586
++#define CSR_CYCLEH 0xc80
++#define CSR_TIMEH 0xc81
++#define CSR_INSTRETH 0xc82
++#define CAUSE_MISALIGNED_FETCH 0x0
++#define CAUSE_FAULT_FETCH 0x1
++#define CAUSE_ILLEGAL_INSTRUCTION 0x2
++#define CAUSE_PRIVILEGED_INSTRUCTION 0x3
++#define CAUSE_FP_DISABLED 0x4
++#define CAUSE_SYSCALL 0x6
++#define CAUSE_BREAKPOINT 0x7
++#define CAUSE_MISALIGNED_LOAD 0x8
++#define CAUSE_MISALIGNED_STORE 0x9
++#define CAUSE_FAULT_LOAD 0xa
++#define CAUSE_FAULT_STORE 0xb
++#define CAUSE_ACCELERATOR_DISABLED 0xc
++#endif
++#ifdef DECLARE_INSN
++DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
++DECLARE_INSN(vlsegstwu, MATCH_VLSEGSTWU, MASK_VLSEGSTWU)
++DECLARE_INSN(c_lw0, MATCH_C_LW0, MASK_C_LW0)
++DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
++DECLARE_INSN(vlh, MATCH_VLH, MASK_VLH)
++DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
++DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
++DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
++DECLARE_INSN(vld, MATCH_VLD, MASK_VLD)
++DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
++DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
++DECLARE_INSN(vlw, MATCH_VLW, MASK_VLW)
++DECLARE_INSN(vssegstw, MATCH_VSSEGSTW, MASK_VSSEGSTW)
++DECLARE_INSN(utidx, MATCH_UTIDX, MASK_UTIDX)
++DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW)
++DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
++DECLARE_INSN(vssegstd, MATCH_VSSEGSTD, MASK_VSSEGSTD)
++DECLARE_INSN(vssegstb, MATCH_VSSEGSTB, MASK_VSSEGSTB)
++DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
++DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X)
++DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
++DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
++DECLARE_INSN(vfmsv_s, MATCH_VFMSV_S, MASK_VFMSV_S)
++DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
++DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
++DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
++DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
++DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H)
++DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H)
++DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
++DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
++DECLARE_INSN(vfmsv_d, MATCH_VFMSV_D, MASK_VFMSV_D)
++DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
++DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
++DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
++DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
++DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
++DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
++DECLARE_INSN(vsstw, MATCH_VSSTW, MASK_VSSTW)
++DECLARE_INSN(vssth, MATCH_VSSTH, MASK_VSSTH)
++DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
++DECLARE_INSN(vsstb, MATCH_VSSTB, MASK_VSSTB)
++DECLARE_INSN(vsstd, MATCH_VSSTD, MASK_VSSTD)
++DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
++DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
++DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
++DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
++DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
++DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
++DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
++DECLARE_INSN(venqimm1, MATCH_VENQIMM1, MASK_VENQIMM1)
++DECLARE_INSN(venqimm2, MATCH_VENQIMM2, MASK_VENQIMM2)
++DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
++DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
++DECLARE_INSN(vlstw, MATCH_VLSTW, MASK_VLSTW)
++DECLARE_INSN(vlsth, MATCH_VLSTH, MASK_VLSTH)
++DECLARE_INSN(vlstb, MATCH_VLSTB, MASK_VLSTB)
++DECLARE_INSN(vlstd, MATCH_VLSTD, MASK_VLSTD)
++DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
++DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
++DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
++DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
++DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
++DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
++DECLARE_INSN(vlsegstbu, MATCH_VLSEGSTBU, MASK_VLSEGSTBU)
++DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
++DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
++DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
++DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
++DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
++DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
++DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H)
++DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
++DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
++DECLARE_INSN(vlstbu, MATCH_VLSTBU, MASK_VLSTBU)
++DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
++DECLARE_INSN(flh, MATCH_FLH, MASK_FLH)
++DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
++DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
++DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
++DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU)
++DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
++DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
++DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
++DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
++DECLARE_INSN(vxcptsave, MATCH_VXCPTSAVE, MASK_VXCPTSAVE)
++DECLARE_INSN(vmsv, MATCH_VMSV, MASK_VMSV)
++DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
++DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
++DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
++DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
++DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
++DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H)
++DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
++DECLARE_INSN(vxcptkill, MATCH_VXCPTKILL, MASK_VXCPTKILL)
++DECLARE_INSN(stop, MATCH_STOP, MASK_STOP)
++DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
++DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H)
++DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
++DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
++DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
++DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
++DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H)
++DECLARE_INSN(vssegsth, MATCH_VSSEGSTH, MASK_VSSEGSTH)
++DECLARE_INSN(sw, MATCH_SW, MASK_SW)
++DECLARE_INSN(vlstwu, MATCH_VLSTWU, MASK_VLSTWU)
++DECLARE_INSN(vfssegw, MATCH_VFSSEGW, MASK_VFSSEGW)
++DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
++DECLARE_INSN(sh, MATCH_SH, MASK_SH)
++DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H)
++DECLARE_INSN(vxcptaux, MATCH_VXCPTAUX, MASK_VXCPTAUX)
++DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
++DECLARE_INSN(vfssegd, MATCH_VFSSEGD, MASK_VFSSEGD)
++DECLARE_INSN(vlseghu, MATCH_VLSEGHU, MASK_VLSEGHU)
++DECLARE_INSN(movn, MATCH_MOVN, MASK_MOVN)
++DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
++DECLARE_INSN(vlsthu, MATCH_VLSTHU, MASK_VLSTHU)
++DECLARE_INSN(movz, MATCH_MOVZ, MASK_MOVZ)
++DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
++DECLARE_INSN(ld, MATCH_LD, MASK_LD)
++DECLARE_INSN(lb, MATCH_LB, MASK_LB)
++DECLARE_INSN(vlwu, MATCH_VLWU, MASK_VLWU)
++DECLARE_INSN(lh, MATCH_LH, MASK_LH)
++DECLARE_INSN(lw, MATCH_LW, MASK_LW)
++DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
++DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H)
++DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
++DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
++DECLARE_INSN(vflstd, MATCH_VFLSTD, MASK_VFLSTD)
++DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
++DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H)
++DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
++DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
++DECLARE_INSN(vflstw, MATCH_VFLSTW, MASK_VFLSTW)
++DECLARE_INSN(vxcptevac, MATCH_VXCPTEVAC, MASK_VXCPTEVAC)
++DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
++DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
++DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
++DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
++DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
++DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
++DECLARE_INSN(vflsegw, MATCH_VFLSEGW, MASK_VFLSEGW)
++DECLARE_INSN(c_ld0, MATCH_C_LD0, MASK_C_LD0)
++DECLARE_INSN(vlsegbu, MATCH_VLSEGBU, MASK_VLSEGBU)
++DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
++DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
++DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
++DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
++DECLARE_INSN(rem, MATCH_REM, MASK_REM)
++DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
++DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
++DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
++DECLARE_INSN(vlsegsthu, MATCH_VLSEGSTHU, MASK_VLSEGSTHU)
++DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
++DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
++DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H)
++DECLARE_INSN(vflsegd, MATCH_VFLSEGD, MASK_VFLSEGD)
++DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
++DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
++DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
++DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H)
++DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
++DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
++DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
++DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H)
++DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
++DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
++DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
++DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
++DECLARE_INSN(c_srli32, MATCH_C_SRLI32, MASK_C_SRLI32)
++DECLARE_INSN(vfsstw, MATCH_VFSSTW, MASK_VFSSTW)
++DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
++DECLARE_INSN(c_beq, MATCH_C_BEQ, MASK_C_BEQ)
++DECLARE_INSN(vfsstd, MATCH_VFSSTD, MASK_VFSSTD)
++DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
++DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
++DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
++DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU)
++DECLARE_INSN(vmvv, MATCH_VMVV, MASK_VMVV)
++DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
++DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
++DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
++DECLARE_INSN(and, MATCH_AND, MASK_AND)
++DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
++DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
++DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
++DECLARE_INSN(c_sub3, MATCH_C_SUB3, MASK_C_SUB3)
++DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H)
++DECLARE_INSN(vlb, MATCH_VLB, MASK_VLB)
++DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
++DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
++DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
++DECLARE_INSN(vlsegwu, MATCH_VLSEGWU, MASK_VLSEGWU)
++DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
++DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
++DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
++DECLARE_INSN(vfsw, MATCH_VFSW, MASK_VFSW)
++DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
++DECLARE_INSN(sb, MATCH_SB, MASK_SB)
++DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
++DECLARE_INSN(vfsd, MATCH_VFSD, MASK_VFSD)
++DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
++DECLARE_INSN(sd, MATCH_SD, MASK_SD)
++DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
++DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
++DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
++DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD)
++DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
++DECLARE_INSN(c_bne, MATCH_C_BNE, MASK_C_BNE)
++DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
++DECLARE_INSN(vxcptcause, MATCH_VXCPTCAUSE, MASK_VXCPTCAUSE)
++DECLARE_INSN(vgetcfg, MATCH_VGETCFG, MASK_VGETCFG)
++DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
++DECLARE_INSN(vsetcfg, MATCH_VSETCFG, MASK_VSETCFG)
++DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
++DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
++DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H)
++DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
++DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
++DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
++DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
++DECLARE_INSN(c_srai32, MATCH_C_SRAI32, MASK_C_SRAI32)
++DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H)
++DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
++DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
++DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
++DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
++DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
++DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
++DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
++DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
++DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
++DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
++DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
++DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
++DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H)
++DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
++DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
++DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
++DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
++DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H)
++DECLARE_INSN(venqcnt, MATCH_VENQCNT, MASK_VENQCNT)
++DECLARE_INSN(vlbu, MATCH_VLBU, MASK_VLBU)
++DECLARE_INSN(vlhu, MATCH_VLHU, MASK_VLHU)
++DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
++DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
++DECLARE_INSN(c_or3, MATCH_C_OR3, MASK_C_OR3)
++DECLARE_INSN(c_and3, MATCH_C_AND3, MASK_C_AND3)
++DECLARE_INSN(vfssegstw, MATCH_VFSSEGSTW, MASK_VFSSEGSTW)
++DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
++DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
++DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
++DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
++DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
++DECLARE_INSN(vfssegstd, MATCH_VFSSEGSTD, MASK_VFSSEGSTD)
++DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
++DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
++DECLARE_INSN(vf, MATCH_VF, MASK_VF)
++DECLARE_INSN(vfmvv, MATCH_VFMVV, MASK_VFMVV)
++DECLARE_INSN(vflsegstw, MATCH_VFLSEGSTW, MASK_VFLSEGSTW)
++DECLARE_INSN(vxcptrestore, MATCH_VXCPTRESTORE, MASK_VXCPTRESTORE)
++DECLARE_INSN(vxcpthold, MATCH_VXCPTHOLD, MASK_VXCPTHOLD)
++DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
++DECLARE_INSN(vflsegstd, MATCH_VFLSEGSTD, MASK_VFLSEGSTD)
++DECLARE_INSN(vfld, MATCH_VFLD, MASK_VFLD)
++DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
++DECLARE_INSN(vflw, MATCH_VFLW, MASK_VFLW)
++DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
++DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H)
++DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
++DECLARE_INSN(vssegw, MATCH_VSSEGW, MASK_VSSEGW)
++DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
++DECLARE_INSN(vssegh, MATCH_VSSEGH, MASK_VSSEGH)
++DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
++DECLARE_INSN(vssegd, MATCH_VSSEGD, MASK_VSSEGD)
++DECLARE_INSN(vssegb, MATCH_VSSEGB, MASK_VSSEGB)
++DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H)
++DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
++DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
++DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
++DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
++DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
++DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
++DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H)
++DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
++DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
++DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
++DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
++DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
++DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
++DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
++DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
++DECLARE_INSN(vsw, MATCH_VSW, MASK_VSW)
++DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
++DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H)
++DECLARE_INSN(vsb, MATCH_VSB, MASK_VSB)
++DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
++DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
++DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H)
++DECLARE_INSN(vsh, MATCH_VSH, MASK_VSH)
++DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
++DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S)
++DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
++DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
++DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W)
++DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L)
++DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
++DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
++DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D)
++DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
++DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
++DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
++DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
++DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
++DECLARE_INSN(venqcmd, MATCH_VENQCMD, MASK_VENQCMD)
++DECLARE_INSN(or, MATCH_OR, MASK_OR)
++DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
++DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
++DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
++DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
++DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
++DECLARE_INSN(c_move, MATCH_C_MOVE, MASK_C_MOVE)
++DECLARE_INSN(fmovn, MATCH_FMOVN, MASK_FMOVN)
++DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW)
++DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
++DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
++DECLARE_INSN(fmovz, MATCH_FMOVZ, MASK_FMOVZ)
++DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
++DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H)
++DECLARE_INSN(vsd, MATCH_VSD, MASK_VSD)
++DECLARE_INSN(vlsegstw, MATCH_VLSEGSTW, MASK_VLSEGSTW)
++DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
++DECLARE_INSN(c_slliw, MATCH_C_SLLIW, MASK_C_SLLIW)
++DECLARE_INSN(vlsegstb, MATCH_VLSEGSTB, MASK_VLSEGSTB)
++DECLARE_INSN(vlsegstd, MATCH_VLSEGSTD, MASK_VLSEGSTD)
++DECLARE_INSN(vlsegsth, MATCH_VLSEGSTH, MASK_VLSEGSTH)
++DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
++DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
++DECLARE_INSN(c_slli32, MATCH_C_SLLI32, MASK_C_SLLI32)
++DECLARE_INSN(c_add3, MATCH_C_ADD3, MASK_C_ADD3)
++DECLARE_INSN(vgetvl, MATCH_VGETVL, MASK_VGETVL)
++DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
++DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H)
++DECLARE_INSN(vlsegb, MATCH_VLSEGB, MASK_VLSEGB)
++DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
++DECLARE_INSN(vlsegd, MATCH_VLSEGD, MASK_VLSEGD)
++DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH)
++DECLARE_INSN(vlsegh, MATCH_VLSEGH, MASK_VLSEGH)
++DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
++DECLARE_INSN(vlsegw, MATCH_VLSEGW, MASK_VLSEGW)
++DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
++DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
++#endif
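++/* The DECLARE_INSN list above is an X-macro: a consumer defines
++   DECLARE_INSN, includes this header, and receives one expansion per
++   instruction.  A sketch of a hypothetical consumer building a lookup
++   table (riscv-dis.c below uses the same idiom with DECLARE_CSR):
++
++     #define DECLARE_INSN(name, match, mask) { #name, match, mask },
++     static const struct { const char *name; insn_t match, mask; }
++     insn_table[] = {
++     #include "opcode/riscv-opc.h"
++     };
++     #undef DECLARE_INSN
++*/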
++#ifdef DECLARE_CSR
++DECLARE_CSR(fflags, CSR_FFLAGS)
++DECLARE_CSR(frm, CSR_FRM)
++DECLARE_CSR(fcsr, CSR_FCSR)
++DECLARE_CSR(stats, CSR_STATS)
++DECLARE_CSR(sup0, CSR_SUP0)
++DECLARE_CSR(sup1, CSR_SUP1)
++DECLARE_CSR(epc, CSR_EPC)
++DECLARE_CSR(badvaddr, CSR_BADVADDR)
++DECLARE_CSR(ptbr, CSR_PTBR)
++DECLARE_CSR(asid, CSR_ASID)
++DECLARE_CSR(count, CSR_COUNT)
++DECLARE_CSR(compare, CSR_COMPARE)
++DECLARE_CSR(evec, CSR_EVEC)
++DECLARE_CSR(cause, CSR_CAUSE)
++DECLARE_CSR(status, CSR_STATUS)
++DECLARE_CSR(hartid, CSR_HARTID)
++DECLARE_CSR(impl, CSR_IMPL)
++DECLARE_CSR(fatc, CSR_FATC)
++DECLARE_CSR(send_ipi, CSR_SEND_IPI)
++DECLARE_CSR(clear_ipi, CSR_CLEAR_IPI)
++DECLARE_CSR(reset, CSR_RESET)
++DECLARE_CSR(tohost, CSR_TOHOST)
++DECLARE_CSR(fromhost, CSR_FROMHOST)
++DECLARE_CSR(cycle, CSR_CYCLE)
++DECLARE_CSR(time, CSR_TIME)
++DECLARE_CSR(instret, CSR_INSTRET)
++DECLARE_CSR(uarch0, CSR_UARCH0)
++DECLARE_CSR(uarch1, CSR_UARCH1)
++DECLARE_CSR(uarch2, CSR_UARCH2)
++DECLARE_CSR(uarch3, CSR_UARCH3)
++DECLARE_CSR(uarch4, CSR_UARCH4)
++DECLARE_CSR(uarch5, CSR_UARCH5)
++DECLARE_CSR(uarch6, CSR_UARCH6)
++DECLARE_CSR(uarch7, CSR_UARCH7)
++DECLARE_CSR(uarch8, CSR_UARCH8)
++DECLARE_CSR(uarch9, CSR_UARCH9)
++DECLARE_CSR(uarch10, CSR_UARCH10)
++DECLARE_CSR(uarch11, CSR_UARCH11)
++DECLARE_CSR(uarch12, CSR_UARCH12)
++DECLARE_CSR(uarch13, CSR_UARCH13)
++DECLARE_CSR(uarch14, CSR_UARCH14)
++DECLARE_CSR(uarch15, CSR_UARCH15)
++DECLARE_CSR(counth, CSR_COUNTH)
++DECLARE_CSR(cycleh, CSR_CYCLEH)
++DECLARE_CSR(timeh, CSR_TIMEH)
++DECLARE_CSR(instreth, CSR_INSTRETH)
++#endif
++#ifdef DECLARE_CAUSE
++DECLARE_CAUSE("fflags", CAUSE_FFLAGS)
++DECLARE_CAUSE("frm", CAUSE_FRM)
++DECLARE_CAUSE("fcsr", CAUSE_FCSR)
++DECLARE_CAUSE("stats", CAUSE_STATS)
++DECLARE_CAUSE("sup0", CAUSE_SUP0)
++DECLARE_CAUSE("sup1", CAUSE_SUP1)
++DECLARE_CAUSE("epc", CAUSE_EPC)
++DECLARE_CAUSE("badvaddr", CAUSE_BADVADDR)
++DECLARE_CAUSE("ptbr", CAUSE_PTBR)
++DECLARE_CAUSE("asid", CAUSE_ASID)
++DECLARE_CAUSE("count", CAUSE_COUNT)
++DECLARE_CAUSE("compare", CAUSE_COMPARE)
++DECLARE_CAUSE("evec", CAUSE_EVEC)
++DECLARE_CAUSE("cause", CAUSE_CAUSE)
++DECLARE_CAUSE("status", CAUSE_STATUS)
++DECLARE_CAUSE("hartid", CAUSE_HARTID)
++DECLARE_CAUSE("impl", CAUSE_IMPL)
++DECLARE_CAUSE("fatc", CAUSE_FATC)
++DECLARE_CAUSE("send_ipi", CAUSE_SEND_IPI)
++DECLARE_CAUSE("clear_ipi", CAUSE_CLEAR_IPI)
++DECLARE_CAUSE("reset", CAUSE_RESET)
++DECLARE_CAUSE("tohost", CAUSE_TOHOST)
++DECLARE_CAUSE("fromhost", CAUSE_FROMHOST)
++DECLARE_CAUSE("cycle", CAUSE_CYCLE)
++DECLARE_CAUSE("time", CAUSE_TIME)
++DECLARE_CAUSE("instret", CAUSE_INSTRET)
++DECLARE_CAUSE("uarch0", CAUSE_UARCH0)
++DECLARE_CAUSE("uarch1", CAUSE_UARCH1)
++DECLARE_CAUSE("uarch2", CAUSE_UARCH2)
++DECLARE_CAUSE("uarch3", CAUSE_UARCH3)
++DECLARE_CAUSE("uarch4", CAUSE_UARCH4)
++DECLARE_CAUSE("uarch5", CAUSE_UARCH5)
++DECLARE_CAUSE("uarch6", CAUSE_UARCH6)
++DECLARE_CAUSE("uarch7", CAUSE_UARCH7)
++DECLARE_CAUSE("uarch8", CAUSE_UARCH8)
++DECLARE_CAUSE("uarch9", CAUSE_UARCH9)
++DECLARE_CAUSE("uarch10", CAUSE_UARCH10)
++DECLARE_CAUSE("uarch11", CAUSE_UARCH11)
++DECLARE_CAUSE("uarch12", CAUSE_UARCH12)
++DECLARE_CAUSE("uarch13", CAUSE_UARCH13)
++DECLARE_CAUSE("uarch14", CAUSE_UARCH14)
++DECLARE_CAUSE("uarch15", CAUSE_UARCH15)
++DECLARE_CAUSE("counth", CAUSE_COUNTH)
++DECLARE_CAUSE("cycleh", CAUSE_CYCLEH)
++DECLARE_CAUSE("timeh", CAUSE_TIMEH)
++DECLARE_CAUSE("instreth", CAUSE_INSTRETH)
++#endif
+diff -urN original-binutils/ld/configure.tgt binutils/ld/configure.tgt
+--- original-binutils/ld/configure.tgt 2014-10-14 09:32:04.000000000 +0200
++++ binutils-2.25/ld/configure.tgt 2015-03-07 09:55:02.383135671 +0100
+@@ -604,6 +604,12 @@
+ powerpc-*-beos*) targ_emul=aixppc ;;
+ powerpc-*-windiss*) targ_emul=elf32ppcwindiss ;;
+ powerpc-*-lynxos*) targ_emul=ppclynx ;;
++riscv32*-*-*) targ_emul=elf32lriscv
++ targ_extra_emuls="elf64lriscv"
++ targ_extra_libpath=$targ_extra_emuls ;;
++riscv*-*-*) targ_emul=elf64lriscv
++ targ_extra_emuls="elf32lriscv"
++ targ_extra_libpath=$targ_extra_emuls ;;
+ rs6000-*-aix[5-9]*) targ_emul=aix5rs6 ;;
+ rs6000-*-aix*) targ_emul=aixrs6
+ ;;
+diff -urN original-binutils/ld/emulparams/elf32lriscv-defs.sh binutils/ld/emulparams/elf32lriscv-defs.sh
+--- original-binutils/ld/emulparams/elf32lriscv-defs.sh 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/ld/emulparams/elf32lriscv-defs.sh 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,39 @@
++# This is an ELF platform.
++SCRIPT_NAME=elf
++ARCH=riscv
++OUTPUT_FORMAT="elf32-littleriscv"
++NO_REL_RELOCS=yes
++
++TEMPLATE_NAME=elf32
++EXTRA_EM_FILE=riscvelf
++
++case "$EMULATION_NAME" in
++elf32*) ELFSIZE=32; LIBPATH_SUFFIX=32 ;;
++elf64*) ELFSIZE=64; LIBPATH_SUFFIX= ;;
++*) echo $0: unhandled emulation $EMULATION_NAME >&2; exit 1 ;;
++esac
++
++if test `echo "$host" | sed -e s/64//` = `echo "$target" | sed -e s/64//`; then
++ case " $EMULATION_LIBPATH " in
++ *" ${EMULATION_NAME} "*)
++ NATIVE=yes
++ ;;
++ esac
++fi
++
++GENERATE_SHLIB_SCRIPT=yes
++GENERATE_PIE_SCRIPT=yes
++
++TEXT_START_ADDR=0x800000
++MAXPAGESIZE="CONSTANT (MAXPAGESIZE)"
++COMMONPAGESIZE="CONSTANT (COMMONPAGESIZE)"
++
++INITIAL_READONLY_SECTIONS=".interp ${RELOCATING-0} : { *(.interp) }"
++SDATA_START_SYMBOLS="_gp = . + 0x800;
++ *(.srodata.cst16) *(.srodata.cst8) *(.srodata.cst4) *(.srodata.cst2) *(.srodata*)"
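++# _gp, set 0x800 bytes past the start of the small-data area above, is the
++# RISC-V global pointer; the 0x800 bias lets gp-relative accesses use the
++# full +/-2KB signed 12-bit offset range.  The disassembler added in
++# riscv-dis.c below looks this symbol up to print gp-relative addresses.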
++if test -n "${CREATE_SHLIB}"; then
++ INITIAL_READONLY_SECTIONS=
++ SDATA_START_SYMBOLS=
++ OTHER_READONLY_SECTIONS=".srodata ${RELOCATING-0} : { *(.srodata.cst16) *(.srodata.cst8) *(.srodata.cst4) *(.srodata.cst2) *(.srodata*) }"
++ unset GOT
++fi
+diff -urN original-binutils/ld/emulparams/elf32lriscv.sh binutils/ld/emulparams/elf32lriscv.sh
+--- original-binutils/ld/emulparams/elf32lriscv.sh 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/ld/emulparams/elf32lriscv.sh 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,2 @@
++. ${srcdir}/emulparams/elf32lriscv-defs.sh
++OUTPUT_FORMAT="elf32-littleriscv"
+diff -urN original-binutils/ld/emulparams/elf64lriscv-defs.sh binutils/ld/emulparams/elf64lriscv-defs.sh
+--- original-binutils/ld/emulparams/elf64lriscv-defs.sh 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/ld/emulparams/elf64lriscv-defs.sh 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1 @@
++. ${srcdir}/emulparams/elf32lriscv-defs.sh
+diff -urN original-binutils/ld/emulparams/elf64lriscv.sh binutils/ld/emulparams/elf64lriscv.sh
+--- original-binutils/ld/emulparams/elf64lriscv.sh 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/ld/emulparams/elf64lriscv.sh 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,2 @@
++. ${srcdir}/emulparams/elf64lriscv-defs.sh
++OUTPUT_FORMAT="elf64-littleriscv"
+diff -urN original-binutils/ld/emultempl/riscvelf.em binutils/ld/emultempl/riscvelf.em
+--- original-binutils/ld/emultempl/riscvelf.em 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/ld/emultempl/riscvelf.em 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,68 @@
++# This shell script emits a C file. -*- C -*-
++# Copyright 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
++#
++# This file is part of the GNU Binutils.
++#
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program; if not, write to the Free Software
++# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++# MA 02110-1301, USA.
++
++fragment <<EOF
++
++#include "ldmain.h"
++#include "ldctor.h"
++#include "elf/riscv.h"
++#include "elfxx-riscv.h"
++
++static void
++riscv_elf_before_allocation (void)
++{
++ gld${EMULATION_NAME}_before_allocation ();
++
++ if (link_info.discard == discard_sec_merge)
++ link_info.discard = discard_l;
++
++ /* We always need at least some relaxation to handle code alignment. */
++ if (RELAXATION_DISABLED_BY_USER)
++ TARGET_ENABLE_RELAXATION;
++ else
++ ENABLE_RELAXATION;
++
++ link_info.relax_pass = 2;
++}
++
++static void
++gld${EMULATION_NAME}_after_allocation (void)
++{
++ int need_layout = 0;
++
++ /* Don't attempt to discard unused .eh_frame sections until the final link,
++ as we can't reliably tell if they're used until after relaxation. */
++ if (!link_info.relocatable)
++ {
++ need_layout = bfd_elf_discard_info (link_info.output_bfd, &link_info);
++ if (need_layout < 0)
++ {
++ einfo ("%X%P: .eh_frame/.stab edit: %E\n");
++ return;
++ }
++ }
++
++ gld${EMULATION_NAME}_map_segments (need_layout);
++}
++
++EOF
++
++LDEMUL_BEFORE_ALLOCATION=riscv_elf_before_allocation
++LDEMUL_AFTER_ALLOCATION=gld${EMULATION_NAME}_after_allocation
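+# These two hooks override the default elf32.em callbacks in the generated
+# eelf32lriscv.c and eelf64lriscv.c emulations; the fragment above is pulled
+# in via EXTRA_EM_FILE=riscvelf set in elf32lriscv-defs.sh.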
+diff -urN original-binutils/ld/Makefile.am binutils/ld/Makefile.am
+--- original-binutils/ld/Makefile.am 2014-10-14 09:32:04.000000000 +0200
++++ binutils-2.25/ld/Makefile.am 2015-03-07 09:55:02.383135671 +0100
+@@ -258,6 +258,7 @@
+ eelf32ppcsim.c \
+ eelf32ppcvxworks.c \
+ eelf32ppcwindiss.c \
++ eelf32lriscv.c \
+ eelf32rl78.c \
+ eelf32rx.c \
+ eelf32tilegx.c \
+@@ -464,6 +465,7 @@
+ eelf64btsmip_fbsd.c \
+ eelf64hppa.c \
+ eelf64lppc.c \
++ eelf64lriscv.c \
+ eelf64ltsmip.c \
+ eelf64ltsmip_fbsd.c \
+ eelf64mmix.c \
+@@ -1104,6 +1106,11 @@
+ ldemul-list.h \
+ $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
+
++eelf32lriscv.c: $(srcdir)/emulparams/elf32lriscv.sh \
++ $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
++ $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
++ ${GEN_DEPENDS}
++
+ eelf32lsmip.c: $(srcdir)/emulparams/elf32lsmip.sh \
+ $(srcdir)/emulparams/elf32lmip.sh $(srcdir)/emulparams/elf32bmip.sh \
+ $(ELF_DEPS) $(srcdir)/emultempl/mipself.em $(srcdir)/scripttempl/elf.sc \
+@@ -1861,6 +1868,12 @@
+ ldemul-list.h \
+ $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
+
++eelf64lriscv.c: $(srcdir)/emulparams/elf64lriscv.sh \
++ $(srcdir)/emulparams/elf64lriscv-defs.sh \
++ $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
++ $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
++ ${GEN_DEPENDS}
++
+ eelf64ltsmip.c: $(srcdir)/emulparams/elf64ltsmip.sh \
+ $(srcdir)/emulparams/elf64btsmip.sh $(srcdir)/emulparams/elf64bmip-defs.sh \
+ $(srcdir)/emulparams/elf32bmipn32-defs.sh $(ELF_DEPS) \
+diff -urN original-binutils/ld/Makefile.in binutils/ld/Makefile.in
+--- original-binutils/ld/Makefile.in 2014-10-14 09:32:04.000000000 +0200
++++ binutils-2.25/ld/Makefile.in 2015-03-07 09:55:02.383135671 +0100
+@@ -546,6 +546,7 @@
+ eelf32lppclinux.c \
+ eelf32lppcnto.c \
+ eelf32lppcsim.c \
++ eelf32lriscv.c \
+ eelf32m32c.c \
+ eelf32mb_linux.c \
+ eelf32mbel_linux.c \
+@@ -771,6 +772,7 @@
+ eelf64btsmip_fbsd.c \
+ eelf64hppa.c \
+ eelf64lppc.c \
++ eelf64lriscv.c \
+ eelf64ltsmip.c \
+ eelf64ltsmip_fbsd.c \
+ eelf64mmix.c \
+@@ -1157,6 +1159,7 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lppclinux.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lppcnto.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lppcsim.Po@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lriscv.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lr5900.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lr5900n32.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf32lsmip.Po@am__quote@
+@@ -1211,6 +1214,7 @@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64btsmip_fbsd.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64hppa.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64lppc.Po@am__quote@
++@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64lriscv.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64ltsmip.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64ltsmip_fbsd.Po@am__quote@
+ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/eelf64mmix.Po@am__quote@
+@@ -2545,6 +2549,11 @@
+ ldemul-list.h \
+ $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
+
++eelf32lriscv.c: $(srcdir)/emulparams/elf32lriscv.sh \
++ $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
++ $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
++ ${GEN_DEPENDS}
++
+ eelf32lsmip.c: $(srcdir)/emulparams/elf32lsmip.sh \
+ $(srcdir)/emulparams/elf32lmip.sh $(srcdir)/emulparams/elf32bmip.sh \
+ $(ELF_DEPS) $(srcdir)/emultempl/mipself.em $(srcdir)/scripttempl/elf.sc \
+@@ -3302,6 +3311,12 @@
+ ldemul-list.h \
+ $(ELF_DEPS) $(srcdir)/scripttempl/elf.sc ${GEN_DEPENDS}
+
++eelf64lriscv.c: $(srcdir)/emulparams/elf64lriscv.sh \
++ $(srcdir)/emulparams/elf64lriscv-defs.sh \
++ $(srcdir)/emulparams/elf32lriscv-defs.sh $(ELF_DEPS) \
++ $(srcdir)/emultempl/riscvelf.em $(srcdir)/scripttempl/elf.sc \
++ ${GEN_DEPENDS}
++
+ eelf64ltsmip.c: $(srcdir)/emulparams/elf64ltsmip.sh \
+ $(srcdir)/emulparams/elf64btsmip.sh $(srcdir)/emulparams/elf64bmip-defs.sh \
+ $(srcdir)/emulparams/elf32bmipn32-defs.sh $(ELF_DEPS) \
+diff -urN original-binutils/opcodes/configure binutils/opcodes/configure
+--- original-binutils/opcodes/configure 2014-12-23 15:22:07.000000000 +0100
++++ binutils-2.25/opcodes/configure 2015-03-07 09:55:02.387135671 +0100
+@@ -12590,6 +12590,7 @@
+ bfd_powerpc_arch) ta="$ta ppc-dis.lo ppc-opc.lo" ;;
+ bfd_powerpc_64_arch) ta="$ta ppc-dis.lo ppc-opc.lo" ;;
+ bfd_pyramid_arch) ;;
++ bfd_riscv_arch) ta="$ta riscv-dis.lo riscv-opc.lo" ;;
+ bfd_romp_arch) ;;
+ bfd_rs6000_arch) ta="$ta ppc-dis.lo ppc-opc.lo" ;;
+ bfd_rl78_arch) ta="$ta rl78-dis.lo rl78-decode.lo";;
+diff -urN original-binutils/opcodes/disassemble.c binutils/opcodes/disassemble.c
+--- original-binutils/opcodes/disassemble.c 2014-10-14 09:32:04.000000000 +0200
++++ binutils-2.25/opcodes/disassemble.c 2015-03-07 09:55:02.391135671 +0100
+@@ -373,6 +373,11 @@
+ disassemble = print_insn_little_powerpc;
+ break;
+ #endif
++#ifdef ARCH_riscv
++ case bfd_arch_riscv:
++ disassemble = print_insn_riscv;
++ break;
++#endif
+ #ifdef ARCH_rs6000
+ case bfd_arch_rs6000:
+ if (bfd_get_mach (abfd) == bfd_mach_ppc_620)
+@@ -545,6 +550,9 @@
+ #ifdef ARCH_powerpc
+ print_ppc_disassembler_options (stream);
+ #endif
++#ifdef ARCH_riscv
++ print_riscv_disassembler_options (stream);
++#endif
+ #ifdef ARCH_i386
+ print_i386_disassembler_options (stream);
+ #endif
+diff -urN original-binutils/opcodes/riscv-dis.c binutils/opcodes/riscv-dis.c
+--- original-binutils/opcodes/riscv-dis.c 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/opcodes/riscv-dis.c 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,492 @@
++/* RISC-V disassembler
++ Copyright 2011-2014 Free Software Foundation, Inc.
++
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target.
++
++ This file is part of the GNU opcodes library.
++
++ This library is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ It is distributed in the hope that it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++ License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++#include "sysdep.h"
++#include "dis-asm.h"
++#include "libiberty.h"
++#include "opcode/riscv.h"
++#include "opintl.h"
++#include "elf-bfd.h"
++#include "elf/riscv.h"
++
++#include <stdint.h>
++#include <assert.h>
++
++struct riscv_private_data
++{
++ bfd_vma gp;
++ bfd_vma print_addr;
++ bfd_vma hi_addr[OP_MASK_RD + 1];
++};
++
++static const char * const *riscv_gpr_names;
++static const char * const *riscv_fpr_names;
++
++/* Other options */
++static int no_aliases; /* If set disassemble as most general inst. */
++
++static void
++set_default_riscv_dis_options (void)
++{
++ riscv_gpr_names = riscv_gpr_names_abi;
++ riscv_fpr_names = riscv_fpr_names_abi;
++ no_aliases = 0;
++}
++
++static void
++parse_riscv_dis_option (const char *option)
++{
++ if (CONST_STRNEQ (option, "no-aliases"))
++ no_aliases = 1;
++ else if (CONST_STRNEQ (option, "numeric"))
++ {
++ riscv_gpr_names = riscv_gpr_names_numeric;
++ riscv_fpr_names = riscv_fpr_names_numeric;
++ }
++  else
++    {
++      /* Invalid option.  */
++      fprintf (stderr, _("Unrecognized disassembler option: %s\n"), option);
++    }
++}
++
++static void
++parse_riscv_dis_options (const char *opts_in)
++{
++ char *opts = xstrdup (opts_in), *opt = opts, *opt_end = opts;
++
++ set_default_riscv_dis_options ();
++
++ for ( ; opt_end != NULL; opt = opt_end + 1)
++ {
++ if ((opt_end = strchr (opt, ',')) != NULL)
++ *opt_end = 0;
++ parse_riscv_dis_option (opt);
++ }
++
++ free (opts);
++}
++
++/* Print one argument from an array. */
++
++static void
++arg_print (struct disassemble_info *info, unsigned long val,
++ const char* const* array, size_t size)
++{
++ const char *s = val >= size || array[val] == NULL ? "unknown" : array[val];
++ (*info->fprintf_func) (info->stream, "%s", s);
++}
++
++static void
++maybe_print_address (struct riscv_private_data *pd, int base_reg, int offset)
++{
++ if (pd->hi_addr[base_reg] != (bfd_vma)-1)
++ {
++ pd->print_addr = pd->hi_addr[base_reg] + offset;
++ pd->hi_addr[base_reg] = -1;
++ }
++ else if (base_reg == X_GP && pd->gp != (bfd_vma)-1)
++ pd->print_addr = pd->gp + offset;
++ else if (base_reg == X_TP)
++ pd->print_addr = offset;
++}
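++
++/* A sketch of the two-instruction pattern maybe_print_address reconstructs
++   (typical compiler output; "sym" is a hypothetical symbol):
++
++     lui  a0, %hi(sym)       ('d' case below records hi_addr[a0])
++     addi a0, a0, %lo(sym)   (print_addr becomes hi_addr[a0] + offset)
++
++   letting the disassembler annotate the second instruction with the
++   resolved absolute address.  */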
++
++/* Print insn arguments for 32/64-bit code. */
++
++static void
++print_insn_args (const char *d, insn_t l, bfd_vma pc, disassemble_info *info)
++{
++ struct riscv_private_data *pd = info->private_data;
++ int rs1 = (l >> OP_SH_RS1) & OP_MASK_RS1;
++ int rd = (l >> OP_SH_RD) & OP_MASK_RD;
++
++ if (*d != '\0')
++ (*info->fprintf_func) (info->stream, "\t");
++
++ for (; *d != '\0'; d++)
++ {
++ switch (*d)
++ {
++ /* Xcustom */
++ case '^':
++ switch (*++d)
++ {
++ case 'd':
++ (*info->fprintf_func) (info->stream, "%d", rd);
++ break;
++ case 's':
++ (*info->fprintf_func) (info->stream, "%d", rs1);
++ break;
++ case 't':
++ (*info->fprintf_func)
++ ( info->stream, "%d", (int)((l >> OP_SH_RS2) & OP_MASK_RS2));
++ break;
++ case 'j':
++ (*info->fprintf_func)
++ ( info->stream, "%d", (int)((l >> OP_SH_CUSTOM_IMM) & OP_MASK_CUSTOM_IMM));
++ break;
++ }
++ break;
++
++ /* Xhwacha */
++ case '#':
++ switch ( *++d ) {
++ case 'g':
++ (*info->fprintf_func)
++ ( info->stream, "%d",
++ (int)((l >> OP_SH_IMMNGPR) & OP_MASK_IMMNGPR));
++ break;
++ case 'f':
++ (*info->fprintf_func)
++ ( info->stream, "%d",
++ (int)((l >> OP_SH_IMMNFPR) & OP_MASK_IMMNFPR));
++ break;
++ case 'p':
++ (*info->fprintf_func)
++ ( info->stream, "%d",
++ (int)((l >> OP_SH_CUSTOM_IMM) & OP_MASK_CUSTOM_IMM));
++ break;
++ case 'n':
++ (*info->fprintf_func)
++ ( info->stream, "%d",
++ (int)(((l >> OP_SH_IMMSEGNELM) & OP_MASK_IMMSEGNELM) + 1));
++ break;
++ case 'd':
++ (*info->fprintf_func)
++ ( info->stream, "%s",
++ riscv_vec_gpr_names[(l >> OP_SH_VRD) & OP_MASK_VRD]);
++ break;
++ case 's':
++ (*info->fprintf_func)
++ ( info->stream, "%s",
++ riscv_vec_gpr_names[(l >> OP_SH_VRS) & OP_MASK_VRS]);
++ break;
++ case 't':
++ (*info->fprintf_func)
++ ( info->stream, "%s",
++ riscv_vec_gpr_names[(l >> OP_SH_VRT) & OP_MASK_VRT]);
++ break;
++ case 'r':
++ (*info->fprintf_func)
++ ( info->stream, "%s",
++ riscv_vec_gpr_names[(l >> OP_SH_VRR) & OP_MASK_VRR]);
++ break;
++ case 'D':
++ (*info->fprintf_func)
++ ( info->stream, "%s",
++ riscv_vec_fpr_names[(l >> OP_SH_VFD) & OP_MASK_VFD]);
++ break;
++ case 'S':
++ (*info->fprintf_func)
++ ( info->stream, "%s",
++ riscv_vec_fpr_names[(l >> OP_SH_VFS) & OP_MASK_VFS]);
++ break;
++ case 'T':
++ (*info->fprintf_func)
++ ( info->stream, "%s",
++ riscv_vec_fpr_names[(l >> OP_SH_VFT) & OP_MASK_VFT]);
++ break;
++ case 'R':
++ (*info->fprintf_func)
++ ( info->stream, "%s",
++ riscv_vec_fpr_names[(l >> OP_SH_VFR) & OP_MASK_VFR]);
++ break;
++ }
++ break;
++
++ case ',':
++ case '(':
++ case ')':
++ case '[':
++ case ']':
++ (*info->fprintf_func) (info->stream, "%c", *d);
++ break;
++
++ case '0':
++ break;
++
++ case 'b':
++ case 's':
++ (*info->fprintf_func) (info->stream, "%s", riscv_gpr_names[rs1]);
++ break;
++
++ case 't':
++ (*info->fprintf_func) (info->stream, "%s",
++ riscv_gpr_names[(l >> OP_SH_RS2) & OP_MASK_RS2]);
++ break;
++
++ case 'u':
++ (*info->fprintf_func) (info->stream, "0x%x", (unsigned)EXTRACT_UTYPE_IMM (l) >> RISCV_IMM_BITS);
++ break;
++
++ case 'm':
++ arg_print(info, (l >> OP_SH_RM) & OP_MASK_RM,
++ riscv_rm, ARRAY_SIZE(riscv_rm));
++ break;
++
++ case 'P':
++ arg_print(info, (l >> OP_SH_PRED) & OP_MASK_PRED,
++ riscv_pred_succ, ARRAY_SIZE(riscv_pred_succ));
++ break;
++
++ case 'Q':
++ arg_print(info, (l >> OP_SH_SUCC) & OP_MASK_SUCC,
++ riscv_pred_succ, ARRAY_SIZE(riscv_pred_succ));
++ break;
++
++ case 'o':
++ maybe_print_address (pd, rs1, EXTRACT_ITYPE_IMM (l));
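++	  /* Fall through: 'o' prints its immediate the same way as 'j'.  */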
++ case 'j':
++ if ((l & MASK_ADDI) == MATCH_ADDI || (l & MASK_JALR) == MATCH_JALR)
++ maybe_print_address (pd, rs1, EXTRACT_ITYPE_IMM (l));
++ (*info->fprintf_func) (info->stream, "%d", (int)EXTRACT_ITYPE_IMM (l));
++ break;
++
++ case 'q':
++ maybe_print_address (pd, rs1, EXTRACT_STYPE_IMM (l));
++ (*info->fprintf_func) (info->stream, "%d", (int)EXTRACT_STYPE_IMM (l));
++ break;
++
++ case 'a':
++ info->target = EXTRACT_UJTYPE_IMM (l) + pc;
++ (*info->print_address_func) (info->target, info);
++ break;
++
++ case 'p':
++ info->target = EXTRACT_SBTYPE_IMM (l) + pc;
++ (*info->print_address_func) (info->target, info);
++ break;
++
++ case 'd':
++ if ((l & MASK_AUIPC) == MATCH_AUIPC)
++ pd->hi_addr[rd] = pc + EXTRACT_UTYPE_IMM (l);
++ else if ((l & MASK_LUI) == MATCH_LUI)
++ pd->hi_addr[rd] = EXTRACT_UTYPE_IMM (l);
++ (*info->fprintf_func) (info->stream, "%s", riscv_gpr_names[rd]);
++ break;
++
++ case 'z':
++ (*info->fprintf_func) (info->stream, "%s", riscv_gpr_names[0]);
++ break;
++
++ case '>':
++ (*info->fprintf_func) (info->stream, "0x%x",
++ (unsigned)((l >> OP_SH_SHAMT) & OP_MASK_SHAMT));
++ break;
++
++ case '<':
++ (*info->fprintf_func) (info->stream, "0x%x",
++ (unsigned)((l >> OP_SH_SHAMTW) & OP_MASK_SHAMTW));
++ break;
++
++ case 'S':
++ case 'U':
++ (*info->fprintf_func) (info->stream, "%s", riscv_fpr_names[rs1]);
++ break;
++
++ case 'T':
++ (*info->fprintf_func) (info->stream, "%s",
++ riscv_fpr_names[(l >> OP_SH_RS2) & OP_MASK_RS2]);
++ break;
++
++ case 'D':
++ (*info->fprintf_func) (info->stream, "%s", riscv_fpr_names[rd]);
++ break;
++
++ case 'R':
++ (*info->fprintf_func) (info->stream, "%s",
++ riscv_fpr_names[(l >> OP_SH_RS3) & OP_MASK_RS3]);
++ break;
++
++ case 'E':
++ {
++ const char* csr_name = NULL;
++ unsigned int csr = (l >> OP_SH_CSR) & OP_MASK_CSR;
++ switch (csr)
++ {
++ #define DECLARE_CSR(name, num) case num: csr_name = #name; break;
++ #include "opcode/riscv-opc.h"
++ #undef DECLARE_CSR
++ }
++ if (csr_name)
++ (*info->fprintf_func) (info->stream, "%s", csr_name);
++ else
++ (*info->fprintf_func) (info->stream, "0x%x", csr);
++ break;
++ }
++
++ case 'Z':
++ (*info->fprintf_func) (info->stream, "%d", rs1);
++ break;
++
++ default:
++ /* xgettext:c-format */
++ (*info->fprintf_func) (info->stream,
++ _("# internal error, undefined modifier (%c)"),
++ *d);
++ return;
++ }
++ }
++}
++
++/* Print the RISC-V instruction at address MEMADDR in debugged memory,
++   using INFO.  Returns length of the instruction, in bytes.  The
++   instruction stream is always little-endian.  */
++
++static int
++riscv_disassemble_insn (bfd_vma memaddr, insn_t word, disassemble_info *info)
++{
++ const struct riscv_opcode *op;
++ static bfd_boolean init = 0;
++ static const char *extension = NULL;
++ static const struct riscv_opcode *riscv_hash[OP_MASK_OP + 1];
++ struct riscv_private_data *pd;
++ int insnlen;
++
++ /* Build a hash table to shorten the search time. */
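++  /* Each riscv_hash[i] caches the first riscv_opcodes[] entry whose major
++     opcode field is i; the lookup below starts there and scans forward,
++     relying on same-opcode entries being contiguous (see the ordering
++     comment in riscv-opc.c).  */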
++ if (! init)
++ {
++ unsigned int i;
++ unsigned int e_flags = elf_elfheader (info->section->owner)->e_flags;
++ extension = riscv_elf_flag_to_name(EF_GET_RISCV_EXT(e_flags));
++
++ for (i = 0; i <= OP_MASK_OP; i++)
++ for (op = riscv_opcodes; op < &riscv_opcodes[NUMOPCODES]; op++)
++ if (i == ((op->match >> OP_SH_OP) & OP_MASK_OP))
++ {
++ riscv_hash[i] = op;
++ break;
++ }
++
++ init = 1;
++ }
++
++ if (info->private_data == NULL)
++ {
++ int i;
++
++ pd = info->private_data = calloc(1, sizeof (struct riscv_private_data));
++ pd->gp = -1;
++ pd->print_addr = -1;
++ for (i = 0; i < (int) ARRAY_SIZE(pd->hi_addr); i++)
++ pd->hi_addr[i] = -1;
++
++ for (i = 0; i < info->symtab_size; i++)
++ if (strcmp (bfd_asymbol_name (info->symtab[i]), "_gp") == 0)
++ pd->gp = bfd_asymbol_value (info->symtab[i]);
++ }
++ else
++ pd = info->private_data;
++
++ insnlen = riscv_insn_length (word);
++
++ info->bytes_per_chunk = insnlen % 4 == 0 ? 4 : 2;
++ info->bytes_per_line = 8;
++ info->display_endian = info->endian;
++ info->insn_info_valid = 1;
++ info->branch_delay_insns = 0;
++ info->data_size = 0;
++ info->insn_type = dis_nonbranch;
++ info->target = 0;
++ info->target2 = 0;
++
++ op = riscv_hash[(word >> OP_SH_OP) & OP_MASK_OP];
++ if (op != NULL)
++ {
++ for (; op < &riscv_opcodes[NUMOPCODES]; op++)
++ {
++ if ((op->match_func) (op, word)
++ && !(no_aliases && (op->pinfo & INSN_ALIAS))
++ && !(op->subset[0] == 'X' && strcmp(op->subset, extension)))
++ {
++ (*info->fprintf_func) (info->stream, "%s", op->name);
++ print_insn_args (op->args, word, memaddr, info);
++ if (pd->print_addr != (bfd_vma)-1)
++ {
++ info->target = pd->print_addr;
++ (*info->fprintf_func) (info->stream, " # ");
++ (*info->print_address_func) (info->target, info);
++ pd->print_addr = -1;
++ }
++ return insnlen;
++ }
++ }
++ }
++
++ /* Handle undefined instructions. */
++ info->insn_type = dis_noninsn;
++ (*info->fprintf_func) (info->stream, "0x%llx", (unsigned long long)word);
++ return insnlen;
++}
++
++int
++print_insn_riscv (bfd_vma memaddr, struct disassemble_info *info)
++{
++ uint16_t i2;
++ insn_t insn = 0;
++ bfd_vma n;
++ int status;
++
++ if (info->disassembler_options != NULL)
++ {
++ parse_riscv_dis_options (info->disassembler_options);
++ /* Avoid repeatedly parsing the options. */
++ info->disassembler_options = NULL;
++ }
++ else if (riscv_gpr_names == NULL)
++ set_default_riscv_dis_options ();
++
++ /* Instructions are a sequence of 2-byte packets in little-endian order. */
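++  /* riscv_insn_length decodes the standard RISC-V length encoding from the
++     first packet: if the low two bits are not 11 the instruction is a
++     16-bit (compressed) one, otherwise it is at least 32 bits long.  */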
++ for (n = 0; n < sizeof(insn) && n < riscv_insn_length (insn); n += 2)
++ {
++ status = (*info->read_memory_func) (memaddr + n, (bfd_byte*)&i2, 2, info);
++ if (status != 0)
++ {
++ if (n > 0) /* Don't fail just because we fell off the end. */
++ break;
++ (*info->memory_error_func) (status, memaddr, info);
++ return status;
++ }
++
++ i2 = bfd_getl16 (&i2);
++ insn |= (insn_t)i2 << (8*n);
++ }
++
++ return riscv_disassemble_insn (memaddr, insn, info);
++}
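++
++/* print_insn_riscv above is the hook installed for bfd_arch_riscv by the
++   opcodes/disassemble.c change earlier in this patch.  */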
++
++void
++print_riscv_disassembler_options (FILE *stream)
++{
++ fprintf (stream, _("\n\
++The following RISC-V-specific disassembler options are supported for use\n\
++with the -M switch (multiple options should be separated by commas):\n"));
++
++ fprintf (stream, _("\n\
++  numeric       Print numeric register names, rather than ABI names.\n"));
++
++ fprintf (stream, _("\n\
++ no-aliases Disassemble only into canonical instructions, rather\n\
++ than into pseudoinstructions.\n"));
++
++ fprintf (stream, _("\n"));
++}
+diff -urN original-binutils/opcodes/riscv-opc.c binutils/opcodes/riscv-opc.c
+--- original-binutils/opcodes/riscv-opc.c 1970-01-01 01:00:00.000000000 +0100
++++ binutils-2.25/opcodes/riscv-opc.c 2015-03-07 09:51:45.659139025 +0100
+@@ -0,0 +1,729 @@
++/* RISC-V opcode list
++ Copyright 2011-2014 Free Software Foundation, Inc.
++
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target.
++
++ This file is part of the GNU opcodes library.
++
++ This library is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 3, or (at your option)
++ any later version.
++
++ It is distributed in the hope that it will be useful, but WITHOUT
++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++ License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this file; see the file COPYING. If not, write to the
++ Free Software Foundation, 51 Franklin Street - Fifth Floor, Boston,
++ MA 02110-1301, USA. */
++
++#include "sysdep.h"
++#include "opcode/riscv.h"
++#include <stdio.h>
++
++/* Register names used by gas and objdump. */
++
++const char * const riscv_gpr_names_numeric[32] =
++{
++ "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
++ "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
++ "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
++ "x24", "x25", "x26", "x27", "x28", "x29", "x30", "x31"
++};
++
++const char * const riscv_gpr_names_abi[32] = {
++ "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
++ "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
++ "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
++ "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6"
++};
++
++const char * const riscv_fpr_names_numeric[32] =
++{
++ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
++ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
++ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
++ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
++};
++
++const char * const riscv_fpr_names_abi[32] = {
++ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",
++ "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",
++ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",
++ "fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"
++};
++
++const char * const riscv_vec_gpr_names[32] =
++{
++ "vx0", "vx1", "vx2", "vx3", "vx4", "vx5", "vx6", "vx7",
++ "vx8", "vx9", "vx10", "vx11", "vx12", "vx13", "vx14", "vx15",
++ "vx16", "vx17", "vx18", "vx19", "vx20", "vx21", "vx22", "vx23",
++ "vx24", "vx25", "vx26", "vx27", "vx28", "vx29", "vx30", "vx31"
++};
++
++const char * const riscv_vec_fpr_names[32] =
++{
++ "vf0", "vf1", "vf2", "vf3", "vf4", "vf5", "vf6", "vf7",
++ "vf8", "vf9", "vf10", "vf11", "vf12", "vf13", "vf14", "vf15",
++ "vf16", "vf17", "vf18", "vf19", "vf20", "vf21", "vf22", "vf23",
++ "vf24", "vf25", "vf26", "vf27", "vf28", "vf29", "vf30", "vf31"
++};
++
++/* The order of overloaded instructions matters. Label arguments and
++   register arguments look the same.  Instructions that can take
++   either as arguments must appear in the correct order in this table
++   for the assembler to pick the right one.  In other words, entries
++   with immediate operands must appear after the same instruction with
++ registers.
++
++ Because of the lookup algorithm used, entries with the same opcode
++ name must be contiguous. */
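++/* For example, in the table below the register form "add d,s,t"
++   precedes "add d,s,j", whose immediate operand makes it assemble
++   to addi instead.  */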
++
++#define WR_xd INSN_WRITE_GPR_D
++#define WR_fd INSN_WRITE_FPR_D
++#define RD_xs1 INSN_READ_GPR_S
++#define RD_xs2 INSN_READ_GPR_T
++#define RD_fs1 INSN_READ_FPR_S
++#define RD_fs2 INSN_READ_FPR_T
++#define RD_fs3 INSN_READ_FPR_R
++
++#define MASK_RS1 (OP_MASK_RS1 << OP_SH_RS1)
++#define MASK_RS2 (OP_MASK_RS2 << OP_SH_RS2)
++#define MASK_RD (OP_MASK_RD << OP_SH_RD)
++#define MASK_IMM ENCODE_ITYPE_IMM(-1U)
++#define MASK_UIMM ENCODE_UTYPE_IMM(-1U)
++#define MASK_RM (OP_MASK_RM << OP_SH_RM)
++#define MASK_PRED (OP_MASK_PRED << OP_SH_PRED)
++#define MASK_SUCC (OP_MASK_SUCC << OP_SH_SUCC)
++#define MASK_AQ (OP_MASK_AQ << OP_SH_AQ)
++#define MASK_RL (OP_MASK_RL << OP_SH_RL)
++#define MASK_AQRL (MASK_AQ | MASK_RL)
++
++static int match_opcode(const struct riscv_opcode *op, insn_t insn)
++{
++ return (insn & op->mask) == op->match;
++}
++
++static int match_never(const struct riscv_opcode *op ATTRIBUTE_UNUSED,
++ insn_t insn ATTRIBUTE_UNUSED)
++{
++ return 0;
++}
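++/* match_never is paired with INSN_MACRO entries such as "li d,I":
++   macros are expanded by the assembler and have no single encoding,
++   so the disassembler must never match them.  */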
++
++static int match_rs1_eq_rs2(const struct riscv_opcode *op, insn_t insn)
++{
++ return match_opcode(op, insn) &&
++ ((insn & MASK_RS1) >> OP_SH_RS1) == ((insn & MASK_RS2) >> OP_SH_RS2);
++}
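++/* Used by aliases such as fmv.s, fneg.s and fabs.s below, which are
++   the sign-injection instructions with both source fields naming the
++   same register.  */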
++
++const struct riscv_opcode riscv_builtin_opcodes[] =
++{
++/* These instructions appear first so that the disassembler will find
++   them first.  The assembler uses a hash table based on the
++ instruction name anyhow. */
++/* name, isa, operands, match, mask, pinfo */
++{"unimp", "I", "", 0, 0xffff, match_opcode, 0 },
++{"nop", "I", "", MATCH_ADDI, MASK_ADDI | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS },
++{"li", "I", "d,j", MATCH_ADDI, MASK_ADDI | MASK_RS1, match_opcode, INSN_ALIAS|WR_xd }, /* addi */
++{"li", "I", "d,I", 0, (int) M_LI, match_never, INSN_MACRO },
++{"mv", "I", "d,s", MATCH_ADDI, MASK_ADDI | MASK_IMM, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"move", "I", "d,s", MATCH_ADDI, MASK_ADDI | MASK_IMM, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"andi", "I", "d,s,j", MATCH_ANDI, MASK_ANDI, match_opcode, WR_xd|RD_xs1 },
++{"and", "I", "d,s,t", MATCH_AND, MASK_AND, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"and", "I", "d,s,j", MATCH_ANDI, MASK_ANDI, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"beqz", "I", "s,p", MATCH_BEQ, MASK_BEQ | MASK_RS2, match_opcode, INSN_ALIAS|RD_xs1 },
++{"beq", "I", "s,t,p", MATCH_BEQ, MASK_BEQ, match_opcode, RD_xs1|RD_xs2 },
++{"blez", "I", "t,p", MATCH_BGE, MASK_BGE | MASK_RS1, match_opcode, INSN_ALIAS|RD_xs2 },
++{"bgez", "I", "s,p", MATCH_BGE, MASK_BGE | MASK_RS2, match_opcode, INSN_ALIAS|RD_xs1 },
++{"ble", "I", "t,s,p", MATCH_BGE, MASK_BGE, match_opcode, INSN_ALIAS|RD_xs1|RD_xs2 },
++{"bleu", "I", "t,s,p", MATCH_BGEU, MASK_BGEU, match_opcode, INSN_ALIAS|RD_xs1|RD_xs2 },
++{"bge", "I", "s,t,p", MATCH_BGE, MASK_BGE, match_opcode, RD_xs1|RD_xs2 },
++{"bgeu", "I", "s,t,p", MATCH_BGEU, MASK_BGEU, match_opcode, RD_xs1|RD_xs2 },
++{"bltz", "I", "s,p", MATCH_BLT, MASK_BLT | MASK_RS2, match_opcode, INSN_ALIAS|RD_xs1 },
++{"bgtz", "I", "t,p", MATCH_BLT, MASK_BLT | MASK_RS1, match_opcode, INSN_ALIAS|RD_xs2 },
++{"blt", "I", "s,t,p", MATCH_BLT, MASK_BLT, match_opcode, RD_xs1|RD_xs2 },
++{"bltu", "I", "s,t,p", MATCH_BLTU, MASK_BLTU, match_opcode, RD_xs1|RD_xs2 },
++{"bgt", "I", "t,s,p", MATCH_BLT, MASK_BLT, match_opcode, INSN_ALIAS|RD_xs1|RD_xs2 },
++{"bgtu", "I", "t,s,p", MATCH_BLTU, MASK_BLTU, match_opcode, INSN_ALIAS|RD_xs1|RD_xs2 },
++{"bnez", "I", "s,p", MATCH_BNE, MASK_BNE | MASK_RS2, match_opcode, INSN_ALIAS|RD_xs1 },
++{"bne", "I", "s,t,p", MATCH_BNE, MASK_BNE, match_opcode, RD_xs1|RD_xs2 },
++{"addi", "I", "d,s,j", MATCH_ADDI, MASK_ADDI, match_opcode, WR_xd|RD_xs1 },
++{"add", "I", "d,s,t", MATCH_ADD, MASK_ADD, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"add", "I", "d,s,t,0",MATCH_ADD, MASK_ADD, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"add", "I", "d,s,j", MATCH_ADDI, MASK_ADDI, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"la", "I", "d,A", 0, (int) M_LA, match_never, INSN_MACRO },
++{"lla", "I", "d,A", 0, (int) M_LLA, match_never, INSN_MACRO },
++{"la.tls.gd", "I", "d,A", 0, (int) M_LA_TLS_GD, match_never, INSN_MACRO },
++{"la.tls.ie", "I", "d,A", 0, (int) M_LA_TLS_IE, match_never, INSN_MACRO },
++{"neg", "I", "d,t", MATCH_SUB, MASK_SUB | MASK_RS1, match_opcode, INSN_ALIAS|WR_xd|RD_xs2 }, /* sub 0 */
++{"slli", "I", "d,s,>", MATCH_SLLI, MASK_SLLI, match_opcode, WR_xd|RD_xs1 },
++{"sll", "I", "d,s,t", MATCH_SLL, MASK_SLL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"sll", "I", "d,s,>", MATCH_SLLI, MASK_SLLI, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"srli", "I", "d,s,>", MATCH_SRLI, MASK_SRLI, match_opcode, WR_xd|RD_xs1 },
++{"srl", "I", "d,s,t", MATCH_SRL, MASK_SRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"srl", "I", "d,s,>", MATCH_SRLI, MASK_SRLI, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"srai", "I", "d,s,>", MATCH_SRAI, MASK_SRAI, match_opcode, WR_xd|RD_xs1 },
++{"sra", "I", "d,s,t", MATCH_SRA, MASK_SRA, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"sra", "I", "d,s,>", MATCH_SRAI, MASK_SRAI, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"sub", "I", "d,s,t", MATCH_SUB, MASK_SUB, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"ret", "I", "", MATCH_JALR | (X_RA << OP_SH_RS1), MASK_JALR | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"j", "I", "a", MATCH_JAL, MASK_JAL | MASK_RD, match_opcode, INSN_ALIAS },
++{"jal", "I", "a", MATCH_JAL | (X_RA << OP_SH_RD), MASK_JAL | MASK_RD, match_opcode, INSN_ALIAS|WR_xd },
++{"jal", "I", "d,a", MATCH_JAL, MASK_JAL, match_opcode, WR_xd },
++{"call", "I", "c", (X_T0 << OP_SH_RS1) | (X_RA << OP_SH_RD), (int) M_CALL, match_never, INSN_MACRO },
++{"tail", "I", "c", (X_T0 << OP_SH_RS1), (int) M_CALL, match_never, INSN_MACRO },
++{"jump", "I", "c,s", 0, (int) M_CALL, match_never, INSN_MACRO },
++{"jr", "I", "s", MATCH_JALR, MASK_JALR | MASK_RD | MASK_IMM, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"jr", "I", "s,j", MATCH_JALR, MASK_JALR | MASK_RD, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"jalr", "I", "s", MATCH_JALR | (X_RA << OP_SH_RD), MASK_JALR | MASK_RD | MASK_IMM, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"jalr", "I", "s,j", MATCH_JALR | (X_RA << OP_SH_RD), MASK_JALR | MASK_RD, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"jalr", "I", "d,s", MATCH_JALR, MASK_JALR | MASK_IMM, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"jalr", "I", "d,s,j", MATCH_JALR, MASK_JALR, match_opcode, WR_xd|RD_xs1 },
++{"lb", "I", "d,o(s)", MATCH_LB, MASK_LB, match_opcode, WR_xd|RD_xs1 },
++{"lb", "I", "d,A", 0, (int) M_LB, match_never, INSN_MACRO },
++{"lbu", "I", "d,o(s)", MATCH_LBU, MASK_LBU, match_opcode, WR_xd|RD_xs1 },
++{"lbu", "I", "d,A", 0, (int) M_LBU, match_never, INSN_MACRO },
++{"lh", "I", "d,o(s)", MATCH_LH, MASK_LH, match_opcode, WR_xd|RD_xs1 },
++{"lh", "I", "d,A", 0, (int) M_LH, match_never, INSN_MACRO },
++{"lhu", "I", "d,o(s)", MATCH_LHU, MASK_LHU, match_opcode, WR_xd|RD_xs1 },
++{"lhu", "I", "d,A", 0, (int) M_LHU, match_never, INSN_MACRO },
++{"lw", "I", "d,o(s)", MATCH_LW, MASK_LW, match_opcode, WR_xd|RD_xs1 },
++{"lw", "I", "d,A", 0, (int) M_LW, match_never, INSN_MACRO },
++{"lui", "I", "d,u", MATCH_LUI, MASK_LUI, match_opcode, WR_xd },
++{"not", "I", "d,s", MATCH_XORI | MASK_IMM, MASK_XORI | MASK_IMM, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"ori", "I", "d,s,j", MATCH_ORI, MASK_ORI, match_opcode, WR_xd|RD_xs1 },
++{"or", "I", "d,s,t", MATCH_OR, MASK_OR, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"or", "I", "d,s,j", MATCH_ORI, MASK_ORI, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"auipc", "I", "d,u", MATCH_AUIPC, MASK_AUIPC, match_opcode, WR_xd },
++{"seqz", "I", "d,s", MATCH_SLTIU | ENCODE_ITYPE_IMM(1), MASK_SLTIU | MASK_IMM, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"snez", "I", "d,t", MATCH_SLTU, MASK_SLTU | MASK_RS1, match_opcode, INSN_ALIAS|WR_xd|RD_xs2 },
++{"sltz", "I", "d,s", MATCH_SLT, MASK_SLT | MASK_RS2, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"sgtz", "I", "d,t", MATCH_SLT, MASK_SLT | MASK_RS1, match_opcode, INSN_ALIAS|WR_xd|RD_xs2 },
++{"slti", "I", "d,s,j", MATCH_SLTI, MASK_SLTI, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"slt", "I", "d,s,t", MATCH_SLT, MASK_SLT, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"slt", "I", "d,s,j", MATCH_SLTI, MASK_SLTI, match_opcode, WR_xd|RD_xs1 },
++{"sltiu", "I", "d,s,j", MATCH_SLTIU, MASK_SLTIU, match_opcode, WR_xd|RD_xs1 },
++{"sltu", "I", "d,s,t", MATCH_SLTU, MASK_SLTU, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"sltu", "I", "d,s,j", MATCH_SLTIU, MASK_SLTIU, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"sgt", "I", "d,t,s", MATCH_SLT, MASK_SLT, match_opcode, INSN_ALIAS|WR_xd|RD_xs1|RD_xs2 },
++{"sgtu", "I", "d,t,s", MATCH_SLTU, MASK_SLTU, match_opcode, INSN_ALIAS|WR_xd|RD_xs1|RD_xs2 },
++{"sb", "I", "t,q(s)", MATCH_SB, MASK_SB, match_opcode, RD_xs1|RD_xs2 },
++{"sb", "I", "t,A,s", 0, (int) M_SB, match_never, INSN_MACRO },
++{"sh", "I", "t,q(s)", MATCH_SH, MASK_SH, match_opcode, RD_xs1|RD_xs2 },
++{"sh", "I", "t,A,s", 0, (int) M_SH, match_never, INSN_MACRO },
++{"sw", "I", "t,q(s)", MATCH_SW, MASK_SW, match_opcode, RD_xs1|RD_xs2 },
++{"sw", "I", "t,A,s", 0, (int) M_SW, match_never, INSN_MACRO },
++{"fence", "I", "", MATCH_FENCE | MASK_PRED | MASK_SUCC, MASK_FENCE | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, INSN_ALIAS },
++{"fence", "I", "P,Q", MATCH_FENCE, MASK_FENCE | MASK_RD | MASK_RS1 | (MASK_IMM & ~MASK_PRED & ~MASK_SUCC), match_opcode, 0 },
++{"fence.i", "I", "", MATCH_FENCE_I, MASK_FENCE | MASK_RD | MASK_RS1 | MASK_IMM, match_opcode, 0 },
++{"rdcycle", "I", "d", MATCH_RDCYCLE, MASK_RDCYCLE, match_opcode, WR_xd },
++{"rdinstret", "I", "d", MATCH_RDINSTRET, MASK_RDINSTRET, match_opcode, WR_xd },
++{"rdtime", "I", "d", MATCH_RDTIME, MASK_RDTIME, match_opcode, WR_xd },
++{"rdcycleh", "32I", "d", MATCH_RDCYCLEH, MASK_RDCYCLEH, match_opcode, WR_xd },
++{"rdinstreth","32I", "d", MATCH_RDINSTRETH, MASK_RDINSTRETH, match_opcode, WR_xd },
++{"rdtimeh", "32I", "d", MATCH_RDTIMEH, MASK_RDTIMEH, match_opcode, WR_xd },
++{"sbreak", "I", "", MATCH_SBREAK, MASK_SBREAK, match_opcode, 0 },
++{"scall", "I", "", MATCH_SCALL, MASK_SCALL, match_opcode, 0 },
++{"xori", "I", "d,s,j", MATCH_XORI, MASK_XORI, match_opcode, WR_xd|RD_xs1 },
++{"xor", "I", "d,s,t", MATCH_XOR, MASK_XOR, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"xor", "I", "d,s,j", MATCH_XORI, MASK_XORI, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"lwu", "64I", "d,o(s)", MATCH_LWU, MASK_LWU, match_opcode, WR_xd|RD_xs1 },
++{"lwu", "64I", "d,A", 0, (int) M_LWU, match_never, INSN_MACRO },
++{"ld", "64I", "d,o(s)", MATCH_LD, MASK_LD, match_opcode, WR_xd|RD_xs1 },
++{"ld", "64I", "d,A", 0, (int) M_LD, match_never, INSN_MACRO },
++{"sd", "64I", "t,q(s)", MATCH_SD, MASK_SD, match_opcode, RD_xs1|RD_xs2 },
++{"sd", "64I", "t,A,s", 0, (int) M_SD, match_never, INSN_MACRO },
++{"sext.w", "64I", "d,s", MATCH_ADDIW, MASK_ADDIW | MASK_IMM, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"addiw", "64I", "d,s,j", MATCH_ADDIW, MASK_ADDIW, match_opcode, WR_xd|RD_xs1 },
++{"addw", "64I", "d,s,t", MATCH_ADDW, MASK_ADDW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"addw", "64I", "d,s,j", MATCH_ADDIW, MASK_ADDIW, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"negw", "64I", "d,t", MATCH_SUBW, MASK_SUBW | MASK_RS1, match_opcode, INSN_ALIAS|WR_xd|RD_xs2 }, /* sub 0 */
++{"slliw", "64I", "d,s,<", MATCH_SLLIW, MASK_SLLIW, match_opcode, WR_xd|RD_xs1 },
++{"sllw", "64I", "d,s,t", MATCH_SLLW, MASK_SLLW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"sllw", "64I", "d,s,<", MATCH_SLLIW, MASK_SLLIW, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"srliw", "64I", "d,s,<", MATCH_SRLIW, MASK_SRLIW, match_opcode, WR_xd|RD_xs1 },
++{"srlw", "64I", "d,s,t", MATCH_SRLW, MASK_SRLW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"srlw", "64I", "d,s,<", MATCH_SRLIW, MASK_SRLIW, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"sraiw", "64I", "d,s,<", MATCH_SRAIW, MASK_SRAIW, match_opcode, WR_xd|RD_xs1 },
++{"sraw", "64I", "d,s,t", MATCH_SRAW, MASK_SRAW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"sraw", "64I", "d,s,<", MATCH_SRAIW, MASK_SRAIW, match_opcode, INSN_ALIAS|WR_xd|RD_xs1 },
++{"subw", "64I", "d,s,t", MATCH_SUBW, MASK_SUBW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++
++/* Atomic memory operation instruction subset */
++{"lr.w", "A", "d,0(s)", MATCH_LR_W, MASK_LR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1 },
++{"sc.w", "A", "d,t,0(s)", MATCH_SC_W, MASK_SC_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoadd.w", "A", "d,t,0(s)", MATCH_AMOADD_W, MASK_AMOADD_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoswap.w", "A", "d,t,0(s)", MATCH_AMOSWAP_W, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoand.w", "A", "d,t,0(s)", MATCH_AMOAND_W, MASK_AMOAND_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoor.w", "A", "d,t,0(s)", MATCH_AMOOR_W, MASK_AMOOR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoxor.w", "A", "d,t,0(s)", MATCH_AMOXOR_W, MASK_AMOXOR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomax.w", "A", "d,t,0(s)", MATCH_AMOMAX_W, MASK_AMOMAX_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomaxu.w", "A", "d,t,0(s)", MATCH_AMOMAXU_W, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomin.w", "A", "d,t,0(s)", MATCH_AMOMIN_W, MASK_AMOMIN_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amominu.w", "A", "d,t,0(s)", MATCH_AMOMINU_W, MASK_AMOMINU_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"lr.w.aq", "A", "d,0(s)", MATCH_LR_W | MASK_AQ, MASK_LR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1 },
++{"sc.w.aq", "A", "d,t,0(s)", MATCH_SC_W | MASK_AQ, MASK_SC_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoadd.w.aq", "A", "d,t,0(s)", MATCH_AMOADD_W | MASK_AQ, MASK_AMOADD_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoswap.w.aq", "A", "d,t,0(s)", MATCH_AMOSWAP_W | MASK_AQ, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoand.w.aq", "A", "d,t,0(s)", MATCH_AMOAND_W | MASK_AQ, MASK_AMOAND_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoor.w.aq", "A", "d,t,0(s)", MATCH_AMOOR_W | MASK_AQ, MASK_AMOOR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoxor.w.aq", "A", "d,t,0(s)", MATCH_AMOXOR_W | MASK_AQ, MASK_AMOXOR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomax.w.aq", "A", "d,t,0(s)", MATCH_AMOMAX_W | MASK_AQ, MASK_AMOMAX_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomaxu.w.aq", "A", "d,t,0(s)", MATCH_AMOMAXU_W | MASK_AQ, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomin.w.aq", "A", "d,t,0(s)", MATCH_AMOMIN_W | MASK_AQ, MASK_AMOMIN_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amominu.w.aq", "A", "d,t,0(s)", MATCH_AMOMINU_W | MASK_AQ, MASK_AMOMINU_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"lr.w.rl", "A", "d,0(s)", MATCH_LR_W | MASK_RL, MASK_LR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1 },
++{"sc.w.rl", "A", "d,t,0(s)", MATCH_SC_W | MASK_RL, MASK_SC_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoadd.w.rl", "A", "d,t,0(s)", MATCH_AMOADD_W | MASK_RL, MASK_AMOADD_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoswap.w.rl", "A", "d,t,0(s)", MATCH_AMOSWAP_W | MASK_RL, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoand.w.rl", "A", "d,t,0(s)", MATCH_AMOAND_W | MASK_RL, MASK_AMOAND_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoor.w.rl", "A", "d,t,0(s)", MATCH_AMOOR_W | MASK_RL, MASK_AMOOR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoxor.w.rl", "A", "d,t,0(s)", MATCH_AMOXOR_W | MASK_RL, MASK_AMOXOR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomax.w.rl", "A", "d,t,0(s)", MATCH_AMOMAX_W | MASK_RL, MASK_AMOMAX_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomaxu.w.rl", "A", "d,t,0(s)", MATCH_AMOMAXU_W | MASK_RL, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomin.w.rl", "A", "d,t,0(s)", MATCH_AMOMIN_W | MASK_RL, MASK_AMOMIN_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amominu.w.rl", "A", "d,t,0(s)", MATCH_AMOMINU_W | MASK_RL, MASK_AMOMINU_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"lr.w.sc", "A", "d,0(s)", MATCH_LR_W | MASK_AQRL, MASK_LR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1 },
++{"sc.w.sc", "A", "d,t,0(s)", MATCH_SC_W | MASK_AQRL, MASK_SC_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoadd.w.sc", "A", "d,t,0(s)", MATCH_AMOADD_W | MASK_AQRL, MASK_AMOADD_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoswap.w.sc", "A", "d,t,0(s)", MATCH_AMOSWAP_W | MASK_AQRL, MASK_AMOSWAP_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoand.w.sc", "A", "d,t,0(s)", MATCH_AMOAND_W | MASK_AQRL, MASK_AMOAND_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoor.w.sc", "A", "d,t,0(s)", MATCH_AMOOR_W | MASK_AQRL, MASK_AMOOR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoxor.w.sc", "A", "d,t,0(s)", MATCH_AMOXOR_W | MASK_AQRL, MASK_AMOXOR_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomax.w.sc", "A", "d,t,0(s)", MATCH_AMOMAX_W | MASK_AQRL, MASK_AMOMAX_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomaxu.w.sc", "A", "d,t,0(s)", MATCH_AMOMAXU_W | MASK_AQRL, MASK_AMOMAXU_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomin.w.sc", "A", "d,t,0(s)", MATCH_AMOMIN_W | MASK_AQRL, MASK_AMOMIN_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amominu.w.sc", "A", "d,t,0(s)", MATCH_AMOMINU_W | MASK_AQRL, MASK_AMOMINU_W | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"lr.d", "64A", "d,0(s)", MATCH_LR_D, MASK_LR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1 },
++{"sc.d", "64A", "d,t,0(s)", MATCH_SC_D, MASK_SC_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoadd.d", "64A", "d,t,0(s)", MATCH_AMOADD_D, MASK_AMOADD_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoswap.d", "64A", "d,t,0(s)", MATCH_AMOSWAP_D, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoand.d", "64A", "d,t,0(s)", MATCH_AMOAND_D, MASK_AMOAND_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoor.d", "64A", "d,t,0(s)", MATCH_AMOOR_D, MASK_AMOOR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoxor.d", "64A", "d,t,0(s)", MATCH_AMOXOR_D, MASK_AMOXOR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomax.d", "64A", "d,t,0(s)", MATCH_AMOMAX_D, MASK_AMOMAX_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomaxu.d", "64A", "d,t,0(s)", MATCH_AMOMAXU_D, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomin.d", "64A", "d,t,0(s)", MATCH_AMOMIN_D, MASK_AMOMIN_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amominu.d", "64A", "d,t,0(s)", MATCH_AMOMINU_D, MASK_AMOMINU_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"lr.d.aq", "64A", "d,0(s)", MATCH_LR_D | MASK_AQ, MASK_LR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1 },
++{"sc.d.aq", "64A", "d,t,0(s)", MATCH_SC_D | MASK_AQ, MASK_SC_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoadd.d.aq", "64A", "d,t,0(s)", MATCH_AMOADD_D | MASK_AQ, MASK_AMOADD_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoswap.d.aq", "64A", "d,t,0(s)", MATCH_AMOSWAP_D | MASK_AQ, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoand.d.aq", "64A", "d,t,0(s)", MATCH_AMOAND_D | MASK_AQ, MASK_AMOAND_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoor.d.aq", "64A", "d,t,0(s)", MATCH_AMOOR_D | MASK_AQ, MASK_AMOOR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoxor.d.aq", "64A", "d,t,0(s)", MATCH_AMOXOR_D | MASK_AQ, MASK_AMOXOR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomax.d.aq", "64A", "d,t,0(s)", MATCH_AMOMAX_D | MASK_AQ, MASK_AMOMAX_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomaxu.d.aq", "64A", "d,t,0(s)", MATCH_AMOMAXU_D | MASK_AQ, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomin.d.aq", "64A", "d,t,0(s)", MATCH_AMOMIN_D | MASK_AQ, MASK_AMOMIN_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amominu.d.aq", "64A", "d,t,0(s)", MATCH_AMOMINU_D | MASK_AQ, MASK_AMOMINU_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"lr.d.rl", "64A", "d,0(s)", MATCH_LR_D | MASK_RL, MASK_LR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1 },
++{"sc.d.rl", "64A", "d,t,0(s)", MATCH_SC_D | MASK_RL, MASK_SC_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoadd.d.rl", "64A", "d,t,0(s)", MATCH_AMOADD_D | MASK_RL, MASK_AMOADD_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoswap.d.rl", "64A", "d,t,0(s)", MATCH_AMOSWAP_D | MASK_RL, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoand.d.rl", "64A", "d,t,0(s)", MATCH_AMOAND_D | MASK_RL, MASK_AMOAND_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoor.d.rl", "64A", "d,t,0(s)", MATCH_AMOOR_D | MASK_RL, MASK_AMOOR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoxor.d.rl", "64A", "d,t,0(s)", MATCH_AMOXOR_D | MASK_RL, MASK_AMOXOR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomax.d.rl", "64A", "d,t,0(s)", MATCH_AMOMAX_D | MASK_RL, MASK_AMOMAX_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomaxu.d.rl", "64A", "d,t,0(s)", MATCH_AMOMAXU_D | MASK_RL, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomin.d.rl", "64A", "d,t,0(s)", MATCH_AMOMIN_D | MASK_RL, MASK_AMOMIN_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amominu.d.rl", "64A", "d,t,0(s)", MATCH_AMOMINU_D | MASK_RL, MASK_AMOMINU_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"lr.d.sc", "64A", "d,0(s)", MATCH_LR_D | MASK_AQRL, MASK_LR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1 },
++{"sc.d.sc", "64A", "d,t,0(s)", MATCH_SC_D | MASK_AQRL, MASK_SC_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoadd.d.sc", "64A", "d,t,0(s)", MATCH_AMOADD_D | MASK_AQRL, MASK_AMOADD_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoswap.d.sc", "64A", "d,t,0(s)", MATCH_AMOSWAP_D | MASK_AQRL, MASK_AMOSWAP_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoand.d.sc", "64A", "d,t,0(s)", MATCH_AMOAND_D | MASK_AQRL, MASK_AMOAND_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoor.d.sc", "64A", "d,t,0(s)", MATCH_AMOOR_D | MASK_AQRL, MASK_AMOOR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amoxor.d.sc", "64A", "d,t,0(s)", MATCH_AMOXOR_D | MASK_AQRL, MASK_AMOXOR_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomax.d.sc", "64A", "d,t,0(s)", MATCH_AMOMAX_D | MASK_AQRL, MASK_AMOMAX_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomaxu.d.sc", "64A", "d,t,0(s)", MATCH_AMOMAXU_D | MASK_AQRL, MASK_AMOMAXU_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amomin.d.sc", "64A", "d,t,0(s)", MATCH_AMOMIN_D | MASK_AQRL, MASK_AMOMIN_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"amominu.d.sc", "64A", "d,t,0(s)", MATCH_AMOMINU_D | MASK_AQRL, MASK_AMOMINU_D | MASK_AQRL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++
++/* Multiply/Divide instruction subset */
++{"mul", "M", "d,s,t", MATCH_MUL, MASK_MUL, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"mulh", "M", "d,s,t", MATCH_MULH, MASK_MULH, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"mulhu", "M", "d,s,t", MATCH_MULHU, MASK_MULHU, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"mulhsu", "M", "d,s,t", MATCH_MULHSU, MASK_MULHSU, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"div", "M", "d,s,t", MATCH_DIV, MASK_DIV, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"divu", "M", "d,s,t", MATCH_DIVU, MASK_DIVU, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"rem", "M", "d,s,t", MATCH_REM, MASK_REM, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"remu", "M", "d,s,t", MATCH_REMU, MASK_REMU, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"mulw", "64M", "d,s,t", MATCH_MULW, MASK_MULW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"divw", "64M", "d,s,t", MATCH_DIVW, MASK_DIVW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"divuw", "64M", "d,s,t", MATCH_DIVUW, MASK_DIVUW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"remw", "64M", "d,s,t", MATCH_REMW, MASK_REMW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++{"remuw", "64M", "d,s,t", MATCH_REMUW, MASK_REMUW, match_opcode, WR_xd|RD_xs1|RD_xs2 },
++
++/* Single-precision floating-point instruction subset */
++{"frsr", "F", "d", MATCH_FRCSR, MASK_FRCSR, match_opcode, WR_xd },
++{"fssr", "F", "s", MATCH_FSCSR, MASK_FSCSR | MASK_RD, match_opcode, RD_xs1 },
++{"fssr", "F", "d,s", MATCH_FSCSR, MASK_FSCSR, match_opcode, WR_xd|RD_xs1 },
++{"frcsr", "F", "d", MATCH_FRCSR, MASK_FRCSR, match_opcode, WR_xd },
++{"fscsr", "F", "s", MATCH_FSCSR, MASK_FSCSR | MASK_RD, match_opcode, RD_xs1 },
++{"fscsr", "F", "d,s", MATCH_FSCSR, MASK_FSCSR, match_opcode, WR_xd|RD_xs1 },
++{"frrm", "F", "d", MATCH_FRRM, MASK_FRRM, match_opcode, WR_xd },
++{"fsrm", "F", "s", MATCH_FSRM, MASK_FSRM | MASK_RD, match_opcode, RD_xs1 },
++{"fsrm", "F", "d,s", MATCH_FSRM, MASK_FSRM, match_opcode, WR_xd|RD_xs1 },
++{"frflags", "F", "d", MATCH_FRFLAGS, MASK_FRFLAGS, match_opcode, WR_xd },
++{"fsflags", "F", "s", MATCH_FSFLAGS, MASK_FSFLAGS | MASK_RD, match_opcode, RD_xs1 },
++{"fsflags", "F", "d,s", MATCH_FSFLAGS, MASK_FSFLAGS, match_opcode, WR_xd|RD_xs1 },
++{"flw", "F", "D,o(s)", MATCH_FLW, MASK_FLW, match_opcode, WR_fd|RD_xs1 },
++{"flw", "F", "D,A,s", 0, (int) M_FLW, match_never, INSN_MACRO },
++{"fsw", "F", "T,q(s)", MATCH_FSW, MASK_FSW, match_opcode, RD_xs1|RD_fs2 },
++{"fsw", "F", "T,A,s", 0, (int) M_FSW, match_never, INSN_MACRO },
++{"fmv.x.s", "F", "d,S", MATCH_FMV_X_S, MASK_FMV_X_S, match_opcode, WR_xd|RD_fs1 },
++{"fmv.s.x", "F", "D,s", MATCH_FMV_S_X, MASK_FMV_S_X, match_opcode, WR_fd|RD_xs1 },
++{"fmv.s", "F", "D,U", MATCH_FSGNJ_S, MASK_FSGNJ_S, match_rs1_eq_rs2, INSN_ALIAS|WR_fd|RD_fs1|RD_fs2 },
++{"fneg.s", "F", "D,U", MATCH_FSGNJN_S, MASK_FSGNJN_S, match_rs1_eq_rs2, INSN_ALIAS|WR_fd|RD_fs1|RD_fs2 },
++{"fabs.s", "F", "D,U", MATCH_FSGNJX_S, MASK_FSGNJX_S, match_rs1_eq_rs2, INSN_ALIAS|WR_fd|RD_fs1|RD_fs2 },
++{"fsgnj.s", "F", "D,S,T", MATCH_FSGNJ_S, MASK_FSGNJ_S, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsgnjn.s", "F", "D,S,T", MATCH_FSGNJN_S, MASK_FSGNJN_S, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsgnjx.s", "F", "D,S,T", MATCH_FSGNJX_S, MASK_FSGNJX_S, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fadd.s", "F", "D,S,T", MATCH_FADD_S | MASK_RM, MASK_FADD_S | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fadd.s", "F", "D,S,T,m", MATCH_FADD_S, MASK_FADD_S, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsub.s", "F", "D,S,T", MATCH_FSUB_S | MASK_RM, MASK_FSUB_S | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsub.s", "F", "D,S,T,m", MATCH_FSUB_S, MASK_FSUB_S, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmul.s", "F", "D,S,T", MATCH_FMUL_S | MASK_RM, MASK_FMUL_S | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmul.s", "F", "D,S,T,m", MATCH_FMUL_S, MASK_FMUL_S, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fdiv.s", "F", "D,S,T", MATCH_FDIV_S | MASK_RM, MASK_FDIV_S | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fdiv.s", "F", "D,S,T,m", MATCH_FDIV_S, MASK_FDIV_S, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsqrt.s", "F", "D,S", MATCH_FSQRT_S | MASK_RM, MASK_FSQRT_S | MASK_RM, match_opcode, WR_fd|RD_fs1 },
++{"fsqrt.s", "F", "D,S,m", MATCH_FSQRT_S, MASK_FSQRT_S, match_opcode, WR_fd|RD_fs1 },
++{"fmin.s", "F", "D,S,T", MATCH_FMIN_S, MASK_FMIN_S, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmax.s", "F", "D,S,T", MATCH_FMAX_S, MASK_FMAX_S, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmadd.s", "F", "D,S,T,R", MATCH_FMADD_S | MASK_RM, MASK_FMADD_S | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fmadd.s", "F", "D,S,T,R,m", MATCH_FMADD_S, MASK_FMADD_S, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmadd.s", "F", "D,S,T,R", MATCH_FNMADD_S | MASK_RM, MASK_FNMADD_S | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmadd.s", "F", "D,S,T,R,m", MATCH_FNMADD_S, MASK_FNMADD_S, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fmsub.s", "F", "D,S,T,R", MATCH_FMSUB_S | MASK_RM, MASK_FMSUB_S | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fmsub.s", "F", "D,S,T,R,m", MATCH_FMSUB_S, MASK_FMSUB_S, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmsub.s", "F", "D,S,T,R", MATCH_FNMSUB_S | MASK_RM, MASK_FNMSUB_S | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmsub.s", "F", "D,S,T,R,m", MATCH_FNMSUB_S, MASK_FNMSUB_S, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fcvt.w.s", "F", "d,S", MATCH_FCVT_W_S | MASK_RM, MASK_FCVT_W_S | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.w.s", "F", "d,S,m", MATCH_FCVT_W_S, MASK_FCVT_W_S, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.wu.s", "F", "d,S", MATCH_FCVT_WU_S | MASK_RM, MASK_FCVT_WU_S | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.wu.s", "F", "d,S,m", MATCH_FCVT_WU_S, MASK_FCVT_WU_S, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.s.w", "F", "D,s", MATCH_FCVT_S_W | MASK_RM, MASK_FCVT_S_W | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.s.w", "F", "D,s,m", MATCH_FCVT_S_W, MASK_FCVT_S_W, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.s.wu", "F", "D,s", MATCH_FCVT_S_WU | MASK_RM, MASK_FCVT_S_W | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.s.wu", "F", "D,s,m", MATCH_FCVT_S_WU, MASK_FCVT_S_WU, match_opcode, WR_fd|RD_xs1 },
++{"fclass.s", "F", "d,S", MATCH_FCLASS_S, MASK_FCLASS_S, match_opcode, WR_xd|RD_fs1 },
++{"feq.s", "F", "d,S,T", MATCH_FEQ_S, MASK_FEQ_S, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"flt.s", "F", "d,S,T", MATCH_FLT_S, MASK_FLT_S, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fle.s", "F", "d,S,T", MATCH_FLE_S, MASK_FLE_S, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fgt.s", "F", "d,T,S", MATCH_FLT_S, MASK_FLT_S, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fge.s", "F", "d,T,S", MATCH_FLE_S, MASK_FLE_S, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fcvt.l.s", "64F", "d,S", MATCH_FCVT_L_S | MASK_RM, MASK_FCVT_L_S | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.l.s", "64F", "d,S,m", MATCH_FCVT_L_S, MASK_FCVT_L_S, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.lu.s", "64F", "d,S", MATCH_FCVT_LU_S | MASK_RM, MASK_FCVT_LU_S | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.lu.s", "64F", "d,S,m", MATCH_FCVT_LU_S, MASK_FCVT_LU_S, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.s.l", "64F", "D,s", MATCH_FCVT_S_L | MASK_RM, MASK_FCVT_S_L | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.s.l", "64F", "D,s,m", MATCH_FCVT_S_L, MASK_FCVT_S_L, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.s.lu", "64F", "D,s", MATCH_FCVT_S_LU | MASK_RM, MASK_FCVT_S_L | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.s.lu", "64F", "D,s,m", MATCH_FCVT_S_LU, MASK_FCVT_S_LU, match_opcode, WR_fd|RD_xs1 },
++
++/* Double-precision floating-point instruction subset */
++{"fld", "D", "D,o(s)", MATCH_FLD, MASK_FLD, match_opcode, WR_fd|RD_xs1 },
++{"fld", "D", "D,A,s", 0, (int) M_FLD, match_never, INSN_MACRO },
++{"fsd", "D", "T,q(s)", MATCH_FSD, MASK_FSD, match_opcode, RD_xs1|RD_fs2 },
++{"fsd", "D", "T,A,s", 0, (int) M_FSD, match_never, INSN_MACRO },
++{"fmv.d", "D", "D,U", MATCH_FSGNJ_D, MASK_FSGNJ_D, match_rs1_eq_rs2, INSN_ALIAS|WR_fd|RD_fs1|RD_fs2 },
++{"fneg.d", "D", "D,U", MATCH_FSGNJN_D, MASK_FSGNJN_D, match_rs1_eq_rs2, INSN_ALIAS|WR_fd|RD_fs1|RD_fs2 },
++{"fabs.d", "D", "D,U", MATCH_FSGNJX_D, MASK_FSGNJX_D, match_rs1_eq_rs2, INSN_ALIAS|WR_fd|RD_fs1|RD_fs2 },
++{"fsgnj.d", "D", "D,S,T", MATCH_FSGNJ_D, MASK_FSGNJ_D, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsgnjn.d", "D", "D,S,T", MATCH_FSGNJN_D, MASK_FSGNJN_D, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsgnjx.d", "D", "D,S,T", MATCH_FSGNJX_D, MASK_FSGNJX_D, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fadd.d", "D", "D,S,T", MATCH_FADD_D | MASK_RM, MASK_FADD_D | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fadd.d", "D", "D,S,T,m", MATCH_FADD_D, MASK_FADD_D, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsub.d", "D", "D,S,T", MATCH_FSUB_D | MASK_RM, MASK_FSUB_D | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsub.d", "D", "D,S,T,m", MATCH_FSUB_D, MASK_FSUB_D, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmul.d", "D", "D,S,T", MATCH_FMUL_D | MASK_RM, MASK_FMUL_D | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmul.d", "D", "D,S,T,m", MATCH_FMUL_D, MASK_FMUL_D, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fdiv.d", "D", "D,S,T", MATCH_FDIV_D | MASK_RM, MASK_FDIV_D | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fdiv.d", "D", "D,S,T,m", MATCH_FDIV_D, MASK_FDIV_D, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsqrt.d", "D", "D,S", MATCH_FSQRT_D | MASK_RM, MASK_FSQRT_D | MASK_RM, match_opcode, WR_fd|RD_fs1 },
++{"fsqrt.d", "D", "D,S,m", MATCH_FSQRT_D, MASK_FSQRT_D, match_opcode, WR_fd|RD_fs1 },
++{"fmin.d", "D", "D,S,T", MATCH_FMIN_D, MASK_FMIN_D, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmax.d", "D", "D,S,T", MATCH_FMAX_D, MASK_FMAX_D, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmadd.d", "D", "D,S,T,R", MATCH_FMADD_D | MASK_RM, MASK_FMADD_D | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fmadd.d", "D", "D,S,T,R,m", MATCH_FMADD_D, MASK_FMADD_D, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmadd.d", "D", "D,S,T,R", MATCH_FNMADD_D | MASK_RM, MASK_FNMADD_D | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmadd.d", "D", "D,S,T,R,m", MATCH_FNMADD_D, MASK_FNMADD_D, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fmsub.d", "D", "D,S,T,R", MATCH_FMSUB_D | MASK_RM, MASK_FMSUB_D | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fmsub.d", "D", "D,S,T,R,m", MATCH_FMSUB_D, MASK_FMSUB_D, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmsub.d", "D", "D,S,T,R", MATCH_FNMSUB_D | MASK_RM, MASK_FNMSUB_D | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmsub.d", "D", "D,S,T,R,m", MATCH_FNMSUB_D, MASK_FNMSUB_D, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fcvt.w.d", "D", "d,S", MATCH_FCVT_W_D | MASK_RM, MASK_FCVT_W_D | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.w.d", "D", "d,S,m", MATCH_FCVT_W_D, MASK_FCVT_W_D, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.wu.d", "D", "d,S", MATCH_FCVT_WU_D | MASK_RM, MASK_FCVT_WU_D | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.wu.d", "D", "d,S,m", MATCH_FCVT_WU_D, MASK_FCVT_WU_D, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.d.w", "D", "D,s", MATCH_FCVT_D_W, MASK_FCVT_D_W | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.d.wu", "D", "D,s", MATCH_FCVT_D_WU, MASK_FCVT_D_WU | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.d.s", "D", "D,S", MATCH_FCVT_D_S, MASK_FCVT_D_S | MASK_RM, match_opcode, WR_fd|RD_fs1 },
++{"fcvt.s.d", "D", "D,S", MATCH_FCVT_S_D | MASK_RM, MASK_FCVT_S_D | MASK_RM, match_opcode, WR_fd|RD_fs1 },
++{"fcvt.s.d", "D", "D,S,m", MATCH_FCVT_S_D, MASK_FCVT_S_D, match_opcode, WR_fd|RD_fs1 },
++{"fclass.d", "D", "d,S", MATCH_FCLASS_D, MASK_FCLASS_D, match_opcode, WR_xd|RD_fs1 },
++{"feq.d", "D", "d,S,T", MATCH_FEQ_D, MASK_FEQ_D, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"flt.d", "D", "d,S,T", MATCH_FLT_D, MASK_FLT_D, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fle.d", "D", "d,S,T", MATCH_FLE_D, MASK_FLE_D, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fgt.d", "D", "d,T,S", MATCH_FLT_D, MASK_FLT_D, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fge.d", "D", "d,T,S", MATCH_FLE_D, MASK_FLE_D, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fmv.x.d", "64D", "d,S", MATCH_FMV_X_D, MASK_FMV_X_D, match_opcode, WR_xd|RD_fs1 },
++{"fmv.d.x", "64D", "D,s", MATCH_FMV_D_X, MASK_FMV_D_X, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.l.d", "64D", "d,S", MATCH_FCVT_L_D | MASK_RM, MASK_FCVT_L_D | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.l.d", "64D", "d,S,m", MATCH_FCVT_L_D, MASK_FCVT_L_D, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.lu.d", "64D", "d,S", MATCH_FCVT_LU_D | MASK_RM, MASK_FCVT_LU_D | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.lu.d", "64D", "d,S,m", MATCH_FCVT_LU_D, MASK_FCVT_LU_D, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.d.l", "64D", "D,s", MATCH_FCVT_D_L | MASK_RM, MASK_FCVT_D_L | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.d.l", "64D", "D,s,m", MATCH_FCVT_D_L, MASK_FCVT_D_L, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.d.lu", "64D", "D,s", MATCH_FCVT_D_LU | MASK_RM, MASK_FCVT_D_L | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.d.lu", "64D", "D,s,m", MATCH_FCVT_D_LU, MASK_FCVT_D_LU, match_opcode, WR_fd|RD_xs1 },
++
++/* Supervisor instructions */
++{"csrr", "I", "d,E", MATCH_CSRRS, MASK_CSRRS | MASK_RS1, match_opcode, WR_xd },
++{"csrwi", "I", "E,Z", MATCH_CSRRWI, MASK_CSRRWI | MASK_RD, match_opcode, WR_xd|RD_xs1 },
++{"csrw", "I", "E,s", MATCH_CSRRW, MASK_CSRRW | MASK_RD, match_opcode, RD_xs1 },
++{"csrw", "I", "E,Z", MATCH_CSRRWI, MASK_CSRRWI | MASK_RD, match_opcode, WR_xd|RD_xs1 },
++{"csrsi", "I", "E,Z", MATCH_CSRRSI, MASK_CSRRSI | MASK_RD, match_opcode, WR_xd|RD_xs1 },
++{"csrs", "I", "E,s", MATCH_CSRRS, MASK_CSRRS | MASK_RD, match_opcode, WR_xd|RD_xs1 },
++{"csrs", "I", "E,Z", MATCH_CSRRSI, MASK_CSRRSI | MASK_RD, match_opcode, WR_xd|RD_xs1 },
++{"csrci", "I", "E,Z", MATCH_CSRRCI, MASK_CSRRCI | MASK_RD, match_opcode, WR_xd|RD_xs1 },
++{"csrc", "I", "E,s", MATCH_CSRRC, MASK_CSRRC | MASK_RD, match_opcode, WR_xd|RD_xs1 },
++{"csrc", "I", "E,Z", MATCH_CSRRCI, MASK_CSRRCI | MASK_RD, match_opcode, WR_xd|RD_xs1 },
++{"csrrw", "I", "d,E,s", MATCH_CSRRW, MASK_CSRRW, match_opcode, WR_xd|RD_xs1 },
++{"csrrw", "I", "d,E,Z", MATCH_CSRRWI, MASK_CSRRWI, match_opcode, WR_xd|RD_xs1 },
++{"csrrs", "I", "d,E,s", MATCH_CSRRS, MASK_CSRRS, match_opcode, WR_xd|RD_xs1 },
++{"csrrs", "I", "d,E,Z", MATCH_CSRRSI, MASK_CSRRSI, match_opcode, WR_xd|RD_xs1 },
++{"csrrc", "I", "d,E,s", MATCH_CSRRC, MASK_CSRRC, match_opcode, WR_xd|RD_xs1 },
++{"csrrc", "I", "d,E,Z", MATCH_CSRRCI, MASK_CSRRCI, match_opcode, WR_xd|RD_xs1 },
++{"csrrwi", "I", "d,E,Z", MATCH_CSRRWI, MASK_CSRRWI, match_opcode, WR_xd|RD_xs1 },
++{"csrrsi", "I", "d,E,Z", MATCH_CSRRSI, MASK_CSRRSI, match_opcode, WR_xd|RD_xs1 },
++{"csrrci", "I", "d,E,Z", MATCH_CSRRCI, MASK_CSRRCI, match_opcode, WR_xd|RD_xs1 },
++{"sret", "I", "", MATCH_SRET, MASK_SRET, match_opcode, 0 },
++
++/* Half-precision floating-point instruction subset */
++{"flh", "Xhwacha", "D,o(s)", MATCH_FLH, MASK_FLH, match_opcode, WR_fd|RD_xs1 },
++{"fsh", "Xhwacha", "T,q(s)", MATCH_FSH, MASK_FSH, match_opcode, RD_xs1|RD_fs2 },
++{"fsgnj.h", "Xhwacha", "D,S,T", MATCH_FSGNJ_H, MASK_FSGNJ_H, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsgnjn.h", "Xhwacha", "D,S,T", MATCH_FSGNJN_H, MASK_FSGNJN_H, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsgnjx.h", "Xhwacha", "D,S,T", MATCH_FSGNJX_H, MASK_FSGNJX_H, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fadd.h", "Xhwacha", "D,S,T", MATCH_FADD_H | MASK_RM, MASK_FADD_H | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fadd.h", "Xhwacha", "D,S,T,m", MATCH_FADD_H, MASK_FADD_H, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsub.h", "Xhwacha", "D,S,T", MATCH_FSUB_H | MASK_RM, MASK_FSUB_H | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsub.h", "Xhwacha", "D,S,T,m", MATCH_FSUB_H, MASK_FSUB_H, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmul.h", "Xhwacha", "D,S,T", MATCH_FMUL_H | MASK_RM, MASK_FMUL_H | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmul.h", "Xhwacha", "D,S,T,m", MATCH_FMUL_H, MASK_FMUL_H, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fdiv.h", "Xhwacha", "D,S,T", MATCH_FDIV_H | MASK_RM, MASK_FDIV_H | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fdiv.h", "Xhwacha", "D,S,T,m", MATCH_FDIV_H, MASK_FDIV_H, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fsqrt.h", "Xhwacha", "D,S", MATCH_FSQRT_H | MASK_RM, MASK_FSQRT_H | MASK_RM, match_opcode, WR_fd|RD_fs1 },
++{"fsqrt.h", "Xhwacha", "D,S,m", MATCH_FSQRT_H, MASK_FSQRT_H, match_opcode, WR_fd|RD_fs1 },
++{"fmin.h", "Xhwacha", "D,S,T", MATCH_FMIN_H, MASK_FMIN_H, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmax.h", "Xhwacha", "D,S,T", MATCH_FMAX_H, MASK_FMAX_H, match_opcode, WR_fd|RD_fs1|RD_fs2 },
++{"fmadd.h", "Xhwacha", "D,S,T,R", MATCH_FMADD_H | MASK_RM, MASK_FMADD_H | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fmadd.h", "Xhwacha", "D,S,T,R,m", MATCH_FMADD_H, MASK_FMADD_H, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmadd.h", "Xhwacha", "D,S,T,R", MATCH_FNMADD_H | MASK_RM, MASK_FNMADD_H | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmadd.h", "Xhwacha", "D,S,T,R,m", MATCH_FNMADD_H, MASK_FNMADD_H, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fmsub.h", "Xhwacha", "D,S,T,R", MATCH_FMSUB_H | MASK_RM, MASK_FMSUB_H | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fmsub.h", "Xhwacha", "D,S,T,R,m", MATCH_FMSUB_H, MASK_FMSUB_H, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmsub.h", "Xhwacha", "D,S,T,R", MATCH_FNMSUB_H | MASK_RM, MASK_FNMSUB_H | MASK_RM, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fnmsub.h", "Xhwacha", "D,S,T,R,m", MATCH_FNMSUB_H, MASK_FNMSUB_H, match_opcode, WR_fd|RD_fs1|RD_fs2|RD_fs3 },
++{"fcvt.s.h", "Xhwacha", "D,S", MATCH_FCVT_S_H, MASK_FCVT_S_H | MASK_RM, match_opcode, WR_fd|RD_fs1 },
++{"fcvt.h.s", "Xhwacha", "D,S", MATCH_FCVT_H_S | MASK_RM, MASK_FCVT_H_S | MASK_RM, match_opcode, WR_fd|RD_fs1 },
++{"fcvt.h.s", "Xhwacha", "D,S,m", MATCH_FCVT_H_S, MASK_FCVT_H_S, match_opcode, WR_fd|RD_fs1 },
++{"fcvt.d.h", "Xhwacha", "D,S", MATCH_FCVT_D_H, MASK_FCVT_D_H | MASK_RM, match_opcode, WR_fd|RD_fs1 },
++{"fcvt.h.d", "Xhwacha", "D,S", MATCH_FCVT_H_D | MASK_RM, MASK_FCVT_H_D | MASK_RM, match_opcode, WR_fd|RD_fs1 },
++{"fcvt.h.d", "Xhwacha", "D,S,m", MATCH_FCVT_H_D, MASK_FCVT_H_D, match_opcode, WR_fd|RD_fs1 },
++{"feq.h", "Xhwacha", "d,S,T", MATCH_FEQ_H, MASK_FEQ_H, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"flt.h", "Xhwacha", "d,S,T", MATCH_FLT_H, MASK_FLT_H, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fle.h", "Xhwacha", "d,S,T", MATCH_FLE_H, MASK_FLE_H, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fgt.h", "Xhwacha", "d,T,S", MATCH_FLT_H, MASK_FLT_H, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fge.h", "Xhwacha", "d,T,S", MATCH_FLE_H, MASK_FLE_H, match_opcode, WR_xd|RD_fs1|RD_fs2 },
++{"fmv.x.h", "Xhwacha", "d,S", MATCH_FMV_X_H, MASK_FMV_X_H, match_opcode, WR_xd|RD_fs1 },
++{"fmv.h.x", "Xhwacha", "D,s", MATCH_FMV_H_X, MASK_FMV_H_X, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.w.h", "Xhwacha", "d,S", MATCH_FCVT_W_H | MASK_RM, MASK_FCVT_W_H | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.w.h", "Xhwacha", "d,S,m", MATCH_FCVT_W_H, MASK_FCVT_W_H, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.wu.h", "Xhwacha", "d,S", MATCH_FCVT_WU_H | MASK_RM, MASK_FCVT_WU_H | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.wu.h", "Xhwacha", "d,S,m", MATCH_FCVT_WU_H, MASK_FCVT_WU_H, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.h.w", "Xhwacha", "D,s", MATCH_FCVT_H_W, MASK_FCVT_H_W | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.h.wu", "Xhwacha", "D,s", MATCH_FCVT_H_WU, MASK_FCVT_H_WU | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.l.h", "Xhwacha", "d,S", MATCH_FCVT_L_H | MASK_RM, MASK_FCVT_L_H | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.l.h", "Xhwacha", "d,S,m", MATCH_FCVT_L_H, MASK_FCVT_L_H, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.lu.h", "Xhwacha", "d,S", MATCH_FCVT_LU_H | MASK_RM, MASK_FCVT_LU_H | MASK_RM, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.lu.h", "Xhwacha", "d,S,m", MATCH_FCVT_LU_H, MASK_FCVT_LU_H, match_opcode, WR_xd|RD_fs1 },
++{"fcvt.h.l", "Xhwacha", "D,s", MATCH_FCVT_H_L | MASK_RM, MASK_FCVT_H_L | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.h.l", "Xhwacha", "D,s,m", MATCH_FCVT_H_L, MASK_FCVT_H_L, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.h.lu", "Xhwacha", "D,s", MATCH_FCVT_H_LU | MASK_RM, MASK_FCVT_H_L | MASK_RM, match_opcode, WR_fd|RD_xs1 },
++{"fcvt.h.lu", "Xhwacha", "D,s,m", MATCH_FCVT_H_LU, MASK_FCVT_H_LU, match_opcode, WR_fd|RD_xs1 },
++
++/* Rocket Custom Coprocessor extension */
++{"custom0", "Xcustom", "d,s,t,^j", MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2, match_opcode, 0},
++{"custom0", "Xcustom", "d,s,^t,^j", MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1, match_opcode, 0},
++{"custom0", "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD, match_opcode, 0},
++{"custom0", "Xcustom", "^d,s,t,^j", MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2, match_opcode, 0},
++{"custom0", "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1, match_opcode, 0},
++{"custom0", "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM0, MASK_CUSTOM0, match_opcode, 0},
++{"custom1", "Xcustom", "d,s,t,^j", MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2, match_opcode, 0},
++{"custom1", "Xcustom", "d,s,^t,^j", MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1, match_opcode, 0},
++{"custom1", "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD, match_opcode, 0},
++{"custom1", "Xcustom", "^d,s,t,^j", MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2, match_opcode, 0},
++{"custom1", "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1, match_opcode, 0},
++{"custom1", "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM1, MASK_CUSTOM1, match_opcode, 0},
++{"custom2", "Xcustom", "d,s,t,^j", MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2, match_opcode, 0},
++{"custom2", "Xcustom", "d,s,^t,^j", MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1, match_opcode, 0},
++{"custom2", "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD, match_opcode, 0},
++{"custom2", "Xcustom", "^d,s,t,^j", MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2, match_opcode, 0},
++{"custom2", "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1, match_opcode, 0},
++{"custom2", "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM2, MASK_CUSTOM2, match_opcode, 0},
++{"custom3", "Xcustom", "d,s,t,^j", MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2, match_opcode, 0},
++{"custom3", "Xcustom", "d,s,^t,^j", MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1, match_opcode, 0},
++{"custom3", "Xcustom", "d,^s,^t,^j", MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD, match_opcode, 0},
++{"custom3", "Xcustom", "^d,s,t,^j", MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2, match_opcode, 0},
++{"custom3", "Xcustom", "^d,s,^t,^j", MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1, match_opcode, 0},
++{"custom3", "Xcustom", "^d,^s,^t,^j", MATCH_CUSTOM3, MASK_CUSTOM3, match_opcode, 0},
++
++/* Xhwacha extension */
++{"stop", "Xhwacha", "", MATCH_STOP, MASK_STOP, match_opcode, 0},
++{"utidx", "Xhwacha", "d", MATCH_UTIDX, MASK_UTIDX, match_opcode, WR_xd},
++{"movz", "Xhwacha", "d,s,t", MATCH_MOVZ, MASK_MOVZ, match_opcode, WR_xd|RD_xs1|RD_xs2},
++{"movn", "Xhwacha", "d,s,t", MATCH_MOVN, MASK_MOVN, match_opcode, WR_xd|RD_xs1|RD_xs2},
++{"fmovz", "Xhwacha", "D,s,T", MATCH_FMOVZ, MASK_FMOVZ, match_opcode, WR_fd|RD_xs1|RD_fs2},
++{"fmovn", "Xhwacha", "D,s,T", MATCH_FMOVN, MASK_FMOVN, match_opcode, WR_fd|RD_xs1|RD_fs2},
++
++/* unit stride */
++/* xloads */
++{"vld", "Xhwacha", "#d,s", MATCH_VLD, MASK_VLD, match_opcode, 0},
++{"vlw", "Xhwacha", "#d,s", MATCH_VLW, MASK_VLW, match_opcode, 0},
++{"vlwu", "Xhwacha", "#d,s", MATCH_VLWU, MASK_VLWU, match_opcode, 0},
++{"vlh", "Xhwacha", "#d,s", MATCH_VLH, MASK_VLH, match_opcode, 0},
++{"vlhu", "Xhwacha", "#d,s", MATCH_VLHU, MASK_VLHU, match_opcode, 0},
++{"vlb", "Xhwacha", "#d,s", MATCH_VLB, MASK_VLB, match_opcode, 0},
++{"vlbu", "Xhwacha", "#d,s", MATCH_VLBU, MASK_VLBU, match_opcode, 0},
++/* floads */
++{"vfld", "Xhwacha", "#D,s", MATCH_VFLD, MASK_VFLD, match_opcode, 0},
++{"vflw", "Xhwacha", "#D,s", MATCH_VFLW, MASK_VFLW, match_opcode, 0},
++
++/* stride */
++/* xloads */
++{"vlstd", "Xhwacha", "#d,s,t", MATCH_VLSTD, MASK_VLSTD, match_opcode, 0},
++{"vlstw", "Xhwacha", "#d,s,t", MATCH_VLSTW, MASK_VLSTW, match_opcode, 0},
++{"vlstwu", "Xhwacha", "#d,s,t", MATCH_VLSTWU, MASK_VLSTWU, match_opcode, 0},
++{"vlsth", "Xhwacha", "#d,s,t", MATCH_VLSTH, MASK_VLSTH, match_opcode, 0},
++{"vlsthu", "Xhwacha", "#d,s,t", MATCH_VLSTHU, MASK_VLSTHU, match_opcode, 0},
++{"vlstb", "Xhwacha", "#d,s,t", MATCH_VLSTB, MASK_VLSTB, match_opcode, 0},
++{"vlstbu", "Xhwacha", "#d,s,t", MATCH_VLSTBU, MASK_VLSTBU, match_opcode, 0},
++/* floads */
++{"vflstd", "Xhwacha", "#D,s,t", MATCH_VFLSTD, MASK_VFLSTD, match_opcode, 0},
++{"vflstw", "Xhwacha", "#D,s,t", MATCH_VFLSTW, MASK_VFLSTW, match_opcode, 0},
++
++/* segment */
++/* xloads */
++{"vlsegd", "Xhwacha", "#d,s,#n", MATCH_VLSEGD, MASK_VLSEGD, match_opcode, 0},
++{"vlsegw", "Xhwacha", "#d,s,#n", MATCH_VLSEGW, MASK_VLSEGW, match_opcode, 0},
++{"vlsegwu", "Xhwacha", "#d,s,#n", MATCH_VLSEGWU, MASK_VLSEGWU, match_opcode, 0},
++{"vlsegh", "Xhwacha", "#d,s,#n", MATCH_VLSEGH, MASK_VLSEGH, match_opcode, 0},
++{"vlseghu", "Xhwacha", "#d,s,#n", MATCH_VLSEGHU, MASK_VLSEGHU, match_opcode, 0},
++{"vlsegb", "Xhwacha", "#d,s,#n", MATCH_VLSEGB, MASK_VLSEGB, match_opcode, 0},
++{"vlsegbu", "Xhwacha", "#d,s,#n", MATCH_VLSEGBU, MASK_VLSEGBU, match_opcode, 0},
++/* floads */
++{"vflsegd", "Xhwacha", "#D,s,#n", MATCH_VFLSEGD, MASK_VFLSEGD, match_opcode, 0},
++{"vflsegw", "Xhwacha", "#D,s,#n", MATCH_VFLSEGW, MASK_VFLSEGW, match_opcode, 0},
++
++/* stride segment */
++/* xloads */
++{"vlsegstd", "Xhwacha", "#d,s,t,#n", MATCH_VLSEGSTD, MASK_VLSEGSTD, match_opcode, 0},
++{"vlsegstw", "Xhwacha", "#d,s,t,#n", MATCH_VLSEGSTW, MASK_VLSEGSTW, match_opcode, 0},
++{"vlsegstwu", "Xhwacha", "#d,s,t,#n", MATCH_VLSEGSTWU, MASK_VLSEGSTWU, match_opcode, 0},
++{"vlsegsth", "Xhwacha", "#d,s,t,#n", MATCH_VLSEGSTH, MASK_VLSEGSTH, match_opcode, 0},
++{"vlsegsthu", "Xhwacha", "#d,s,t,#n", MATCH_VLSEGSTHU, MASK_VLSEGSTHU, match_opcode, 0},
++{"vlsegstb", "Xhwacha", "#d,s,t,#n", MATCH_VLSEGSTB, MASK_VLSEGSTB, match_opcode, 0},
++{"vlsegstbu", "Xhwacha", "#d,s,t,#n", MATCH_VLSEGSTBU, MASK_VLSEGSTBU, match_opcode, 0},
++/* floads */
++{"vflsegstd", "Xhwacha", "#D,s,t,#n", MATCH_VFLSEGSTD, MASK_VFLSEGSTD, match_opcode, 0},
++{"vflsegstw", "Xhwacha", "#D,s,t,#n", MATCH_VFLSEGSTW, MASK_VFLSEGSTW, match_opcode, 0},
++
++/* unit stride */
++/* xstores */
++{"vsd", "Xhwacha", "#d,s", MATCH_VSD, MASK_VSD, match_opcode, 0},
++{"vsw", "Xhwacha", "#d,s", MATCH_VSW, MASK_VSW, match_opcode, 0},
++{"vsh", "Xhwacha", "#d,s", MATCH_VSH, MASK_VSH, match_opcode, 0},
++{"vsb", "Xhwacha", "#d,s", MATCH_VSB, MASK_VSB, match_opcode, 0},
++/* fstores */
++{"vfsd", "Xhwacha", "#D,s", MATCH_VFSD, MASK_VFSD, match_opcode, 0},
++{"vfsw", "Xhwacha", "#D,s", MATCH_VFSW, MASK_VFSW, match_opcode, 0},
++
++/* stride */
++/* xstores */
++{"vsstd", "Xhwacha", "#d,s,t", MATCH_VSSTD, MASK_VSSTD, match_opcode, 0},
++{"vsstw", "Xhwacha", "#d,s,t", MATCH_VSSTW, MASK_VSSTW, match_opcode, 0},
++{"vssth", "Xhwacha", "#d,s,t", MATCH_VSSTH, MASK_VSSTH, match_opcode, 0},
++{"vsstb", "Xhwacha", "#d,s,t", MATCH_VSSTB, MASK_VSSTB, match_opcode, 0},
++/* fstores */
++{"vfsstd", "Xhwacha", "#D,s,t", MATCH_VFSSTD, MASK_VFSSTD, match_opcode, 0},
++{"vfsstw", "Xhwacha", "#D,s,t", MATCH_VFSSTW, MASK_VFSSTW, match_opcode, 0},
++
++/* segment */
++/* xstores */
++{"vssegd", "Xhwacha", "#d,s,#n", MATCH_VSSEGD, MASK_VSSEGD, match_opcode, 0},
++{"vssegw", "Xhwacha", "#d,s,#n", MATCH_VSSEGW, MASK_VSSEGW, match_opcode, 0},
++{"vssegh", "Xhwacha", "#d,s,#n", MATCH_VSSEGH, MASK_VSSEGH, match_opcode, 0},
++{"vssegb", "Xhwacha", "#d,s,#n", MATCH_VSSEGB, MASK_VSSEGB, match_opcode, 0},
++/* fstores */
++{"vfssegd", "Xhwacha", "#D,s,#n", MATCH_VFSSEGD, MASK_VFSSEGD, match_opcode, 0},
++{"vfssegw", "Xhwacha", "#D,s,#n", MATCH_VFSSEGW, MASK_VFSSEGW, match_opcode, 0},
++
++/* stride segment */
++/* xsegstores */
++{"vssegstd", "Xhwacha", "#d,s,t,#n", MATCH_VSSEGSTD, MASK_VSSEGSTD, match_opcode, 0},
++{"vssegstw", "Xhwacha", "#d,s,t,#n", MATCH_VSSEGSTW, MASK_VSSEGSTW, match_opcode, 0},
++{"vssegsth", "Xhwacha", "#d,s,t,#n", MATCH_VSSEGSTH, MASK_VSSEGSTH, match_opcode, 0},
++{"vssegstb", "Xhwacha", "#d,s,t,#n", MATCH_VSSEGSTB, MASK_VSSEGSTB, match_opcode, 0},
++/* fsegstores */
++{"vfssegstd", "Xhwacha", "#D,s,t,#n", MATCH_VFSSEGSTD, MASK_VFSSEGSTD, match_opcode, 0},
++{"vfssegstw", "Xhwacha", "#D,s,t,#n", MATCH_VFSSEGSTW, MASK_VFSSEGSTW, match_opcode, 0},
++
++{"vsetcfg", "Xhwacha", "s", MATCH_VSETCFG, MASK_VSETCFG | MASK_IMM, match_opcode, 0},
++{"vsetcfg", "Xhwacha", "#g,#f", MATCH_VSETCFG, MASK_VSETCFG | MASK_RS1, match_opcode, 0},
++{"vsetcfg", "Xhwacha", "s,#g,#f", MATCH_VSETCFG, MASK_VSETCFG, match_opcode, 0},
++{"vsetucfg", "Xhwacha", "d,u", MATCH_LUI, MASK_LUI, match_opcode, INSN_ALIAS | WR_xd},
++{"vsetvl", "Xhwacha", "d,s", MATCH_VSETVL, MASK_VSETVL, match_opcode, 0},
++{"vgetcfg", "Xhwacha", "d", MATCH_VGETCFG, MASK_VGETCFG, match_opcode, 0},
++{"vgetvl", "Xhwacha", "d", MATCH_VGETVL, MASK_VGETVL, match_opcode, 0},
++
++{"vmvv", "Xhwacha", "#d,#s", MATCH_VMVV, MASK_VMVV, match_opcode, 0},
++{"vmsv", "Xhwacha", "#d,s", MATCH_VMSV, MASK_VMSV, match_opcode, 0},
++{"vfmvv", "Xhwacha", "#D,#S", MATCH_VFMVV, MASK_VFMVV, match_opcode, 0},
++{"vfmsv.d", "Xhwacha", "#D,s", MATCH_VFMSV_D, MASK_VFMSV_D, match_opcode, 0},
++{"vfmsv.s", "Xhwacha", "#D,s", MATCH_VFMSV_S, MASK_VFMSV_S, match_opcode, 0},
++
++{"vf", "Xhwacha", "q(s)", MATCH_VF, MASK_VF, match_opcode, 0},
++{"vf", "Xhwacha", "A,s", 0, (int) M_VF, match_never, INSN_MACRO },
++
++{"vxcptcause", "Xhwacha", "d", MATCH_VXCPTCAUSE, MASK_VXCPTCAUSE, match_opcode, 0},
++{"vxcptaux", "Xhwacha", "d", MATCH_VXCPTAUX, MASK_VXCPTAUX, match_opcode, 0},
++
++{"vxcptsave", "Xhwacha", "s", MATCH_VXCPTSAVE, MASK_VXCPTSAVE, match_opcode, 0},
++{"vxcptrestore", "Xhwacha", "s", MATCH_VXCPTRESTORE, MASK_VXCPTRESTORE, match_opcode, 0},
++{"vxcptkill", "Xhwacha", "", MATCH_VXCPTKILL, MASK_VXCPTKILL, match_opcode, 0},
++
++{"vxcptevac", "Xhwacha", "s", MATCH_VXCPTEVAC, MASK_VXCPTEVAC, match_opcode, 0},
++{"vxcpthold", "Xhwacha", "", MATCH_VXCPTHOLD, MASK_VXCPTHOLD, match_opcode, 0},
++{"venqcmd", "Xhwacha", "s,t", MATCH_VENQCMD, MASK_VENQCMD, match_opcode, 0},
++{"venqimm1", "Xhwacha", "s,t", MATCH_VENQIMM1, MASK_VENQIMM1, match_opcode, 0},
++{"venqimm2", "Xhwacha", "s,t", MATCH_VENQIMM2, MASK_VENQIMM2, match_opcode, 0},
++{"venqcnt", "Xhwacha", "s,t", MATCH_VENQCNT, MASK_VENQCNT, match_opcode, 0},
++};
++
++#define RISCV_NUM_OPCODES \
++ ((sizeof riscv_builtin_opcodes) / (sizeof (riscv_builtin_opcodes[0])))
++const int bfd_riscv_num_builtin_opcodes = RISCV_NUM_OPCODES;
++
++/* const removed from the following to allow for dynamic extensions to the
++ * built-in instruction set. */
++struct riscv_opcode *riscv_opcodes =
++ (struct riscv_opcode *) riscv_builtin_opcodes;
++int bfd_riscv_num_opcodes = RISCV_NUM_OPCODES;
++#undef RISCV_NUM_OPCODES
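++/* A dynamic extension could, for instance, copy riscv_builtin_opcodes
++   into a larger table, append its own entries, and repoint
++   riscv_opcodes and bfd_riscv_num_opcodes at the new table.  */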
diff --git a/util/crossgcc/patches/gcc-4.9.2_riscv.patch b/util/crossgcc/patches/gcc-4.9.2_riscv.patch
new file mode 100644
index 0000000000..942b48f815
--- /dev/null
+++ b/util/crossgcc/patches/gcc-4.9.2_riscv.patch
@@ -0,0 +1,11296 @@
+Created from https://github.com/riscv/riscv-gnu-toolchain,
+commit ddce5d17f14831f4957e57c415aca77817c2a82c
+
+diff -urN original-gcc/config.sub gcc/config.sub
+--- original-gcc/config.sub 2013-10-01 18:50:56.000000000 +0200
++++ gcc-4.9.2/config.sub 2015-03-07 09:57:54.195132741 +0100
+@@ -334,6 +334,9 @@
+ ms1)
+ basic_machine=mt-unknown
+ ;;
++ riscv)
++ basic_machine=riscv-ucb
++ ;;
+
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+diff -urN original-gcc/gcc/common/config/riscv/riscv-common.c gcc/gcc/common/config/riscv/riscv-common.c
+--- original-gcc/gcc/common/config/riscv/riscv-common.c 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/common/config/riscv/riscv-common.c 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,128 @@
++/* Common hooks for RISC-V.
++ Copyright (C) 1989-2014 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "common/common-target.h"
++#include "common/common-target-def.h"
++#include "opts.h"
++#include "flags.h"
++#include "errors.h"
++
++/* Parse a RISC-V ISA string into an option mask. */
++
++static void
++riscv_parse_arch_string (const char *isa, int *flags)
++{
++ const char *p = isa;
++
++ if (strncmp (p, "RV32", 4) == 0)
++ *flags |= MASK_32BIT, p += 4;
++ else if (strncmp (p, "RV64", 4) == 0)
++ *flags &= ~MASK_32BIT, p += 4;
++
++ if (*p++ != 'I')
++ {
++ error ("-march=%s: ISA strings must begin with I, RV32I, or RV64I", isa);
++ return;
++ }
++
++ *flags &= ~MASK_MULDIV;
++ if (*p == 'M')
++ *flags |= MASK_MULDIV, p++;
++
++ *flags &= ~MASK_ATOMIC;
++ if (*p == 'A')
++ *flags |= MASK_ATOMIC, p++;
++
++ *flags |= MASK_SOFT_FLOAT_ABI;
++ if (*p == 'F')
++ *flags &= ~MASK_SOFT_FLOAT_ABI, p++;
++
++ if (*p == 'D')
++ {
++ p++;
++ if (!TARGET_HARD_FLOAT)
++ {
++ error ("-march=%s: the D extension requires the F extension", isa);
++ return;
++ }
++ }
++ else if (TARGET_HARD_FLOAT)
++ {
++ error ("-march=%s: single-precision-only is not yet supported", isa);
++ return;
++ }
++
++ if (*p)
++ {
++ error ("-march=%s: unsupported ISA substring %s", isa, p);
++ return;
++ }
++}
++
++static int
++riscv_flags_from_arch_string (const char *isa)
++{
++ int flags = 0;
++ riscv_parse_arch_string (isa, &flags);
++ return flags;
++}
++
++/* Implement TARGET_HANDLE_OPTION. */
++
++static bool
++riscv_handle_option (struct gcc_options *opts,
++ struct gcc_options *opts_set ATTRIBUTE_UNUSED,
++ const struct cl_decoded_option *decoded,
++ location_t loc ATTRIBUTE_UNUSED)
++{
++ switch (decoded->opt_index)
++ {
++ case OPT_march_:
++ riscv_parse_arch_string (decoded->arg, &opts->x_target_flags);
++ return true;
++
++ default:
++ return true;
++ }
++}
++
++/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
++static const struct default_options riscv_option_optimization_table[] =
++ {
++ { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
++ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
++ { OPT_LEVELS_NONE, 0, NULL, 0 }
++ };
++
++#undef TARGET_OPTION_OPTIMIZATION_TABLE
++#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
++
++#undef TARGET_DEFAULT_TARGET_FLAGS
++#define TARGET_DEFAULT_TARGET_FLAGS \
++ (riscv_flags_from_arch_string (RISCV_ARCH_STRING_DEFAULT) \
++ | (TARGET_64BIT_DEFAULT ? 0 : MASK_32BIT))
++
++#undef TARGET_HANDLE_OPTION
++#define TARGET_HANDLE_OPTION riscv_handle_option
++
++struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
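To see what riscv_parse_arch_string produces, here is a standalone sketch of the same flag handling. The mask bit positions below are placeholders (the real MASK_* values are generated from the target's option definitions); only the parsing order matters: an optional RV32/RV64 prefix, a mandatory I, then M, A, F, D.

#include <stdio.h>
#include <string.h>

#define MASK_32BIT          (1 << 0)   /* hypothetical bit assignments */
#define MASK_MULDIV         (1 << 1)
#define MASK_ATOMIC         (1 << 2)
#define MASK_SOFT_FLOAT_ABI (1 << 3)

static int parse (const char *p, int flags)
{
  if (!strncmp (p, "RV32", 4)) { flags |= MASK_32BIT; p += 4; }
  else if (!strncmp (p, "RV64", 4)) { flags &= ~MASK_32BIT; p += 4; }
  if (*p++ != 'I')
    return -1;                          /* ISA strings must begin with I */
  flags &= ~MASK_MULDIV;
  if (*p == 'M') { flags |= MASK_MULDIV; p++; }
  flags &= ~MASK_ATOMIC;
  if (*p == 'A') { flags |= MASK_ATOMIC; p++; }
  flags |= MASK_SOFT_FLOAT_ABI;
  if (*p == 'F') { flags &= ~MASK_SOFT_FLOAT_ABI; p++; }
  if (*p == 'D') p++;                   /* D-requires-F check elided here */
  return *p ? -1 : flags;
}

int main (void)
{
  /* RV64IMAFD: M and A set, soft-float ABI and 32-bit cleared -> 0x6. */
  printf ("0x%x\n", parse ("RV64IMAFD", 0));
  return 0;
}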
+diff -urN original-gcc/gcc/config/riscv/constraints.md gcc/gcc/config/riscv/constraints.md
+--- original-gcc/gcc/config/riscv/constraints.md 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/constraints.md 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,90 @@
++;; Constraint definitions for RISC-V target.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++;; Register constraints
++
++(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
++ "A floating-point register (if available).")
++
++(define_register_constraint "b" "ALL_REGS"
++ "@internal")
++
++(define_register_constraint "j" "T_REGS"
++ "@internal")
++
++;; Integer constraints
++
++(define_constraint "Z"
++ "@internal"
++ (and (match_code "const_int")
++ (match_test "1")))
++
++(define_constraint "I"
++ "An I-type 12-bit signed immediate."
++ (and (match_code "const_int")
++ (match_test "SMALL_OPERAND (ival)")))
++
++(define_constraint "J"
++ "Integer zero."
++ (and (match_code "const_int")
++ (match_test "ival == 0")))
++
++;; Floating-point constraints
++
++(define_constraint "G"
++ "Floating-point zero."
++ (and (match_code "const_double")
++ (match_test "op == CONST0_RTX (mode)")))
++
++;; General constraints
++
++(define_constraint "Q"
++ "@internal"
++ (match_operand 0 "const_arith_operand"))
++
++(define_memory_constraint "A"
++ "An address that is held in a general-purpose register."
++ (and (match_code "mem")
++       (match_test "GET_CODE (XEXP (op, 0)) == REG")))
++
++(define_constraint "S"
++ "@internal
++ A constant call address."
++ (and (match_operand 0 "call_insn_operand")
++ (match_test "CONSTANT_P (op)")))
++
++(define_constraint "T"
++ "@internal
++ A constant @code{move_operand}."
++ (and (match_operand 0 "move_operand")
++ (match_test "CONSTANT_P (op)")))
++
++(define_memory_constraint "W"
++ "@internal
++ A memory address based on a member of @code{BASE_REG_CLASS}."
++ (and (match_code "mem")
++ (match_operand 0 "memory_operand")))
++
++(define_constraint "YG"
++ "@internal
++ A vector zero."
++ (and (match_code "const_vector")
++ (match_test "op == CONST0_RTX (mode)")))
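Constraint "I" above relies on SMALL_OPERAND, which this hunk does not define; an I-type immediate on RISC-V is a signed 12-bit value, so the test is equivalent to the following sketch:

#include <assert.h>

/* Assumed equivalent of the SMALL_OPERAND test behind constraint "I":
   a signed 12-bit immediate, i.e. [-2^11, 2^11 - 1]. */
static int small_operand (long long x)
{
  return x >= -2048 && x <= 2047;
}

int main (void)
{
  assert (small_operand (2047) && small_operand (-2048));
  assert (!small_operand (2048));   /* too big: needs LUI + ADDI instead */
  return 0;
}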
+diff -urN original-gcc/gcc/config/riscv/default-32.h gcc/gcc/config/riscv/default-32.h
+--- original-gcc/gcc/config/riscv/default-32.h 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/default-32.h 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,22 @@
++/* Definitions of target machine for GCC, for RISC-V,
++ defaulting to 32-bit code generation.
++
++ Copyright (C) 1999-2014 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#define TARGET_64BIT_DEFAULT 0
+diff -urN original-gcc/gcc/config/riscv/elf.h gcc/gcc/config/riscv/elf.h
+--- original-gcc/gcc/config/riscv/elf.h 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/elf.h 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,31 @@
++/* Target macros for riscv*-elf targets.
++ Copyright (C) 1994, 1997, 1999, 2000, 2002, 2003, 2004, 2007, 2010
++ Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++/* Leave the linker script to choose the appropriate libraries. */
++#undef LIB_SPEC
++#define LIB_SPEC ""
++
++#undef STARTFILE_SPEC
++#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
++
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC "crtend%O%s"
++
++#define NO_IMPLICIT_EXTERN_C 1
+diff -urN original-gcc/gcc/config/riscv/generic.md gcc/gcc/config/riscv/generic.md
+--- original-gcc/gcc/config/riscv/generic.md 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/generic.md 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,98 @@
++;; Generic DFA-based pipeline description for RISC-V targets.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published
++;; by the Free Software Foundation; either version 3, or (at your
++;; option) any later version.
++
++;; GCC is distributed in the hope that it will be useful, but WITHOUT
++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++;; License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++
++;; This file is derived from the old define_function_unit description.
++;; Each reservation can be overridden on a processor-by-processor basis.
++
++(define_insn_reservation "generic_alu" 1
++ (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
++ "alu")
++
++(define_insn_reservation "generic_load" 3
++ (eq_attr "type" "load,fpload,fpidxload")
++ "alu")
++
++(define_insn_reservation "generic_store" 1
++ (eq_attr "type" "store,fpstore,fpidxstore")
++ "alu")
++
++(define_insn_reservation "generic_xfer" 2
++ (eq_attr "type" "mfc,mtc")
++ "alu")
++
++(define_insn_reservation "generic_branch" 1
++ (eq_attr "type" "branch,jump,call")
++ "alu")
++
++(define_insn_reservation "generic_imul" 17
++ (eq_attr "type" "imul")
++ "imuldiv*17")
++
++(define_insn_reservation "generic_idiv" 38
++ (eq_attr "type" "idiv")
++ "imuldiv*38")
++
++(define_insn_reservation "generic_fcvt" 1
++ (eq_attr "type" "fcvt")
++ "alu")
++
++(define_insn_reservation "generic_fmove" 2
++ (eq_attr "type" "fmove")
++ "alu")
++
++(define_insn_reservation "generic_fcmp" 3
++ (eq_attr "type" "fcmp")
++ "alu")
++
++(define_insn_reservation "generic_fadd" 4
++ (eq_attr "type" "fadd")
++ "alu")
++
++(define_insn_reservation "generic_fmul_single" 7
++ (and (eq_attr "type" "fmul,fmadd")
++ (eq_attr "mode" "SF"))
++ "alu")
++
++(define_insn_reservation "generic_fmul_double" 8
++ (and (eq_attr "type" "fmul,fmadd")
++ (eq_attr "mode" "DF"))
++ "alu")
++
++(define_insn_reservation "generic_fdiv_single" 23
++ (and (eq_attr "type" "fdiv")
++ (eq_attr "mode" "SF"))
++ "alu")
++
++(define_insn_reservation "generic_fdiv_double" 36
++ (and (eq_attr "type" "fdiv")
++ (eq_attr "mode" "DF"))
++ "alu")
++
++(define_insn_reservation "generic_fsqrt_single" 54
++ (and (eq_attr "type" "fsqrt")
++ (eq_attr "mode" "SF"))
++ "alu")
++
++(define_insn_reservation "generic_fsqrt_double" 112
++ (and (eq_attr "type" "fsqrt")
++ (eq_attr "mode" "DF"))
++ "alu")
+diff -urN original-gcc/gcc/config/riscv/linux64.h gcc/gcc/config/riscv/linux64.h
+--- original-gcc/gcc/config/riscv/linux64.h 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/linux64.h 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,43 @@
++/* Definitions for 64-bit RISC-V GNU/Linux systems with ELF format.
++ Copyright 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2010, 2011
++ Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++/* Select the C library variants to link: pthread, profiled, shared. */
++#undef LIB_SPEC
++#define LIB_SPEC "\
++%{pthread:-lpthread} \
++%{shared:-lc} \
++%{!shared: \
++ %{profile:-lc_p} %{!profile:-lc}}"
++
++#define GLIBC_DYNAMIC_LINKER32 "/lib32/ld.so.1"
++#define GLIBC_DYNAMIC_LINKER64 "/lib/ld.so.1"
++
++#undef LINK_SPEC
++#define LINK_SPEC "\
++%{shared} \
++ %{!shared: \
++ %{!static: \
++ %{rdynamic:-export-dynamic} \
++ %{" OPT_ARCH64 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER64 "} \
++ %{" OPT_ARCH32 ": -dynamic-linker " GNU_USER_DYNAMIC_LINKER32 "}} \
++ %{static:-static}} \
++%{" OPT_ARCH64 ":-melf64lriscv} \
++%{" OPT_ARCH32 ":-melf32lriscv}"
+diff -urN original-gcc/gcc/config/riscv/linux.h gcc/gcc/config/riscv/linux.h
+--- original-gcc/gcc/config/riscv/linux.h 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/linux.h 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,60 @@
++/* Definitions for RISC-V GNU/Linux systems with ELF format.
++ Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006,
++ 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#undef WCHAR_TYPE
++#define WCHAR_TYPE "int"
++
++#undef WCHAR_TYPE_SIZE
++#define WCHAR_TYPE_SIZE 32
++
++#define TARGET_OS_CPP_BUILTINS() \
++ do { \
++ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
++ /* The GNU C++ standard library requires this. */ \
++ if (c_dialect_cxx ()) \
++ builtin_define ("_GNU_SOURCE"); \
++ } while (0)
++
++#undef SUBTARGET_CPP_SPEC
++#define SUBTARGET_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
++
++#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
++
++/* Borrowed from sparc/linux.h */
++#undef LINK_SPEC
++#define LINK_SPEC \
++ "%{shared:-shared} \
++ %{!shared: \
++ %{!static: \
++ %{rdynamic:-export-dynamic} \
++ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
++ %{static:-static}}"
++
++#undef LIB_SPEC
++#define LIB_SPEC "\
++%{pthread:-lpthread} \
++%{shared:-lc} \
++%{!shared: \
++ %{profile:-lc_p} %{!profile:-lc}}"
++
++/* Use crtendS.o for shared and PIE links, crtend.o otherwise; then crtn.o. */
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC \
++ "%{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s"
+diff -urN original-gcc/gcc/config/riscv/opcode-riscv.h gcc/gcc/config/riscv/opcode-riscv.h
+--- original-gcc/gcc/config/riscv/opcode-riscv.h 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/opcode-riscv.h 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,149 @@
++/* RISC-V ISA encoding.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GDB, GAS, and the GNU binutils.
++
++GDB, GAS, and the GNU binutils are free software; you can redistribute
++them and/or modify them under the terms of the GNU General Public
++License as published by the Free Software Foundation; either version
++1, or (at your option) any later version.
++
++GDB, GAS, and the GNU binutils are distributed in the hope that they
++will be useful, but WITHOUT ANY WARRANTY; without even the implied
++warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
++the GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this file; see the file COPYING. If not, write to the Free
++Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
++
++#ifndef _RISCV_H_
++#define _RISCV_H_
++
++#define RV_X(x, s, n) (((x) >> (s)) & ((1<<(n))-1))
++#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
++
++#define EXTRACT_ITYPE_IMM(x) \
++ (RV_X(x, 20, 12) | (RV_IMM_SIGN(x) << 12))
++#define EXTRACT_STYPE_IMM(x) \
++ (RV_X(x, 7, 5) | (RV_X(x, 25, 7) << 5) | (RV_IMM_SIGN(x) << 12))
++#define EXTRACT_SBTYPE_IMM(x) \
++ ((RV_X(x, 8, 4) << 1) | (RV_X(x, 25, 6) << 5) | (RV_X(x, 7, 1) << 11) | (RV_IMM_SIGN(x) << 12))
++#define EXTRACT_UTYPE_IMM(x) \
++ ((RV_X(x, 12, 20) << 20) | (RV_IMM_SIGN(x) << 32))
++#define EXTRACT_UJTYPE_IMM(x) \
++ ((RV_X(x, 21, 10) << 1) | (RV_X(x, 20, 1) << 11) | (RV_X(x, 12, 8) << 12) | (RV_IMM_SIGN(x) << 20))
++
++#define ENCODE_ITYPE_IMM(x) \
++ (RV_X(x, 0, 12) << 20)
++#define ENCODE_STYPE_IMM(x) \
++ ((RV_X(x, 0, 5) << 7) | (RV_X(x, 5, 7) << 25))
++#define ENCODE_SBTYPE_IMM(x) \
++ ((RV_X(x, 1, 4) << 8) | (RV_X(x, 5, 6) << 25) | (RV_X(x, 11, 1) << 7) | (RV_X(x, 12, 1) << 31))
++#define ENCODE_UTYPE_IMM(x) \
++ (RV_X(x, 12, 20) << 12)
++#define ENCODE_UJTYPE_IMM(x) \
++ ((RV_X(x, 1, 10) << 21) | (RV_X(x, 11, 1) << 20) | (RV_X(x, 12, 8) << 12) | (RV_X(x, 20, 1) << 31))
++
++#define VALID_ITYPE_IMM(x) (EXTRACT_ITYPE_IMM(ENCODE_ITYPE_IMM(x)) == (x))
++#define VALID_STYPE_IMM(x) (EXTRACT_STYPE_IMM(ENCODE_STYPE_IMM(x)) == (x))
++#define VALID_SBTYPE_IMM(x) (EXTRACT_SBTYPE_IMM(ENCODE_SBTYPE_IMM(x)) == (x))
++#define VALID_UTYPE_IMM(x) (EXTRACT_UTYPE_IMM(ENCODE_UTYPE_IMM(x)) == (x))
++#define VALID_UJTYPE_IMM(x) (EXTRACT_UJTYPE_IMM(ENCODE_UJTYPE_IMM(x)) == (x))
++
++#define RISCV_RTYPE(insn, rd, rs1, rs2) \
++ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2))
++#define RISCV_ITYPE(insn, rd, rs1, imm) \
++ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ((rs1) << OP_SH_RS1) | ENCODE_ITYPE_IMM(imm))
++#define RISCV_STYPE(insn, rs1, rs2, imm) \
++ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_STYPE_IMM(imm))
++#define RISCV_SBTYPE(insn, rs1, rs2, target) \
++ ((MATCH_ ## insn) | ((rs1) << OP_SH_RS1) | ((rs2) << OP_SH_RS2) | ENCODE_SBTYPE_IMM(target))
++#define RISCV_UTYPE(insn, rd, bigimm) \
++ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UTYPE_IMM(bigimm))
++#define RISCV_UJTYPE(insn, rd, target) \
++ ((MATCH_ ## insn) | ((rd) << OP_SH_RD) | ENCODE_UJTYPE_IMM(target))
++
++#define RISCV_NOP RISCV_ITYPE(ADDI, 0, 0, 0)
++
++#define RISCV_CONST_HIGH_PART(VALUE) \
++ (((VALUE) + (RISCV_IMM_REACH/2)) & ~(RISCV_IMM_REACH-1))
++#define RISCV_CONST_LOW_PART(VALUE) ((VALUE) - RISCV_CONST_HIGH_PART (VALUE))
++
++/* RV fields */
++
++#define OP_MASK_OP 0x7f
++#define OP_SH_OP 0
++#define OP_MASK_RS2 0x1f
++#define OP_SH_RS2 20
++#define OP_MASK_RS1 0x1f
++#define OP_SH_RS1 15
++#define OP_MASK_RS3 0x1f
++#define OP_SH_RS3 27
++#define OP_MASK_RD 0x1f
++#define OP_SH_RD 7
++#define OP_MASK_SHAMT 0x3f
++#define OP_SH_SHAMT 20
++#define OP_MASK_SHAMTW 0x1f
++#define OP_SH_SHAMTW 20
++#define OP_MASK_RM 0x7
++#define OP_SH_RM 12
++#define OP_MASK_PRED 0xf
++#define OP_SH_PRED 24
++#define OP_MASK_SUCC 0xf
++#define OP_SH_SUCC 20
++#define OP_MASK_AQ 0x1
++#define OP_SH_AQ 26
++#define OP_MASK_RL 0x1
++#define OP_SH_RL 25
++
++#define OP_MASK_VRD 0x1f
++#define OP_SH_VRD 7
++#define OP_MASK_VRS 0x1f
++#define OP_SH_VRS 15
++#define OP_MASK_VRT 0x1f
++#define OP_SH_VRT 20
++#define OP_MASK_VRR 0x1f
++#define OP_SH_VRR 25
++
++#define OP_MASK_VFD 0x1f
++#define OP_SH_VFD 7
++#define OP_MASK_VFS 0x1f
++#define OP_SH_VFS 15
++#define OP_MASK_VFT 0x1f
++#define OP_SH_VFT 20
++#define OP_MASK_VFR 0x1f
++#define OP_SH_VFR 25
++
++#define OP_MASK_IMMNGPR 0x3f
++#define OP_SH_IMMNGPR 20
++#define OP_MASK_IMMNFPR 0x3f
++#define OP_SH_IMMNFPR 26
++#define OP_MASK_IMMSEGNELM 0x1f
++#define OP_SH_IMMSEGNELM 17
++#define OP_MASK_IMMSEGSTNELM 0x1f
++#define OP_SH_IMMSEGSTNELM 12
++#define OP_MASK_CUSTOM_IMM 0x7f
++#define OP_SH_CUSTOM_IMM 25
++
++#define LINK_REG 1
++
++#define RISCV_JUMP_BITS RISCV_BIGIMM_BITS
++#define RISCV_JUMP_ALIGN_BITS 1
++#define RISCV_JUMP_ALIGN (1 << RISCV_JUMP_ALIGN_BITS)
++#define RISCV_JUMP_REACH ((1ULL<<RISCV_JUMP_BITS)*RISCV_JUMP_ALIGN)
++
++#define RISCV_IMM_BITS 12
++#define RISCV_BIGIMM_BITS (32-RISCV_IMM_BITS)
++#define RISCV_IMM_REACH (1LL<<RISCV_IMM_BITS)
++#define RISCV_BIGIMM_REACH (1LL<<RISCV_BIGIMM_BITS)
++#define RISCV_BRANCH_BITS RISCV_IMM_BITS
++#define RISCV_BRANCH_ALIGN_BITS RISCV_JUMP_ALIGN_BITS
++#define RISCV_BRANCH_ALIGN (1 << RISCV_BRANCH_ALIGN_BITS)
++#define RISCV_BRANCH_REACH (RISCV_IMM_REACH*RISCV_BRANCH_ALIGN)
++
++#include "riscv-opc.h"
++
++#endif /* _RISCV_H_ */
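The ENCODE_*/EXTRACT_* pairs above scatter an immediate into the instruction word and gather it back, and the VALID_*_IMM macros check that the round trip is lossless. A self-contained sketch of the B-type (SB) case, restated with explicit casts so the sign handling is well defined:

#include <assert.h>
#include <stdint.h>

#define RV_X(x, s, n) (((x) >> (s)) & ((1u << (n)) - 1))

/* Mirrors ENCODE_SBTYPE_IMM: imm bits [4:1], [10:5], [11], [12] land in
   instruction bits [11:8], [30:25], [7], [31]. */
static uint32_t encode_sb (int32_t imm)
{
  uint32_t x = (uint32_t) imm;
  return (RV_X (x, 1, 4) << 8) | (RV_X (x, 5, 6) << 25)
       | (RV_X (x, 11, 1) << 7) | (RV_X (x, 12, 1) << 31);
}

/* Mirrors EXTRACT_SBTYPE_IMM; bit 31 holds imm[12] and supplies the sign. */
static int32_t extract_sb (uint32_t insn)
{
  int32_t imm = (RV_X (insn, 8, 4) << 1) | (RV_X (insn, 25, 6) << 5)
              | (RV_X (insn, 7, 1) << 11);
  if (insn >> 31)
    imm -= 1 << 12;   /* RV_IMM_SIGN: sign-extend from bit 12 */
  return imm;
}

int main (void)
{
  /* Branch offsets are even 13-bit signed values: the VALID_SBTYPE_IMM
     round trip holds across the whole +/-4 KiB range. */
  for (int32_t off = -4096; off < 4096; off += 2)
    assert (extract_sb (encode_sb (off)) == off);
  return 0;
}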
+diff -urN original-gcc/gcc/config/riscv/peephole.md gcc/gcc/config/riscv/peephole.md
+--- original-gcc/gcc/config/riscv/peephole.md 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/peephole.md 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,100 @@
++;;........................
++;; DI -> SI optimizations
++;;........................
++
++;; Simplify (int)(a + 1), etc.
++(define_peephole2
++ [(set (match_operand:DI 0 "register_operand")
++ (match_operator:DI 4 "modular_operator"
++ [(match_operand:DI 1 "register_operand")
++ (match_operand:DI 2 "arith_operand")]))
++ (set (match_operand:SI 3 "register_operand")
++ (truncate:SI (match_dup 0)))]
++ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))
++ && (GET_CODE (operands[4]) != ASHIFT || (CONST_INT_P (operands[2]) && INTVAL (operands[2]) < 32))"
++ [(set (match_dup 3)
++ (truncate:SI
++ (match_op_dup:DI 4
++ [(match_operand:DI 1 "register_operand")
++ (match_operand:DI 2 "arith_operand")])))])
++
++;; Simplify (int)a + 1, etc.
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand")
++ (truncate:SI (match_operand:DI 1 "register_operand")))
++ (set (match_operand:SI 3 "register_operand")
++ (match_operator:SI 4 "modular_operator"
++ [(match_dup 0)
++ (match_operand:SI 2 "arith_operand")]))]
++ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
++ [(set (match_dup 3)
++ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
++
++;; Simplify -(int)a, etc.
++(define_peephole2
++ [(set (match_operand:SI 0 "register_operand")
++ (truncate:SI (match_operand:DI 2 "register_operand")))
++ (set (match_operand:SI 3 "register_operand")
++ (match_operator:SI 4 "modular_operator"
++ [(match_operand:SI 1 "reg_or_0_operand")
++ (match_dup 0)]))]
++ "TARGET_64BIT && (REGNO (operands[0]) == REGNO (operands[3]) || peep2_reg_dead_p (2, operands[0]))"
++ [(set (match_dup 3)
++ (match_op_dup:SI 4 [(match_dup 1) (match_dup 2)]))])
++
++;; Simplify PIC loads to static variables.
++;; These will go away once we figure out how to emit auipc discretely.
++(define_insn "*local_pic_load<mode>"
++ [(set (match_operand:ANYI 0 "register_operand" "=r")
++ (mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
++ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
++ "<load>\t%0,%1"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_load<mode>"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
++ (clobber (match_scratch:DI 2 "=&r"))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
++ "<load>\t%0,%1,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_load<mode>"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
++ (clobber (match_scratch:SI 2 "=&r"))]
++ "TARGET_HARD_FLOAT && !TARGET_64BIT && flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
++ "<load>\t%0,%1,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_loadu<mode>"
++ [(set (match_operand:SUPERQI 0 "register_operand" "=r")
++ (zero_extend:SUPERQI (mem:SUBDI (match_operand 1 "absolute_symbolic_operand" ""))))]
++ "flag_pic && SYMBOL_REF_LOCAL_P (operands[1])"
++ "<load>u\t%0,%1"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storedi<mode>"
++ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
++ (clobber (match_scratch:DI 2 "=&r"))]
++ "TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
++ "<store>\t%z1,%0,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storesi<mode>"
++ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
++ (clobber (match_scratch:SI 2 "=&r"))]
++ "!TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
++ "<store>\t%z1,%0,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storedi<mode>"
++ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYF 1 "register_operand" "f"))
++ (clobber (match_scratch:DI 2 "=&r"))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
++ "<store>\t%1,%0,%2"
++ [(set (attr "length") (const_int 8))])
++(define_insn "*local_pic_storesi<mode>"
++ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYF 1 "register_operand" "f"))
++ (clobber (match_scratch:SI 2 "=&r"))]
++ "TARGET_HARD_FLOAT && !TARGET_64BIT && (flag_pic && SYMBOL_REF_LOCAL_P (operands[0]))"
++ "<store>\t%1,%0,%2"
++ [(set (attr "length") (const_int 8))])
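At the source level, the first peephole in this file targets code like the sketch below: on RV64, a 64-bit add feeding a truncation can be replaced by a single word-sized ADDW once the wide result is dead (illustrative only).

#include <stdint.h>

int32_t f (int64_t a)
{
  int64_t t = a + 1;    /* 64-bit add...                        */
  return (int32_t) t;   /* ...plus truncation folds into addw.  */
}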
+diff -urN original-gcc/gcc/config/riscv/predicates.md gcc/gcc/config/riscv/predicates.md
+--- original-gcc/gcc/config/riscv/predicates.md 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/predicates.md 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,182 @@
++;; Predicate description for RISC-V target.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_predicate "const_arith_operand"
++ (and (match_code "const_int")
++ (match_test "SMALL_OPERAND (INTVAL (op))")))
++
++(define_predicate "arith_operand"
++ (ior (match_operand 0 "const_arith_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "sle_operand"
++ (and (match_code "const_int")
++ (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
++
++(define_predicate "sleu_operand"
++ (and (match_operand 0 "sle_operand")
++ (match_test "INTVAL (op) + 1 != 0")))
++
++(define_predicate "const_0_operand"
++ (and (match_code "const_int,const_double,const_vector")
++ (match_test "op == CONST0_RTX (GET_MODE (op))")))
++
++(define_predicate "reg_or_0_operand"
++ (ior (match_operand 0 "const_0_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "const_1_operand"
++ (and (match_code "const_int,const_double,const_vector")
++ (match_test "op == CONST1_RTX (GET_MODE (op))")))
++
++(define_predicate "reg_or_1_operand"
++ (ior (match_operand 0 "const_1_operand")
++ (match_operand 0 "register_operand")))
++
++;; This is used for indexing into vectors, and hence only accepts const_int.
++(define_predicate "const_0_or_1_operand"
++ (and (match_code "const_int")
++ (ior (match_test "op == CONST0_RTX (GET_MODE (op))")
++ (match_test "op == CONST1_RTX (GET_MODE (op))"))))
++
++(define_special_predicate "pc_or_label_operand"
++ (match_code "pc,label_ref"))
++
++;; A legitimate CONST_INT operand that takes more than one instruction
++;; to load.
++(define_predicate "splittable_const_int_operand"
++ (match_code "const_int")
++{
++ /* Don't handle multi-word moves this way; we don't want to introduce
++ the individual word-mode moves until after reload. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
++ return false;
++
++ /* Otherwise check whether the constant can be loaded in a single
++ instruction. */
++ return !LUI_INT (op) && !SMALL_INT (op);
++})
++
++(define_predicate "move_operand"
++ (match_operand 0 "general_operand")
++{
++ enum riscv_symbol_type symbol_type;
++
++ /* The thinking here is as follows:
++
++ (1) The move expanders should split complex load sequences into
++ individual instructions. Those individual instructions can
++ then be optimized by all rtl passes.
++
++ (2) The target of pre-reload load sequences should not be used
++ to store temporary results. If the target register is only
++ assigned one value, reload can rematerialize that value
++ on demand, rather than spill it to the stack.
++
++ (3) If we allowed pre-reload passes like combine and cse to recreate
++ complex load sequences, we would want to be able to split the
++ sequences before reload as well, so that the pre-reload scheduler
++ can see the individual instructions. This falls foul of (2);
++ the splitter would be forced to reuse the target register for
++ intermediate results.
++
++ (4) We want to define complex load splitters for combine. These
++ splitters can request a temporary scratch register, which avoids
++ the problem in (2). They allow things like:
++
++ (set (reg T1) (high SYM))
++ (set (reg T2) (low (reg T1) SYM))
++ (set (reg X) (plus (reg T2) (const_int OFFSET)))
++
++ to be combined into:
++
++ (set (reg T3) (high SYM+OFFSET))
++ (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
++
++ if T2 is only used this once. */
++ switch (GET_CODE (op))
++ {
++ case CONST_INT:
++ return !splittable_const_int_operand (op, mode);
++
++ case CONST:
++ case SYMBOL_REF:
++ case LABEL_REF:
++ return (riscv_symbolic_constant_p (op, &symbol_type)
++ && !riscv_hi_relocs[symbol_type]);
++
++ case HIGH:
++ op = XEXP (op, 0);
++ return riscv_symbolic_constant_p (op, &symbol_type);
++
++ default:
++ return true;
++ }
++})
++
++(define_predicate "consttable_operand"
++ (match_test "CONSTANT_P (op)"))
++
++(define_predicate "symbolic_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++ enum riscv_symbol_type type;
++ return riscv_symbolic_constant_p (op, &type);
++})
++
++(define_predicate "absolute_symbolic_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++ enum riscv_symbol_type type;
++ return (riscv_symbolic_constant_p (op, &type)
++ && type == SYMBOL_ABSOLUTE);
++})
++
++(define_predicate "plt_symbolic_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++ enum riscv_symbol_type type;
++ return (riscv_symbolic_constant_p (op, &type)
++ && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
++})
++
++(define_predicate "call_insn_operand"
++ (ior (match_operand 0 "absolute_symbolic_operand")
++ (match_operand 0 "plt_symbolic_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "symbol_ref_operand"
++ (match_code "symbol_ref"))
++
++(define_predicate "modular_operator"
++ (match_code "plus,minus,mult,ashift"))
++
++(define_predicate "equality_operator"
++ (match_code "eq,ne"))
++
++(define_predicate "order_operator"
++ (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
++
++(define_predicate "fp_order_operator"
++ (match_code "eq,lt,le,gt,ge"))
++
++(define_predicate "fp_unorder_operator"
++ (match_code "ordered,unordered"))
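splittable_const_int_operand above separates constants that need a multi-instruction sequence from those a single LUI or ADDI can load. A rough classification, under the assumption that SMALL_INT is the signed 12-bit test and LUI_INT accepts sign-extended 32-bit values whose low 12 bits are clear:

#include <stdio.h>

static const char *classify (long long v)
{
  if (v >= -2048 && v <= 2047)
    return "one ADDI";                  /* SMALL_INT (assumed) */
  if ((v & 0xfff) == 0 && v == (int) v)
    return "one LUI";                   /* LUI_INT (assumed) */
  return "splittable (multiple insns)";
}

int main (void)
{
  printf ("%s\n", classify (0x12345000LL));   /* one LUI */
  printf ("%s\n", classify (0x12345678LL));   /* splittable: LUI + ADDI */
  return 0;
}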
+diff -urN original-gcc/gcc/config/riscv/riscv.c gcc/gcc/config/riscv/riscv.c
+--- original-gcc/gcc/config/riscv/riscv.c 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/riscv.c 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,4292 @@
++/* Subroutines used for code generation for RISC-V.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "rtl.h"
++#include "regs.h"
++#include "hard-reg-set.h"
++#include "insn-config.h"
++#include "conditions.h"
++#include "insn-attr.h"
++#include "recog.h"
++#include "output.h"
++#include "tree.h"
++#include "varasm.h"
++#include "stor-layout.h"
++#include "calls.h"
++#include "function.h"
++#include "expr.h"
++#include "optabs.h"
++#include "libfuncs.h"
++#include "flags.h"
++#include "reload.h"
++#include "tm_p.h"
++#include "ggc.h"
++#include "gstab.h"
++#include "hashtab.h"
++#include "debug.h"
++#include "target.h"
++#include "target-def.h"
++#include "langhooks.h"
++#include "sched-int.h"
++#include "bitmap.h"
++#include "diagnostic.h"
++#include "target-globals.h"
++#include "symcat.h"
++#include <stdint.h>
++
++/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
++#define UNSPEC_ADDRESS_P(X) \
++ (GET_CODE (X) == UNSPEC \
++ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
++ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
++
++/* Extract the symbol or label from UNSPEC wrapper X. */
++#define UNSPEC_ADDRESS(X) \
++ XVECEXP (X, 0, 0)
++
++/* Extract the symbol type from UNSPEC wrapper X. */
++#define UNSPEC_ADDRESS_TYPE(X) \
++ ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
++
++/* The maximum distance between the top of the stack frame and the
++ value sp has when we save and restore registers. This is set by the
++ range of load/store offsets and must also preserve stack alignment. */
++#define RISCV_MAX_FIRST_STACK_STEP (RISCV_IMM_REACH/2 - 16)
++
++/* True if INSN is a riscv.md pattern or asm statement. */
++#define USEFUL_INSN_P(INSN) \
++ (NONDEBUG_INSN_P (INSN) \
++ && GET_CODE (PATTERN (INSN)) != USE \
++ && GET_CODE (PATTERN (INSN)) != CLOBBER \
++ && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
++ && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
++
++/* True if bit BIT is set in VALUE. */
++#define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
++
++/* Classifies an address.
++
++ ADDRESS_REG
++ A natural register + offset address. The register satisfies
++ riscv_valid_base_register_p and the offset is a const_arith_operand.
++
++ ADDRESS_LO_SUM
++ A LO_SUM rtx. The first operand is a valid base register and
++ the second operand is a symbolic address.
++
++ ADDRESS_CONST_INT
++      A signed 12-bit constant address.
++
++ ADDRESS_SYMBOLIC:
++ A constant symbolic address. */
++enum riscv_address_type {
++ ADDRESS_REG,
++ ADDRESS_LO_SUM,
++ ADDRESS_CONST_INT,
++ ADDRESS_SYMBOLIC
++};
++
++/* Macros to create an enumeration identifier for a function prototype. */
++#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
++#define RISCV_FTYPE_NAME2(A, B, C) RISCV_##A##_FTYPE_##B##_##C
++#define RISCV_FTYPE_NAME3(A, B, C, D) RISCV_##A##_FTYPE_##B##_##C##_##D
++#define RISCV_FTYPE_NAME4(A, B, C, D, E) RISCV_##A##_FTYPE_##B##_##C##_##D##_##E
++
++/* Classifies the prototype of a built-in function. */
++enum riscv_function_type {
++#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,
++#include "config/riscv/riscv-ftypes.def"
++#undef DEF_RISCV_FTYPE
++ RISCV_MAX_FTYPE_MAX
++};
++
++/* Specifies how a built-in function should be converted into rtl. */
++enum riscv_builtin_type {
++ /* The function corresponds directly to an .md pattern. The return
++ value is mapped to operand 0 and the arguments are mapped to
++ operands 1 and above. */
++ RISCV_BUILTIN_DIRECT,
++
++ /* The function corresponds directly to an .md pattern. There is no return
++ value and the arguments are mapped to operands 0 and above. */
++ RISCV_BUILTIN_DIRECT_NO_TARGET
++};
++
++/* Information about a function's frame layout. */
++struct GTY(()) riscv_frame_info {
++ /* The size of the frame in bytes. */
++ HOST_WIDE_INT total_size;
++
++ /* Bit X is set if the function saves or restores GPR X. */
++ unsigned int mask;
++
++ /* Likewise FPR X. */
++ unsigned int fmask;
++
++ /* Offsets of fixed-point and floating-point save areas from frame bottom */
++ HOST_WIDE_INT gp_sp_offset;
++ HOST_WIDE_INT fp_sp_offset;
++
++ /* Offset of virtual frame pointer from stack pointer/frame bottom */
++ HOST_WIDE_INT frame_pointer_offset;
++
++ /* Offset of hard frame pointer from stack pointer/frame bottom */
++ HOST_WIDE_INT hard_frame_pointer_offset;
++
++ /* The offset of arg_pointer_rtx from the bottom of the frame. */
++ HOST_WIDE_INT arg_pointer_offset;
++};
++
++struct GTY(()) machine_function {
++ /* The number of extra stack bytes taken up by register varargs.
++ This area is allocated by the callee at the very top of the frame. */
++ int varargs_size;
++
++ /* The current frame information, calculated by riscv_compute_frame_info. */
++ struct riscv_frame_info frame;
++};
++
++/* Information about a single argument. */
++struct riscv_arg_info {
++ /* True if the argument is passed in a floating-point register, or
++ would have been if we hadn't run out of registers. */
++ bool fpr_p;
++
++ /* The number of words passed in registers, rounded up. */
++ unsigned int reg_words;
++
++  /* The offset of the first register used, from GP_ARG_FIRST or
++     FP_ARG_FIRST (see the CUMULATIVE_ARGS comment for details).
++
++ The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
++ on the stack. */
++ unsigned int reg_offset;
++
++ /* The number of words that must be passed on the stack, rounded up. */
++ unsigned int stack_words;
++
++ /* The offset from the start of the stack overflow area of the argument's
++ first stack word. Only meaningful when STACK_WORDS is nonzero. */
++ unsigned int stack_offset;
++};
++
++/* Information about an address described by riscv_address_type.
++
++ ADDRESS_CONST_INT
++ No fields are used.
++
++ ADDRESS_REG
++ REG is the base register and OFFSET is the constant offset.
++
++ ADDRESS_LO_SUM
++ REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
++ is the type of symbol it references.
++
++ ADDRESS_SYMBOLIC
++ SYMBOL_TYPE is the type of symbol that the address references. */
++struct riscv_address_info {
++ enum riscv_address_type type;
++ rtx reg;
++ rtx offset;
++ enum riscv_symbol_type symbol_type;
++};
++
++/* One stage in a constant building sequence. These sequences have
++ the form:
++
++ A = VALUE[0]
++ A = A CODE[1] VALUE[1]
++ A = A CODE[2] VALUE[2]
++ ...
++
++ where A is an accumulator, each CODE[i] is a binary rtl operation
++ and each VALUE[i] is a constant integer. CODE[0] is undefined. */
++struct riscv_integer_op {
++ enum rtx_code code;
++ unsigned HOST_WIDE_INT value;
++};
++
++/* The largest number of operations needed to load an integer constant.
++ The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI,
++ but we may attempt and reject even worse sequences. */
++#define RISCV_MAX_INTEGER_OPS 32
++
++/* Costs of various operations on the different architectures. */
++
++struct riscv_tune_info
++{
++ unsigned short fp_add[2];
++ unsigned short fp_mul[2];
++ unsigned short fp_div[2];
++ unsigned short int_mul[2];
++ unsigned short int_div[2];
++ unsigned short issue_rate;
++ unsigned short branch_cost;
++ unsigned short fp_to_int_cost;
++ unsigned short memory_cost;
++};
++
++/* Information about one CPU we know about. */
++struct riscv_cpu_info {
++ /* This CPU's canonical name. */
++ const char *name;
++
++ /* The RISC-V ISA and extensions supported by this CPU. */
++ const char *isa;
++
++ /* Tuning parameters for this CPU. */
++ const struct riscv_tune_info *tune_info;
++};
++
++/* Global variables for machine-dependent things. */
++
++/* Which tuning parameters to use. */
++static const struct riscv_tune_info *tune_info;
++
++/* Index [M][R] is true if register R is allowed to hold a value of mode M. */
++bool riscv_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
++
++/* riscv_lo_relocs[X] is the relocation to use when a symbol of type X
++ appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
++ if they are matched by a special .md file pattern. */
++const char *riscv_lo_relocs[NUM_SYMBOL_TYPES];
++
++/* Likewise for HIGHs. */
++const char *riscv_hi_relocs[NUM_SYMBOL_TYPES];
++
++/* Index R is the smallest register class that contains register R. */
++const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, T_REGS, T_REGS, T_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ T_REGS, T_REGS, T_REGS, T_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FRAME_REGS, FRAME_REGS,
++};
++
++/* Costs to use when optimizing for speed. */
++static const struct riscv_tune_info rocket_tune_info = {
++ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_add */
++ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_mul */
++ {COSTS_N_INSNS (20), COSTS_N_INSNS (20)}, /* fp_div */
++ {COSTS_N_INSNS (4), COSTS_N_INSNS (4)}, /* int_mul */
++ {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
++ 1, /* issue_rate */
++ 3, /* branch_cost */
++ COSTS_N_INSNS (2), /* fp_to_int_cost */
++ 5 /* memory_cost */
++};
++
++/* Costs to use when optimizing for size. */
++static const struct riscv_tune_info optimize_size_tune_info = {
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_mul */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_div */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_mul */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */
++ 1, /* issue_rate */
++ 1, /* branch_cost */
++ COSTS_N_INSNS (1), /* fp_to_int_cost */
++ 1 /* memory_cost */
++};
++
++/* A table describing all the processors GCC knows about. */
++static const struct riscv_cpu_info riscv_cpu_info_table[] = {
++ /* Entries for generic ISAs. */
++ { "rocket", "IMAFD", &rocket_tune_info },
++};
++
++/* Return the riscv_cpu_info entry for the given name string. */
++
++static const struct riscv_cpu_info *
++riscv_parse_cpu (const char *cpu_string)
++{
++ unsigned int i;
++
++ for (i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
++ if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
++ return riscv_cpu_info_table + i;
++
++ error ("unknown cpu `%s'", cpu_string);
++ return riscv_cpu_info_table;
++}
++
++/* Fill CODES with a sequence of rtl operations to load VALUE.
++ Return the number of operations needed. */
++
++static int
++riscv_build_integer_1 (struct riscv_integer_op *codes, HOST_WIDE_INT value,
++ enum machine_mode mode)
++{
++ HOST_WIDE_INT low_part = RISCV_CONST_LOW_PART (value);
++ int cost = INT_MAX, alt_cost;
++ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
++
++ if (SMALL_OPERAND (value) || LUI_OPERAND (value))
++ {
++ /* Simply ADDI or LUI */
++ codes[0].code = UNKNOWN;
++ codes[0].value = value;
++ return 1;
++ }
++
++ /* End with ADDI */
++ if (low_part != 0
++ && !(mode == HImode && (int16_t)(value - low_part) != (value - low_part)))
++ {
++ cost = 1 + riscv_build_integer_1 (codes, value - low_part, mode);
++ codes[cost-1].code = PLUS;
++ codes[cost-1].value = low_part;
++ }
++
++ /* End with XORI */
++ if (cost > 2 && (low_part < 0 || mode == HImode))
++ {
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
++ alt_codes[alt_cost-1].code = XOR;
++ alt_codes[alt_cost-1].value = low_part;
++ if (alt_cost < cost)
++ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
++ }
++
++ /* Eliminate trailing zeros and end with SLLI */
++ if (cost > 2 && (value & 1) == 0)
++ {
++ int shift = 0;
++ while ((value & 1) == 0)
++ shift++, value >>= 1;
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value, mode);
++ alt_codes[alt_cost-1].code = ASHIFT;
++ alt_codes[alt_cost-1].value = shift;
++ if (alt_cost < cost)
++ cost = alt_cost, memcpy (codes, alt_codes, sizeof(alt_codes));
++ }
++
++ gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
++ return cost;
++}
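The "End with ADDI" step above leans on RISCV_CONST_HIGH_PART and RISCV_CONST_LOW_PART from opcode-riscv.h: the high part rounds to the nearest LUI-loadable value, so the low part always fits in a signed 12-bit ADDI. A worked example:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Replicas of RISCV_CONST_HIGH_PART / RISCV_CONST_LOW_PART with
   RISCV_IMM_REACH = 1 << 12, as defined in opcode-riscv.h. */
#define IMM_REACH    (1LL << 12)
#define HIGH_PART(v) (((v) + (IMM_REACH / 2)) & ~(IMM_REACH - 1))
#define LOW_PART(v)  ((v) - HIGH_PART (v))

int main (void)
{
  /* Bit 11 clear: hi = 0x12345000 (LUI), lo = 0x678 (ADDI). */
  int64_t v = 0x12345678;
  printf ("hi=0x%llx lo=%lld\n",
          (long long) HIGH_PART (v), (long long) LOW_PART (v));

  /* Bit 11 set: the +0x800 rounds hi up to 0x12346000 and lo
     becomes -1, still within ADDI's signed 12-bit range. */
  v = 0x12345fff;
  assert (HIGH_PART (v) + LOW_PART (v) == v);
  assert (LOW_PART (v) == -1);
  return 0;
}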
++
++static int
++riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
++ enum machine_mode mode)
++{
++ int cost = riscv_build_integer_1 (codes, value, mode);
++
++ /* Eliminate leading zeros and end with SRLI */
++ if (value > 0 && cost > 2)
++ {
++ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
++ int alt_cost, shift = 0;
++ HOST_WIDE_INT shifted_val;
++
++ /* Try filling trailing bits with 1s */
++ while ((value << shift) >= 0)
++ shift++;
++ shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
++ alt_codes[alt_cost-1].code = LSHIFTRT;
++ alt_codes[alt_cost-1].value = shift;
++ if (alt_cost < cost)
++ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++
++ /* Try filling trailing bits with 0s */
++ shifted_val = value << shift;
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
++ alt_codes[alt_cost-1].code = LSHIFTRT;
++ alt_codes[alt_cost-1].value = shift;
++ if (alt_cost < cost)
++ cost = alt_cost, memcpy (codes, alt_codes, sizeof (alt_codes));
++ }
++
++ return cost;
++}
++
++static int
++riscv_split_integer_cost (HOST_WIDE_INT val)
++{
++ int cost;
++ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
++ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++
++ cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
++ if (loval != hival)
++ cost += riscv_build_integer (codes, hival, VOIDmode);
++
++ return cost;
++}
++
++static int
++riscv_integer_cost (HOST_WIDE_INT val)
++{
++ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++ return MIN (riscv_build_integer (codes, val, VOIDmode),
++ riscv_split_integer_cost (val));
++}
++
++/* Try to split a 64b integer into 32b parts, then reassemble. */
++
++static rtx
++riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode)
++{
++ int32_t loval = val, hival = (val - (int32_t)val) >> 32;
++ rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
++
++ riscv_move_integer (hi, hi, hival);
++ riscv_move_integer (lo, lo, loval);
++
++ hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
++ hi = force_reg (mode, hi);
++
++ return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
++}
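The loval/hival computation used here (and in riscv_split_integer_cost) sign-extends the low 32 bits and lets the high word absorb the borrow, so shifting and adding reconstructs the original value exactly:

#include <assert.h>
#include <stdint.h>

int main (void)
{
  int64_t val = 0x123456789abcdef0LL;
  int32_t loval = (int32_t) val;                   /* sign-extended low half */
  int32_t hival = (int32_t) ((val - loval) >> 32); /* 0x12345679: one more than
                                                      the plain high half, to
                                                      compensate for the
                                                      negative low half */
  assert (((int64_t) hival << 32) + loval == val);
  return 0;
}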
++
++/* Return true if X is a thread-local symbol. */
++
++static bool
++riscv_tls_symbol_p (const_rtx x)
++{
++ return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
++}
++
++static bool
++riscv_symbol_binds_local_p (const_rtx x)
++{
++ return (SYMBOL_REF_DECL (x)
++ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
++ : SYMBOL_REF_LOCAL_P (x));
++}
++
++/* Return the method that should be used to access SYMBOL_REF or
++   LABEL_REF X. */
++
++static enum riscv_symbol_type
++riscv_classify_symbol (const_rtx x)
++{
++ if (riscv_tls_symbol_p (x))
++ return SYMBOL_TLS;
++
++ if (GET_CODE (x) == LABEL_REF)
++ {
++ if (LABEL_REF_NONLOCAL_P (x))
++ return SYMBOL_GOT_DISP;
++ return SYMBOL_ABSOLUTE;
++ }
++
++ gcc_assert (GET_CODE (x) == SYMBOL_REF);
++
++ if (flag_pic && !riscv_symbol_binds_local_p (x))
++ return SYMBOL_GOT_DISP;
++
++ return SYMBOL_ABSOLUTE;
++}
++
++/* Classify the base of symbolic expression X. */
++
++static enum riscv_symbol_type
++riscv_classify_symbolic_expression (rtx x)
++{
++ rtx offset;
++
++ split_const (x, &x, &offset);
++ if (UNSPEC_ADDRESS_P (x))
++ return UNSPEC_ADDRESS_TYPE (x);
++
++ return riscv_classify_symbol (x);
++}
++
++/* Return true if X is a symbolic constant.  If it is, store the type of
++   the symbol in *SYMBOL_TYPE. */
++
++bool
++riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
++{
++ rtx offset;
++
++ split_const (x, &x, &offset);
++ if (UNSPEC_ADDRESS_P (x))
++ {
++ *symbol_type = UNSPEC_ADDRESS_TYPE (x);
++ x = UNSPEC_ADDRESS (x);
++ }
++ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
++ *symbol_type = riscv_classify_symbol (x);
++ else
++ return false;
++
++ if (offset == const0_rtx)
++ return true;
++
++ /* Check whether a nonzero offset is valid for the underlying
++ relocations. */
++ switch (*symbol_type)
++ {
++ case SYMBOL_ABSOLUTE:
++ case SYMBOL_TLS_LE:
++ return (int32_t) INTVAL (offset) == INTVAL (offset);
++
++ default:
++ return false;
++ }
++ gcc_unreachable ();
++}
++
++/* Returns the number of instructions necessary to reference a symbol. */
++
++static int riscv_symbol_insns (enum riscv_symbol_type type)
++{
++ switch (type)
++ {
++ case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
++ case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference itself */
++ case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference itself */
++ case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference itself */
++ default: gcc_unreachable();
++ }
++}
++
++/* A for_each_rtx callback. Stop the search if *X references a
++ thread-local symbol. */
++
++static int
++riscv_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
++{
++ return riscv_tls_symbol_p (*x);
++}
++
++/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
++
++static bool
++riscv_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++ return riscv_const_insns (x) > 0;
++}
++
++/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
++
++static bool
++riscv_cannot_force_const_mem (enum machine_mode mode, rtx x)
++{
++ enum riscv_symbol_type type;
++ rtx base, offset;
++
++ /* There is no assembler syntax for expressing an address-sized
++ high part. */
++ if (GET_CODE (x) == HIGH)
++ return true;
++
++ /* As an optimization, reject constants that riscv_legitimize_move
++ can expand inline.
++
++ Suppose we have a multi-instruction sequence that loads constant C
++ into register R. If R does not get allocated a hard register, and
++ R is used in an operand that allows both registers and memory
++ references, reload will consider forcing C into memory and using
++ one of the instruction's memory alternatives. Returning false
++ here will force it to use an input reload instead. */
++ if (CONST_INT_P (x) && riscv_legitimate_constant_p (mode, x))
++ return true;
++
++ split_const (x, &base, &offset);
++ if (riscv_symbolic_constant_p (base, &type))
++ {
++ /* The same optimization as for CONST_INT. */
++ if (SMALL_INT (offset) && riscv_symbol_insns (type) > 0)
++ return true;
++
++ /* It's not worth creating additional dynamic relocations. */
++ if (flag_pic)
++ return true;
++ }
++
++ /* TLS symbols must be computed by riscv_legitimize_move. */
++ if (for_each_rtx (&x, &riscv_tls_symbol_ref_1, NULL))
++ return true;
++
++ return false;
++}
++
++/* Return true if register REGNO is a valid base register for mode MODE.
++ STRICT_P is true if REG_OK_STRICT is in effect. */
++
++int
++riscv_regno_mode_ok_for_base_p (int regno, enum machine_mode mode ATTRIBUTE_UNUSED,
++ bool strict_p)
++{
++ if (!HARD_REGISTER_NUM_P (regno))
++ {
++ if (!strict_p)
++ return true;
++ regno = reg_renumber[regno];
++ }
++
++ /* These fake registers will be eliminated to either the stack or
++ hard frame pointer, both of which are usually valid base registers.
++ Reload deals with the cases where the eliminated form isn't valid. */
++ if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
++ return true;
++
++ return GP_REG_P (regno);
++}
++
++/* Return true if X is a valid base register for mode MODE.
++ STRICT_P is true if REG_OK_STRICT is in effect. */
++
++static bool
++riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
++{
++ if (!strict_p && GET_CODE (x) == SUBREG)
++ x = SUBREG_REG (x);
++
++ return (REG_P (x)
++ && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
++}
++
++/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
++ can address a value of mode MODE. */
++
++static bool
++riscv_valid_offset_p (rtx x, enum machine_mode mode)
++{
++ /* Check that X is a signed 12-bit number. */
++ if (!const_arith_operand (x, Pmode))
++ return false;
++
++ /* We may need to split multiword moves, so make sure that every word
++ is accessible. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++ && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
++ return false;
++
++ return true;
++}
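Concretely, the second test above rejects an offset when a later word of a multiword access would leave the signed 12-bit range, even though the first word's offset fits. A sketch for RV32 (UNITS_PER_WORD = 4):

#include <assert.h>

static int small_operand (long long x)   /* assumed 12-bit signed test */
{
  return x >= -2048 && x <= 2047;
}

static int valid_offset (long long off, int mode_size)
{
  if (!small_operand (off))
    return 0;
  if (mode_size > 4 && !small_operand (off + mode_size - 4))
    return 0;   /* offset of the access's last word */
  return 1;
}

int main (void)
{
  assert (valid_offset (2044, 4));    /* SImode at 2044: fits         */
  assert (!valid_offset (2044, 8));   /* DImode: second word at 2048  */
  return 0;
}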
++
++/* Return true if a LO_SUM can address a value of mode MODE when the
++ LO_SUM symbol has type SYMBOL_TYPE. */
++
++static bool
++riscv_valid_lo_sum_p (enum riscv_symbol_type symbol_type, enum machine_mode mode)
++{
++ /* Check that symbols of type SYMBOL_TYPE can be used to access values
++ of mode MODE. */
++ if (riscv_symbol_insns (symbol_type) == 0)
++ return false;
++
++ /* Check that there is a known low-part relocation. */
++ if (riscv_lo_relocs[symbol_type] == NULL)
++ return false;
++
++ /* We may need to split multiword moves, so make sure that each word
++     can be accessed without inducing a carry.  This is needed for
++     modes whose size exceeds their guaranteed alignment, such as
++     128-bit types that are only 64-bit aligned. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++ && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
++ return false;
++
++ return true;
++}
++
++/* Return true if X is a valid address for machine mode MODE. If it is,
++ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
++ effect. */
++
++static bool
++riscv_classify_address (struct riscv_address_info *info, rtx x,
++ enum machine_mode mode, bool strict_p)
++{
++ switch (GET_CODE (x))
++ {
++ case REG:
++ case SUBREG:
++ info->type = ADDRESS_REG;
++ info->reg = x;
++ info->offset = const0_rtx;
++ return riscv_valid_base_register_p (info->reg, mode, strict_p);
++
++ case PLUS:
++ info->type = ADDRESS_REG;
++ info->reg = XEXP (x, 0);
++ info->offset = XEXP (x, 1);
++ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
++ && riscv_valid_offset_p (info->offset, mode));
++
++ case LO_SUM:
++ info->type = ADDRESS_LO_SUM;
++ info->reg = XEXP (x, 0);
++ info->offset = XEXP (x, 1);
++ /* We have to trust the creator of the LO_SUM to do something vaguely
++ sane. Target-independent code that creates a LO_SUM should also
++ create and verify the matching HIGH. Target-independent code that
++ adds an offset to a LO_SUM must prove that the offset will not
++ induce a carry. Failure to do either of these things would be
++ a bug, and we are not required to check for it here. The RISCV
++ backend itself should only create LO_SUMs for valid symbolic
++ constants, with the high part being either a HIGH or a copy
++ of _gp. */
++ info->symbol_type
++ = riscv_classify_symbolic_expression (info->offset);
++ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
++ && riscv_valid_lo_sum_p (info->symbol_type, mode));
++
++ case CONST_INT:
++      /* Small-integer addresses don't occur very often, but they
++	 are legitimate if x0 is a valid base register.  */
++ info->type = ADDRESS_CONST_INT;
++ return SMALL_INT (x);
++
++ default:
++ return false;
++ }
++}
++
++/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
++
++static bool
++riscv_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
++{
++ struct riscv_address_info addr;
++
++ return riscv_classify_address (&addr, x, mode, strict_p);
++}
++
++/* Return the number of instructions needed to load or store a value
++ of mode MODE at address X. Return 0 if X isn't valid for MODE.
++ Assume that multiword moves may need to be split into word moves
++ if MIGHT_SPLIT_P, otherwise assume that a single load or store is
++ enough. */
++
++int
++riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
++{
++ struct riscv_address_info addr;
++ int n = 1;
++
++ if (!riscv_classify_address (&addr, x, mode, false))
++ return 0;
++
++ /* BLKmode is used for single unaligned loads and stores and should
++ not count as a multiword mode. */
++ if (mode != BLKmode && might_split_p)
++ n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++
++ if (addr.type == ADDRESS_LO_SUM)
++ n += riscv_symbol_insns (addr.symbol_type) - 1;
++
++ return n;
++}
++
++/* Return the number of instructions needed to load constant X.
++ Return 0 if X isn't a valid constant. */
++
++int
++riscv_const_insns (rtx x)
++{
++ enum riscv_symbol_type symbol_type;
++ rtx offset;
++
++ switch (GET_CODE (x))
++ {
++ case HIGH:
++ if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
++ || !riscv_hi_relocs[symbol_type])
++ return 0;
++
++ /* This is simply an LUI. */
++ return 1;
++
++ case CONST_INT:
++ {
++ int cost = riscv_integer_cost (INTVAL (x));
++ /* Force complicated constants to memory. */
++ return cost < 4 ? cost : 0;
++ }
++
++ case CONST_DOUBLE:
++ case CONST_VECTOR:
++ /* Allow zeros for normal mode, where we can use x0. */
++ return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
++
++ case CONST:
++ /* See if we can refer to X directly. */
++ if (riscv_symbolic_constant_p (x, &symbol_type))
++ return riscv_symbol_insns (symbol_type);
++
++      /* Otherwise try splitting the constant into a base and offset.
++	 If the offset is a 12-bit value, we can load the base address
++	 into a register and then use ADDI to add in the offset.
++	 If the offset is larger, we can load the base and offset
++	 into separate registers and add them together with ADD.
++	 However, the latter is only possible before reload; during
++	 and after reload, we must have the option of forcing the
++	 constant into the pool instead.  */
++ split_const (x, &x, &offset);
++ if (offset != 0)
++ {
++ int n = riscv_const_insns (x);
++ if (n != 0)
++ {
++ if (SMALL_INT (offset))
++ return n + 1;
++ else if (!targetm.cannot_force_const_mem (GET_MODE (x), x))
++ return n + 1 + riscv_integer_cost (INTVAL (offset));
++ }
++ }
++ return 0;
++
++ case SYMBOL_REF:
++ case LABEL_REF:
++ return riscv_symbol_insns (riscv_classify_symbol (x));
++
++ default:
++ return 0;
++ }
++}
++
++/* X is a doubleword constant that can be handled by splitting it into
++ two words and loading each word separately. Return the number of
++ instructions required to do this. */
++
++int
++riscv_split_const_insns (rtx x)
++{
++ unsigned int low, high;
++
++ low = riscv_const_insns (riscv_subword (x, false));
++ high = riscv_const_insns (riscv_subword (x, true));
++ gcc_assert (low > 0 && high > 0);
++ return low + high;
++}
++
++/* Return the number of instructions needed to implement INSN,
++ given that it loads from or stores to MEM. */
++
++int
++riscv_load_store_insns (rtx mem, rtx insn)
++{
++ enum machine_mode mode;
++ bool might_split_p;
++ rtx set;
++
++ gcc_assert (MEM_P (mem));
++ mode = GET_MODE (mem);
++
++ /* Try to prove that INSN does not need to be split. */
++ might_split_p = true;
++ if (GET_MODE_BITSIZE (mode) == 64)
++ {
++ set = single_set (insn);
++ if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
++ might_split_p = false;
++ }
++
++ return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
++}
++
++/* Emit a move from SRC to DEST. Assume that the move expanders can
++ handle all moves if !can_create_pseudo_p (). The distinction is
++ important because, unlike emit_move_insn, the move expanders know
++ how to force Pmode objects into the constant pool even when the
++ constant pool address is not itself legitimate. */
++
++rtx
++riscv_emit_move (rtx dest, rtx src)
++{
++ return (can_create_pseudo_p ()
++ ? emit_move_insn (dest, src)
++ : emit_move_insn_1 (dest, src));
++}
++
++/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
++
++static void
++riscv_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
++{
++ emit_insn (gen_rtx_SET (VOIDmode, target,
++ gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
++}
++
++/* Compute (CODE OP0 OP1) and store the result in a new register
++ of mode MODE. Return that new register. */
++
++static rtx
++riscv_force_binary (enum machine_mode mode, enum rtx_code code, rtx op0, rtx op1)
++{
++ rtx reg;
++
++ reg = gen_reg_rtx (mode);
++ riscv_emit_binary (code, reg, op0, op1);
++ return reg;
++}
++
++/* Copy VALUE to a register and return that register. If new pseudos
++ are allowed, copy it into a new register, otherwise use DEST. */
++
++static rtx
++riscv_force_temporary (rtx dest, rtx value)
++{
++ if (can_create_pseudo_p ())
++ return force_reg (Pmode, value);
++ else
++ {
++ riscv_emit_move (dest, value);
++ return dest;
++ }
++}
++
++/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
++ then add CONST_INT OFFSET to the result. */
++
++static rtx
++riscv_unspec_address_offset (rtx base, rtx offset,
++ enum riscv_symbol_type symbol_type)
++{
++ base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
++ UNSPEC_ADDRESS_FIRST + symbol_type);
++ if (offset != const0_rtx)
++ base = gen_rtx_PLUS (Pmode, base, offset);
++ return gen_rtx_CONST (Pmode, base);
++}
++
++/* Return an UNSPEC address with underlying address ADDRESS and symbol
++ type SYMBOL_TYPE. */
++
++rtx
++riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
++{
++ rtx base, offset;
++
++ split_const (address, &base, &offset);
++ return riscv_unspec_address_offset (base, offset, symbol_type);
++}
++
++/* If OP is an UNSPEC address, return the address to which it refers,
++ otherwise return OP itself. */
++
++static rtx
++riscv_strip_unspec_address (rtx op)
++{
++ rtx base, offset;
++
++ split_const (op, &base, &offset);
++ if (UNSPEC_ADDRESS_P (base))
++ op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
++ return op;
++}
++
++/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
++ high part to BASE and return the result. Just return BASE otherwise.
++ TEMP is as for riscv_force_temporary.
++
++ The returned expression can be used as the first operand to a LO_SUM. */
++
++static rtx
++riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
++{
++ addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
++ return riscv_force_temporary (temp, addr);
++}
++
++/* Load an entry from the GOT.  */
++
++static rtx
++riscv_got_load_tls_gd (rtx dest, rtx sym)
++{
++  return (Pmode == DImode
++	  ? gen_got_load_tls_gddi (dest, sym)
++	  : gen_got_load_tls_gdsi (dest, sym));
++}
++
++static rtx
++riscv_got_load_tls_ie (rtx dest, rtx sym)
++{
++  return (Pmode == DImode
++	  ? gen_got_load_tls_iedi (dest, sym)
++	  : gen_got_load_tls_iesi (dest, sym));
++}
++
++static rtx
++riscv_tls_add_tp_le (rtx dest, rtx base, rtx sym)
++{
++  rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++  return (Pmode == DImode
++	  ? gen_tls_add_tp_ledi (dest, base, tp, sym)
++	  : gen_tls_add_tp_lesi (dest, base, tp, sym));
++}
++
++/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
++ it appears in a MEM of that mode. Return true if ADDR is a legitimate
++ constant in that context and can be split into high and low parts.
++ If so, and if LOW_OUT is nonnull, emit the high part and store the
++ low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
++
++ TEMP is as for riscv_force_temporary and is used to load the high
++ part into a register.
++
++ When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
++   a legitimate SET_SRC for an .md pattern, otherwise the low part
++ is guaranteed to be a legitimate address for mode MODE. */
++
++bool
++riscv_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
++{
++ enum riscv_symbol_type symbol_type;
++ rtx high;
++
++ if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
++ || !riscv_symbolic_constant_p (addr, &symbol_type)
++ || riscv_symbol_insns (symbol_type) == 0
++ || !riscv_hi_relocs[symbol_type])
++ return false;
++
++ if (low_out)
++ {
++ switch (symbol_type)
++ {
++ case SYMBOL_ABSOLUTE:
++ high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
++ high = riscv_force_temporary (temp, high);
++ *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++ }
++
++ return true;
++}
++
++/* Return a legitimate address for REG + OFFSET. TEMP is as for
++ riscv_force_temporary; it is only needed when OFFSET is not a
++ SMALL_OPERAND. */
++
++static rtx
++riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
++{
++ if (!SMALL_OPERAND (offset))
++ {
++ rtx high;
++
++      /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
++	 The addition inside the macro RISCV_CONST_HIGH_PART may cause
++	 an overflow, so we need to force a sign-extension check.  */
++ high = gen_int_mode (RISCV_CONST_HIGH_PART (offset), Pmode);
++ offset = RISCV_CONST_LOW_PART (offset);
++ high = riscv_force_temporary (temp, high);
++ reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
++ }
++ return plus_constant (Pmode, reg, offset);
++}
++
++/* The __tls_get_addr symbol.  */
++static GTY(()) rtx riscv_tls_symbol;
++
++/* Return an instruction sequence that calls __tls_get_addr.  SYM is
++   the TLS symbol we are referencing and RESULT is an RTX for the
++   return value location.  */
++
++static rtx
++riscv_call_tls_get_addr (rtx sym, rtx result)
++{
++ rtx insn, a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
++
++ if (!riscv_tls_symbol)
++ riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
++
++ start_sequence ();
++
++ emit_insn (riscv_got_load_tls_gd (a0, sym));
++ insn = riscv_expand_call (false, result, riscv_tls_symbol, const0_rtx);
++ RTL_CONST_CALL_P (insn) = 1;
++ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
++ insn = get_insns ();
++
++ end_sequence ();
++
++ return insn;
++}
++
++/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
++ its address. The return value will be both a valid address and a valid
++ SET_SRC (either a REG or a LO_SUM). */
++
++static rtx
++riscv_legitimize_tls_address (rtx loc)
++{
++ rtx dest, insn, tp, tmp1;
++ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
++
++ /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */
++ if (!flag_pic)
++ model = TLS_MODEL_LOCAL_EXEC;
++
++ switch (model)
++ {
++ case TLS_MODEL_LOCAL_DYNAMIC:
++ /* Rely on section anchors for the optimization that LDM TLS
++ provides. The anchor's address is loaded with GD TLS. */
++ case TLS_MODEL_GLOBAL_DYNAMIC:
++ tmp1 = gen_rtx_REG (Pmode, GP_RETURN);
++ insn = riscv_call_tls_get_addr (loc, tmp1);
++ dest = gen_reg_rtx (Pmode);
++ emit_libcall_block (insn, dest, tmp1, loc);
++ break;
++
++ case TLS_MODEL_INITIAL_EXEC:
++ /* la.tls.ie; tp-relative add */
++ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++ tmp1 = gen_reg_rtx (Pmode);
++ emit_insn (riscv_got_load_tls_ie (tmp1, loc));
++ dest = gen_reg_rtx (Pmode);
++ emit_insn (gen_add3_insn (dest, tmp1, tp));
++ break;
++
++ case TLS_MODEL_LOCAL_EXEC:
++ tmp1 = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
++ dest = gen_reg_rtx (Pmode);
++ emit_insn (riscv_tls_add_tp_le (dest, tmp1, loc));
++ dest = gen_rtx_LO_SUM (Pmode, dest,
++ riscv_unspec_address (loc, SYMBOL_TLS_LE));
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++ return dest;
++}
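++
++/* As a hedged illustration, the local-exec case above typically
++   expands to a sequence along the lines of
++
++	lui	t0, %tprel_hi(sym)
++	add	t0, t0, tp
++	lw	a0, %tprel_lo(sym)(t0)
++
++   with the trailing LO_SUM folded into the load or store that uses
++   the address.  (Register names are illustrative.)  */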
++
++/* If X is not a valid address for mode MODE, force it into a register. */
++
++static rtx
++riscv_force_address (rtx x, enum machine_mode mode)
++{
++ if (!riscv_legitimate_address_p (mode, x, false))
++ x = force_reg (Pmode, x);
++ return x;
++}
++
++/* This function is used to implement LEGITIMIZE_ADDRESS. If X can
++ be legitimized in a way that the generic machinery might not expect,
++ return a new address, otherwise return NULL. MODE is the mode of
++ the memory being accessed. */
++
++static rtx
++riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
++ enum machine_mode mode)
++{
++ rtx addr;
++
++ if (riscv_tls_symbol_p (x))
++ return riscv_legitimize_tls_address (x);
++
++ /* See if the address can split into a high part and a LO_SUM. */
++ if (riscv_split_symbol (NULL, x, mode, &addr))
++ return riscv_force_address (addr, mode);
++
++ /* Handle BASE + OFFSET using riscv_add_offset. */
++ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
++ && INTVAL (XEXP (x, 1)) != 0)
++ {
++ rtx base = XEXP (x, 0);
++ HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
++
++ if (!riscv_valid_base_register_p (base, mode, false))
++ base = copy_to_mode_reg (Pmode, base);
++ addr = riscv_add_offset (NULL, base, offset);
++ return riscv_force_address (addr, mode);
++ }
++
++ return x;
++}
++
++/* Load VALUE into DEST. TEMP is as for riscv_force_temporary. */
++
++void
++riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
++{
++ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++ enum machine_mode mode;
++ int i, num_ops;
++ rtx x;
++
++ mode = GET_MODE (dest);
++ num_ops = riscv_build_integer (codes, value, mode);
++
++ if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
++ && num_ops >= riscv_split_integer_cost (value))
++ x = riscv_split_integer (value, mode);
++ else
++ {
++ /* Apply each binary operation to X. */
++ x = GEN_INT (codes[0].value);
++
++ for (i = 1; i < num_ops; i++)
++ {
++ if (!can_create_pseudo_p ())
++ {
++ emit_insn (gen_rtx_SET (VOIDmode, temp, x));
++ x = temp;
++ }
++ else
++ x = force_reg (mode, x);
++
++ x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
++ }
++ }
++
++ emit_insn (gen_rtx_SET (VOIDmode, dest, x));
++}
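++
++/* For example, on RV32 the constant 0x12345678 is built as an LUI of
++   0x12345 followed by an ADDI of 0x678.  (Illustrative; the actual
++   operation list comes from riscv_build_integer.)  */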
++
++/* Subroutine of riscv_legitimize_move. Move constant SRC into register
++ DEST given that SRC satisfies immediate_operand but doesn't satisfy
++ move_operand. */
++
++static void
++riscv_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
++{
++ rtx base, offset;
++
++ /* Split moves of big integers into smaller pieces. */
++ if (splittable_const_int_operand (src, mode))
++ {
++ riscv_move_integer (dest, dest, INTVAL (src));
++ return;
++ }
++
++ /* Split moves of symbolic constants into high/low pairs. */
++ if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
++ {
++ emit_insn (gen_rtx_SET (VOIDmode, dest, src));
++ return;
++ }
++
++ /* Generate the appropriate access sequences for TLS symbols. */
++ if (riscv_tls_symbol_p (src))
++ {
++ riscv_emit_move (dest, riscv_legitimize_tls_address (src));
++ return;
++ }
++
++  /* If we have (const (plus symbol offset)), and that expression cannot
++     be forced into memory, load the symbol first and add in the offset.
++     Prefer to do this even if the constant _can_ be forced into memory,
++     as it usually produces better code.  */
++ split_const (src, &base, &offset);
++ if (offset != const0_rtx
++ && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
++ {
++ base = riscv_force_temporary (dest, base);
++ riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
++ return;
++ }
++
++ src = force_const_mem (mode, src);
++
++ /* When using explicit relocs, constant pool references are sometimes
++ not legitimate addresses. */
++ riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
++ riscv_emit_move (dest, src);
++}
++
++/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
++ sequence that is valid. */
++
++bool
++riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
++{
++ if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
++ {
++ riscv_emit_move (dest, force_reg (mode, src));
++ return true;
++ }
++
++ /* We need to deal with constants that would be legitimate
++ immediate_operands but aren't legitimate move_operands. */
++ if (CONSTANT_P (src) && !move_operand (src, mode))
++ {
++ riscv_legitimize_const_move (mode, dest, src);
++ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
++ return true;
++ }
++ return false;
++}
++
++/* Return true if there is an instruction that implements CODE and accepts
++ X as an immediate operand. */
++
++static int
++riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
++{
++ switch (code)
++ {
++ case ASHIFT:
++ case ASHIFTRT:
++ case LSHIFTRT:
++ /* All shift counts are truncated to a valid constant. */
++ return true;
++
++ case AND:
++ case IOR:
++ case XOR:
++ case PLUS:
++ case LT:
++ case LTU:
++ /* These instructions take 12-bit signed immediates. */
++ return SMALL_OPERAND (x);
++
++ case LE:
++ /* We add 1 to the immediate and use SLT. */
++ return SMALL_OPERAND (x + 1);
++
++ case LEU:
++ /* Likewise SLTU, but reject the always-true case. */
++ return SMALL_OPERAND (x + 1) && x + 1 != 0;
++
++ case GE:
++ case GEU:
++ /* We can emulate an immediate of 1 by using GT/GTU against x0. */
++ return x == 1;
++
++ default:
++ /* By default assume that x0 can be used for 0. */
++ return x == 0;
++ }
++}
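++
++/* For example, "x <= 100" can be implemented as SLTI with immediate
++   101, and "x >= 1" is handled as "x > 0", a GT against x0.
++   (Illustrative mappings; the .md patterns make the final choice.)  */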
++
++/* Return the cost of binary operation X, given that the instruction
++   sequence for a word-sized or smaller operation takes SINGLE_INSNS
++   instructions and that the sequence for a double-word operation takes
++   DOUBLE_INSNS instructions.  */
++
++static int
++riscv_binary_cost (rtx x, int single_insns, int double_insns)
++{
++ if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
++ return COSTS_N_INSNS (double_insns);
++ return COSTS_N_INSNS (single_insns);
++}
++
++/* Return the cost of sign-extending OP to mode MODE, not including the
++ cost of OP itself. */
++
++static int
++riscv_sign_extend_cost (enum machine_mode mode, rtx op)
++{
++ if (MEM_P (op))
++ /* Extended loads are as cheap as unextended ones. */
++ return 0;
++
++ if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
++ /* A sign extension from SImode to DImode in 64-bit mode is free. */
++ return 0;
++
++ /* We need to use a shift left and a shift right. */
++ return COSTS_N_INSNS (2);
++}
++
++/* Return the cost of zero-extending OP to mode MODE, not including the
++ cost of OP itself. */
++
++static int
++riscv_zero_extend_cost (enum machine_mode mode, rtx op)
++{
++ if (MEM_P (op))
++ /* Extended loads are as cheap as unextended ones. */
++ return 0;
++
++  if ((TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
++      || ((mode == DImode || mode == SImode) && GET_MODE (op) == HImode))
++    /* We need a shift left followed by a shift right, since a single
++       ANDI cannot encode the required mask.  */
++ return COSTS_N_INSNS (2);
++
++ /* We can use ANDI. */
++ return COSTS_N_INSNS (1);
++}
++
++/* Implement TARGET_RTX_COSTS. */
++
++static bool
++riscv_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
++ int *total, bool speed)
++{
++ enum machine_mode mode = GET_MODE (x);
++ bool float_mode_p = FLOAT_MODE_P (mode);
++ int cost;
++
++ switch (code)
++ {
++ case CONST_INT:
++ if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
++ {
++ *total = 0;
++ return true;
++ }
++ /* Fall through. */
++
++ case SYMBOL_REF:
++ case LABEL_REF:
++ case CONST_DOUBLE:
++ case CONST:
++ if (speed)
++ *total = 1;
++ else if ((cost = riscv_const_insns (x)) > 0)
++ *total = COSTS_N_INSNS (cost);
++ else /* The instruction will be fetched from the constant pool. */
++ *total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
++ return true;
++
++ case MEM:
++ /* If the address is legitimate, return the number of
++ instructions it needs. */
++ if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
++ {
++ *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
++ return true;
++ }
++ /* Otherwise use the default handling. */
++ return false;
++
++ case NOT:
++ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
++ return false;
++
++ case AND:
++ case IOR:
++ case XOR:
++ /* Double-word operations use two single-word operations. */
++ *total = riscv_binary_cost (x, 1, 2);
++ return false;
++
++ case ASHIFT:
++ case ASHIFTRT:
++ case LSHIFTRT:
++ *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
++ return false;
++
++ case ABS:
++ *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
++ return false;
++
++ case LO_SUM:
++ *total = set_src_cost (XEXP (x, 0), speed);
++ return true;
++
++ case LT:
++ case LTU:
++ case LE:
++ case LEU:
++ case GT:
++ case GTU:
++ case GE:
++ case GEU:
++ case EQ:
++ case NE:
++ case UNORDERED:
++ case LTGT:
++ /* Branch comparisons have VOIDmode, so use the first operand's
++ mode instead. */
++ mode = GET_MODE (XEXP (x, 0));
++      if (FLOAT_MODE_P (mode))
++ *total = tune_info->fp_add[mode == DFmode];
++ else
++ *total = riscv_binary_cost (x, 1, 3);
++ return false;
++
++ case MINUS:
++ if (float_mode_p
++ && !HONOR_NANS (mode)
++ && !HONOR_SIGNED_ZEROS (mode))
++ {
++ /* See if we can use NMADD or NMSUB. See riscv.md for the
++ associated patterns. */
++ rtx op0 = XEXP (x, 0);
++ rtx op1 = XEXP (x, 1);
++ if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
++ {
++ *total = (tune_info->fp_mul[mode == DFmode]
++ + set_src_cost (XEXP (XEXP (op0, 0), 0), speed)
++ + set_src_cost (XEXP (op0, 1), speed)
++ + set_src_cost (op1, speed));
++ return true;
++ }
++ if (GET_CODE (op1) == MULT)
++ {
++ *total = (tune_info->fp_mul[mode == DFmode]
++ + set_src_cost (op0, speed)
++ + set_src_cost (XEXP (op1, 0), speed)
++ + set_src_cost (XEXP (op1, 1), speed));
++ return true;
++ }
++ }
++ /* Fall through. */
++
++ case PLUS:
++ if (float_mode_p)
++ *total = tune_info->fp_add[mode == DFmode];
++ else
++ *total = riscv_binary_cost (x, 1, 4);
++ return false;
++
++ case NEG:
++ if (float_mode_p
++ && !HONOR_NANS (mode)
++ && HONOR_SIGNED_ZEROS (mode))
++ {
++ /* See if we can use NMADD or NMSUB. See riscv.md for the
++ associated patterns. */
++ rtx op = XEXP (x, 0);
++ if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
++ && GET_CODE (XEXP (op, 0)) == MULT)
++ {
++ *total = (tune_info->fp_mul[mode == DFmode]
++ + set_src_cost (XEXP (XEXP (op, 0), 0), speed)
++ + set_src_cost (XEXP (XEXP (op, 0), 1), speed)
++ + set_src_cost (XEXP (op, 1), speed));
++ return true;
++ }
++ }
++
++ if (float_mode_p)
++ *total = tune_info->fp_add[mode == DFmode];
++ else
++ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
++ return false;
++
++ case MULT:
++ if (float_mode_p)
++ *total = tune_info->fp_mul[mode == DFmode];
++ else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
++ *total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
++ else if (!speed)
++ *total = COSTS_N_INSNS (1);
++ else
++ *total = tune_info->int_mul[mode == DImode];
++ return false;
++
++ case DIV:
++ case SQRT:
++ case MOD:
++ if (float_mode_p)
++ {
++ *total = tune_info->fp_div[mode == DFmode];
++ return false;
++ }
++ /* Fall through. */
++
++ case UDIV:
++ case UMOD:
++ if (speed)
++ *total = tune_info->int_div[mode == DImode];
++ else
++ *total = COSTS_N_INSNS (1);
++ return false;
++
++ case SIGN_EXTEND:
++ *total = riscv_sign_extend_cost (mode, XEXP (x, 0));
++ return false;
++
++ case ZERO_EXTEND:
++ *total = riscv_zero_extend_cost (mode, XEXP (x, 0));
++ return false;
++
++ case FLOAT:
++ case UNSIGNED_FLOAT:
++ case FIX:
++ case FLOAT_EXTEND:
++ case FLOAT_TRUNCATE:
++ *total = tune_info->fp_add[mode == DFmode];
++ return false;
++
++ default:
++ return false;
++ }
++}
++
++/* Implement TARGET_ADDRESS_COST. */
++
++static int
++riscv_address_cost (rtx addr, enum machine_mode mode,
++ addr_space_t as ATTRIBUTE_UNUSED,
++ bool speed ATTRIBUTE_UNUSED)
++{
++ return riscv_address_insns (addr, mode, false);
++}
++
++/* Return one word of double-word value OP. HIGH_P is true to select the
++ high part or false to select the low part. */
++
++rtx
++riscv_subword (rtx op, bool high_p)
++{
++ unsigned int byte;
++ enum machine_mode mode;
++
++ mode = GET_MODE (op);
++ if (mode == VOIDmode)
++ mode = TARGET_64BIT ? TImode : DImode;
++
++ byte = high_p ? UNITS_PER_WORD : 0;
++
++ if (FP_REG_RTX_P (op))
++ return gen_rtx_REG (word_mode, REGNO (op) + high_p);
++
++ if (MEM_P (op))
++ return adjust_address (op, word_mode, byte);
++
++ return simplify_gen_subreg (word_mode, op, mode, byte);
++}
++
++/* Return true if a 64-bit move from SRC to DEST should be split into two. */
++
++bool
++riscv_split_64bit_move_p (rtx dest, rtx src)
++{
++ /* All 64b moves are legal in 64b mode. All 64b FPR <-> FPR and
++ FPR <-> MEM moves are legal in 32b mode, too. Although
++ FPR <-> GPR moves are not available in general in 32b mode,
++ we can at least load 0 into an FPR with fcvt.d.w fpr, x0. */
++ return !(TARGET_64BIT
++ || (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
++ || (FP_REG_RTX_P (dest) && MEM_P (src))
++ || (FP_REG_RTX_P (src) && MEM_P (dest))
++	   || (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src))));
++}
++
++/* Split a doubleword move from SRC to DEST. On 32-bit targets,
++ this function handles 64-bit moves for which riscv_split_64bit_move_p
++ holds. For 64-bit targets, this function handles 128-bit moves. */
++
++void
++riscv_split_doubleword_move (rtx dest, rtx src)
++{
++ rtx low_dest;
++
++ /* The operation can be split into two normal moves. Decide in
++ which order to do them. */
++ low_dest = riscv_subword (dest, false);
++ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
++ {
++ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
++ riscv_emit_move (low_dest, riscv_subword (src, false));
++ }
++ else
++ {
++ riscv_emit_move (low_dest, riscv_subword (src, false));
++ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
++ }
++}
++
++/* Return the appropriate instructions to move SRC into DEST. Assume
++ that SRC is operand 1 and DEST is operand 0. */
++
++const char *
++riscv_output_move (rtx dest, rtx src)
++{
++ enum rtx_code dest_code, src_code;
++ enum machine_mode mode;
++ bool dbl_p;
++
++ dest_code = GET_CODE (dest);
++ src_code = GET_CODE (src);
++ mode = GET_MODE (dest);
++ dbl_p = (GET_MODE_SIZE (mode) == 8);
++
++ if (dbl_p && riscv_split_64bit_move_p (dest, src))
++ return "#";
++
++ if ((src_code == REG && GP_REG_P (REGNO (src)))
++ || (src == CONST0_RTX (mode)))
++ {
++ if (dest_code == REG)
++ {
++ if (GP_REG_P (REGNO (dest)))
++ return "move\t%0,%z1";
++
++ if (FP_REG_P (REGNO (dest)))
++ {
++ if (!dbl_p)
++ return "fmv.s.x\t%0,%z1";
++ if (TARGET_64BIT)
++ return "fmv.d.x\t%0,%z1";
++	    /* In RV32, we can emulate "fmv.d.x %0, x0" using fcvt.d.w.  */
++ gcc_assert (src == CONST0_RTX (mode));
++ return "fcvt.d.w\t%0,x0";
++ }
++ }
++ if (dest_code == MEM)
++ switch (GET_MODE_SIZE (mode))
++ {
++ case 1: return "sb\t%z1,%0";
++ case 2: return "sh\t%z1,%0";
++ case 4: return "sw\t%z1,%0";
++ case 8: return "sd\t%z1,%0";
++ }
++ }
++ if (dest_code == REG && GP_REG_P (REGNO (dest)))
++ {
++ if (src_code == REG)
++ {
++ if (FP_REG_P (REGNO (src)))
++ return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
++ }
++
++ if (src_code == MEM)
++ switch (GET_MODE_SIZE (mode))
++ {
++ case 1: return "lbu\t%0,%1";
++ case 2: return "lhu\t%0,%1";
++ case 4: return "lw\t%0,%1";
++ case 8: return "ld\t%0,%1";
++ }
++
++ if (src_code == CONST_INT)
++ return "li\t%0,%1";
++
++ if (src_code == HIGH)
++ return "lui\t%0,%h1";
++
++ if (symbolic_operand (src, VOIDmode))
++ switch (riscv_classify_symbolic_expression (src))
++ {
++ case SYMBOL_GOT_DISP: return "la\t%0,%1";
++ case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
++ default: gcc_unreachable();
++ }
++ }
++ if (src_code == REG && FP_REG_P (REGNO (src)))
++ {
++ if (dest_code == REG && FP_REG_P (REGNO (dest)))
++ return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
++
++ if (dest_code == MEM)
++ return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
++ }
++ if (dest_code == REG && FP_REG_P (REGNO (dest)))
++ {
++ if (src_code == MEM)
++ return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
++ }
++ gcc_unreachable ();
++}
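++
++/* For example, storing an integer zero prints as "sw x0,..." thanks
++   to the %z modifier, and moving a double from a GPR to an FPR on
++   RV64 prints as "fmv.d.x".  (Illustrative of the returned
++   templates above.)  */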
++
++/* Return true if CMP1 is a suitable second operand for integer ordering
++ test CODE. See also the *sCC patterns in riscv.md. */
++
++static bool
++riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
++{
++ switch (code)
++ {
++ case GT:
++ case GTU:
++ return reg_or_0_operand (cmp1, VOIDmode);
++
++ case GE:
++ case GEU:
++ return cmp1 == const1_rtx;
++
++ case LT:
++ case LTU:
++ return arith_operand (cmp1, VOIDmode);
++
++ case LE:
++ return sle_operand (cmp1, VOIDmode);
++
++ case LEU:
++ return sleu_operand (cmp1, VOIDmode);
++
++ default:
++ gcc_unreachable ();
++ }
++}
++
++/* Return true if *CMP1 (of mode MODE) is a valid second operand for
++ integer ordering test *CODE, or if an equivalent combination can
++ be formed by adjusting *CODE and *CMP1. When returning true, update
++ *CODE and *CMP1 with the chosen code and operand, otherwise leave
++ them alone. */
++
++static bool
++riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
++ enum machine_mode mode)
++{
++ HOST_WIDE_INT plus_one;
++
++ if (riscv_int_order_operand_ok_p (*code, *cmp1))
++ return true;
++
++ if (CONST_INT_P (*cmp1))
++ switch (*code)
++ {
++ case LE:
++ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
++ if (INTVAL (*cmp1) < plus_one)
++ {
++ *code = LT;
++ *cmp1 = force_reg (mode, GEN_INT (plus_one));
++ return true;
++ }
++ break;
++
++ case LEU:
++ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
++ if (plus_one != 0)
++ {
++ *code = LTU;
++ *cmp1 = force_reg (mode, GEN_INT (plus_one));
++ return true;
++ }
++ break;
++
++ default:
++ break;
++ }
++ return false;
++}
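++
++/* For example, "x <= 4" is rewritten above as "x < 5", which the SLT
++   family can handle; the overflow check prevents "x <= INT_MAX" from
++   wrapping around to "x < INT_MIN".  */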
++
++/* Compare CMP0 and CMP1 using ordering test CODE and store the result
++ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
++ is nonnull, it's OK to set TARGET to the inverse of the result and
++ flip *INVERT_PTR instead. */
++
++static void
++riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
++ rtx target, rtx cmp0, rtx cmp1)
++{
++ enum machine_mode mode;
++
++ /* First see if there is a RISCV instruction that can do this operation.
++ If not, try doing the same for the inverse operation. If that also
++ fails, force CMP1 into a register and try again. */
++ mode = GET_MODE (cmp0);
++ if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
++ riscv_emit_binary (code, target, cmp0, cmp1);
++ else
++ {
++ enum rtx_code inv_code = reverse_condition (code);
++ if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
++ {
++ cmp1 = force_reg (mode, cmp1);
++ riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
++ }
++ else if (invert_ptr == 0)
++ {
++ rtx inv_target;
++
++ inv_target = riscv_force_binary (GET_MODE (target),
++ inv_code, cmp0, cmp1);
++ riscv_emit_binary (XOR, target, inv_target, const1_rtx);
++ }
++ else
++ {
++ *invert_ptr = !*invert_ptr;
++ riscv_emit_binary (inv_code, target, cmp0, cmp1);
++ }
++ }
++}
++
++/* Return a register that is zero iff CMP0 and CMP1 are equal.
++ The register will have the same mode as CMP0. */
++
++static rtx
++riscv_zero_if_equal (rtx cmp0, rtx cmp1)
++{
++ if (cmp1 == const0_rtx)
++ return cmp0;
++
++ return expand_binop (GET_MODE (cmp0), sub_optab,
++ cmp0, cmp1, 0, 0, OPTAB_DIRECT);
++}
++
++/* Return false if we can easily emit code for the FP comparison specified
++ by *CODE. If not, set *CODE to its inverse and return true. */
++
++static bool
++riscv_reversed_fp_cond (enum rtx_code *code)
++{
++ switch (*code)
++ {
++ case EQ:
++ case LT:
++ case LE:
++ case GT:
++ case GE:
++ case LTGT:
++ case ORDERED:
++ /* We know how to emit code for these cases... */
++ return false;
++
++ default:
++ /* ...but we must invert these and rely on the others. */
++ *code = reverse_condition_maybe_unordered (*code);
++ return true;
++ }
++}
++
++/* Convert a comparison into something that can be used in a branch or
++ conditional move. On entry, *OP0 and *OP1 are the values being
++ compared and *CODE is the code used to compare them.
++
++ Update *CODE, *OP0 and *OP1 so that they describe the final comparison. */
++
++static void
++riscv_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1)
++{
++ rtx cmp_op0 = *op0;
++ rtx cmp_op1 = *op1;
++
++ if (GET_MODE_CLASS (GET_MODE (*op0)) == MODE_INT)
++ {
++ if (splittable_const_int_operand (cmp_op1, VOIDmode))
++ {
++ HOST_WIDE_INT rhs = INTVAL (cmp_op1), new_rhs;
++ enum rtx_code new_code;
++
++ switch (*code)
++ {
++ case LTU: new_rhs = rhs - 1; new_code = LEU; goto try_new_rhs;
++ case LEU: new_rhs = rhs + 1; new_code = LTU; goto try_new_rhs;
++ case GTU: new_rhs = rhs + 1; new_code = GEU; goto try_new_rhs;
++ case GEU: new_rhs = rhs - 1; new_code = GTU; goto try_new_rhs;
++ case LT: new_rhs = rhs - 1; new_code = LE; goto try_new_rhs;
++ case LE: new_rhs = rhs + 1; new_code = LT; goto try_new_rhs;
++ case GT: new_rhs = rhs + 1; new_code = GE; goto try_new_rhs;
++ case GE: new_rhs = rhs - 1; new_code = GT;
++ try_new_rhs:
++ /* Convert e.g. OP0 > 4095 into OP0 >= 4096. */
++ if ((rhs < 0) == (new_rhs < 0)
++ && riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs))
++ {
++ *op1 = GEN_INT (new_rhs);
++ *code = new_code;
++ }
++ break;
++
++ case EQ:
++ case NE:
++ /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
++ if (SMALL_OPERAND (-rhs))
++ {
++ *op0 = gen_reg_rtx (GET_MODE (cmp_op0));
++ riscv_emit_binary (PLUS, *op0, cmp_op0, GEN_INT (-rhs));
++ *op1 = const0_rtx;
++	      }
++	    break;
++
++	  default:
++ break;
++ }
++ }
++
++ if (*op1 != const0_rtx)
++ *op1 = force_reg (GET_MODE (cmp_op0), *op1);
++ }
++ else
++ {
++ /* For FP comparisons, set an integer register with the result of the
++ comparison, then branch on it. */
++ rtx tmp0, tmp1, final_op;
++ enum rtx_code fp_code = *code;
++ *code = riscv_reversed_fp_cond (&fp_code) ? EQ : NE;
++
++ switch (fp_code)
++ {
++ case ORDERED:
++ /* a == a && b == b */
++ tmp0 = gen_reg_rtx (SImode);
++ riscv_emit_binary (EQ, tmp0, cmp_op0, cmp_op0);
++ tmp1 = gen_reg_rtx (SImode);
++ riscv_emit_binary (EQ, tmp1, cmp_op1, cmp_op1);
++ final_op = gen_reg_rtx (SImode);
++ riscv_emit_binary (AND, final_op, tmp0, tmp1);
++ break;
++
++ case LTGT:
++ /* a < b || a > b */
++ tmp0 = gen_reg_rtx (SImode);
++ riscv_emit_binary (LT, tmp0, cmp_op0, cmp_op1);
++ tmp1 = gen_reg_rtx (SImode);
++ riscv_emit_binary (GT, tmp1, cmp_op0, cmp_op1);
++ final_op = gen_reg_rtx (SImode);
++ riscv_emit_binary (IOR, final_op, tmp0, tmp1);
++ break;
++
++ case EQ:
++ case LE:
++ case LT:
++ case GE:
++ case GT:
++ /* We have instructions for these cases. */
++ final_op = gen_reg_rtx (SImode);
++ riscv_emit_binary (fp_code, final_op, cmp_op0, cmp_op1);
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ /* Compare the binary result against 0. */
++ *op0 = final_op;
++ *op1 = const0_rtx;
++ }
++}
++
++/* Try performing the comparison in OPERANDS[1], whose arms are OPERANDS[2]
++   and OPERANDS[3].  Store the result in OPERANDS[0].
++
++ On 64-bit targets, the mode of the comparison and target will always be
++ SImode, thus possibly narrower than that of the comparison's operands. */
++
++void
++riscv_expand_scc (rtx operands[])
++{
++ rtx target = operands[0];
++ enum rtx_code code = GET_CODE (operands[1]);
++ rtx op0 = operands[2];
++ rtx op1 = operands[3];
++
++ gcc_assert (GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT);
++
++ if (code == EQ || code == NE)
++ {
++ rtx zie = riscv_zero_if_equal (op0, op1);
++ riscv_emit_binary (code, target, zie, const0_rtx);
++ }
++ else
++ riscv_emit_int_order_test (code, 0, target, op0, op1);
++}
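++
++/* For example, "a == b" is computed above as "t = a - b" followed by
++   a test of T against zero.  (Sketch of the code path through
++   riscv_zero_if_equal.)  */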
++
++/* Compare OPERANDS[1] with OPERANDS[2] using comparison code
++ CODE and jump to OPERANDS[3] if the condition holds. */
++
++void
++riscv_expand_conditional_branch (rtx *operands)
++{
++ enum rtx_code code = GET_CODE (operands[0]);
++ rtx op0 = operands[1];
++ rtx op1 = operands[2];
++ rtx condition;
++
++ riscv_emit_compare (&code, &op0, &op1);
++ condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
++ emit_jump_insn (gen_condjump (condition, operands[3]));
++}
++
++/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
++ least PARM_BOUNDARY bits of alignment, but will be given anything up
++ to STACK_BOUNDARY bits if the type requires it. */
++
++static unsigned int
++riscv_function_arg_boundary (enum machine_mode mode, const_tree type)
++{
++ unsigned int alignment;
++
++ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
++ if (alignment < PARM_BOUNDARY)
++ alignment = PARM_BOUNDARY;
++ if (alignment > STACK_BOUNDARY)
++ alignment = STACK_BOUNDARY;
++ return alignment;
++}
++
++/* Fill INFO with information about a single argument. CUM is the
++ cumulative state for earlier arguments. MODE is the mode of this
++ argument and TYPE is its type (if known). NAMED is true if this
++ is a named (fixed) argument rather than a variable one. */
++
++static void
++riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
++ enum machine_mode mode, const_tree type, bool named)
++{
++ bool doubleword_aligned_p;
++ unsigned int num_bytes, num_words, max_regs;
++
++ /* Work out the size of the argument. */
++ num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
++ num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++
++ /* Scalar, complex and vector floating-point types are passed in
++ floating-point registers, as long as this is a named rather
++ than a variable argument. */
++ info->fpr_p = (named
++ && (type == 0 || FLOAT_TYPE_P (type))
++ && (GET_MODE_CLASS (mode) == MODE_FLOAT
++ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
++ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
++ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
++
++ /* Complex floats should only go into FPRs if there are two FPRs free,
++ otherwise they should be passed in the same way as a struct
++ containing two floats. */
++ if (info->fpr_p
++ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
++ && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
++ {
++ if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
++ info->fpr_p = false;
++ else
++ num_words = 2;
++ }
++
++ /* See whether the argument has doubleword alignment. */
++ doubleword_aligned_p = (riscv_function_arg_boundary (mode, type)
++ > BITS_PER_WORD);
++
++  /* Set REG_OFFSET to the register count we're interested in.
++     Floating-point arguments are counted against the same register
++     total as integer arguments.  */
++ info->reg_offset = cum->num_gprs;
++
++ /* Advance to an even register if the argument is doubleword-aligned. */
++ if (doubleword_aligned_p)
++ info->reg_offset += info->reg_offset & 1;
++
++ /* Work out the offset of a stack argument. */
++ info->stack_offset = cum->stack_words;
++ if (doubleword_aligned_p)
++ info->stack_offset += info->stack_offset & 1;
++
++ max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
++
++ /* Partition the argument between registers and stack. */
++ info->reg_words = MIN (num_words, max_regs);
++ info->stack_words = num_words - info->reg_words;
++}
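++
++/* As a worked example: if seven argument GPRs are already in use, a
++   four-word struct gets reg_words == 1 and stack_words == 3, so one
++   word travels in the last GPR and the rest on the stack.  (Assumes
++   MAX_ARGS_IN_REGISTERS == 8; illustrative only.)  */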
++
++/* INFO describes a register argument that has the normal format for the
++ argument's mode. Return the register it uses, assuming that FPRs are
++ available if HARD_FLOAT_P. */
++
++static unsigned int
++riscv_arg_regno (const struct riscv_arg_info *info, bool hard_float_p)
++{
++ if (!info->fpr_p || !hard_float_p)
++ return GP_ARG_FIRST + info->reg_offset;
++ else
++ return FP_ARG_FIRST + info->reg_offset;
++}
++
++/* Implement TARGET_FUNCTION_ARG. */
++
++static rtx
++riscv_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
++ const_tree type, bool named)
++{
++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++ struct riscv_arg_info info;
++
++ if (mode == VOIDmode)
++ return NULL;
++
++ riscv_get_arg_info (&info, cum, mode, type, named);
++
++ /* Return straight away if the whole argument is passed on the stack. */
++ if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
++ return NULL;
++
++  /* If any 64-bit chunk of the structure contains a double in its
++     entirety, then that 64-bit chunk is passed in a floating-point
++     register.  */
++ if (TARGET_HARD_FLOAT
++ && named
++ && type != 0
++ && TREE_CODE (type) == RECORD_TYPE
++ && TYPE_SIZE_UNIT (type)
++ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
++ {
++ tree field;
++
++ /* First check to see if there is any such field. */
++ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
++ if (TREE_CODE (field) == FIELD_DECL
++ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
++ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
++ && tree_fits_shwi_p (bit_position (field))
++ && int_bit_position (field) % BITS_PER_WORD == 0)
++ break;
++
++ if (field != 0)
++ {
++ /* Now handle the special case by returning a PARALLEL
++ indicating where each 64-bit chunk goes. INFO.REG_WORDS
++ chunks are passed in registers. */
++ unsigned int i;
++ HOST_WIDE_INT bitpos;
++ rtx ret;
++
++ /* assign_parms checks the mode of ENTRY_PARM, so we must
++ use the actual mode here. */
++ ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
++
++ bitpos = 0;
++ field = TYPE_FIELDS (type);
++ for (i = 0; i < info.reg_words; i++)
++ {
++ rtx reg;
++
++ for (; field; field = DECL_CHAIN (field))
++ if (TREE_CODE (field) == FIELD_DECL
++ && int_bit_position (field) >= bitpos)
++ break;
++
++ if (field
++ && int_bit_position (field) == bitpos
++ && SCALAR_FLOAT_TYPE_P (TREE_TYPE (field))
++ && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
++ reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
++ else
++ reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
++
++ XVECEXP (ret, 0, i)
++ = gen_rtx_EXPR_LIST (VOIDmode, reg,
++ GEN_INT (bitpos / BITS_PER_UNIT));
++
++ bitpos += BITS_PER_WORD;
++ }
++ return ret;
++ }
++ }
++
++  /* Handle the conventions for passing complex floating-point
++     arguments in FPR pairs.  The real part goes in the lower register
++     and the imaginary part goes in the upper register.  */
++ if (info.fpr_p
++ && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++ {
++ rtx real, imag;
++ enum machine_mode inner;
++ unsigned int regno;
++
++ inner = GET_MODE_INNER (mode);
++ regno = FP_ARG_FIRST + info.reg_offset;
++ if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
++ {
++ /* Real part in registers, imaginary part on stack. */
++ gcc_assert (info.stack_words == info.reg_words);
++ return gen_rtx_REG (inner, regno);
++ }
++ else
++ {
++ gcc_assert (info.stack_words == 0);
++ real = gen_rtx_EXPR_LIST (VOIDmode,
++ gen_rtx_REG (inner, regno),
++ const0_rtx);
++ imag = gen_rtx_EXPR_LIST (VOIDmode,
++ gen_rtx_REG (inner,
++ regno + info.reg_words / 2),
++ GEN_INT (GET_MODE_SIZE (inner)));
++ return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
++ }
++ }
++
++ return gen_rtx_REG (mode, riscv_arg_regno (&info, TARGET_HARD_FLOAT));
++}
++
++/* Implement TARGET_FUNCTION_ARG_ADVANCE. */
++
++static void
++riscv_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
++ const_tree type, bool named)
++{
++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++ struct riscv_arg_info info;
++
++ riscv_get_arg_info (&info, cum, mode, type, named);
++
++ /* Advance the register count. This has the effect of setting
++ num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
++ argument required us to skip the final GPR and pass the whole
++ argument on the stack. */
++ cum->num_gprs = info.reg_offset + info.reg_words;
++
++ /* Advance the stack word count. */
++ if (info.stack_words > 0)
++ cum->stack_words = info.stack_offset + info.stack_words;
++}
++
++/* Implement TARGET_ARG_PARTIAL_BYTES. */
++
++static int
++riscv_arg_partial_bytes (cumulative_args_t cum,
++ enum machine_mode mode, tree type, bool named)
++{
++ struct riscv_arg_info info;
++
++ riscv_get_arg_info (&info, get_cumulative_args (cum), mode, type, named);
++ return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
++}
++
++/* See whether VALTYPE is a record whose fields should be returned in
++ floating-point registers. If so, return the number of fields and
++ list them in FIELDS (which should have two elements). Return 0
++ otherwise.
++
++   A structure with one or two fields is returned in floating-point
++   registers as long as every field has a floating-point type.  */
++
++static int
++riscv_fpr_return_fields (const_tree valtype, tree *fields)
++{
++ tree field;
++ int i;
++
++ if (TREE_CODE (valtype) != RECORD_TYPE)
++ return 0;
++
++ i = 0;
++ for (field = TYPE_FIELDS (valtype); field != 0; field = DECL_CHAIN (field))
++ {
++ if (TREE_CODE (field) != FIELD_DECL)
++ continue;
++
++ if (!SCALAR_FLOAT_TYPE_P (TREE_TYPE (field)))
++ return 0;
++
++ if (i == 2)
++ return 0;
++
++ fields[i++] = field;
++ }
++ return i;
++}
++
++/* Return true if the function return value MODE will get returned in a
++ floating-point register. */
++
++static bool
++riscv_return_mode_in_fpr_p (enum machine_mode mode)
++{
++ return ((GET_MODE_CLASS (mode) == MODE_FLOAT
++ || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
++ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++ && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
++}
++
++/* Return the representation of an FPR return register when the
++ value being returned in FP_RETURN has mode VALUE_MODE and the
++   return type itself has mode TYPE_MODE.  The two modes may be
++   different for structures like:
++
++ struct __attribute__((packed)) foo { float f; }
++
++ where we return the SFmode value of "f" in FP_RETURN, but where
++ the structure itself has mode BLKmode. */
++
++static rtx
++riscv_return_fpr_single (enum machine_mode type_mode,
++ enum machine_mode value_mode)
++{
++ rtx x;
++
++ x = gen_rtx_REG (value_mode, FP_RETURN);
++ if (type_mode != value_mode)
++ {
++ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
++ x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
++ }
++ return x;
++}
++
++/* Return a composite value in a pair of floating-point registers.
++ MODE1 and OFFSET1 are the mode and byte offset for the first value,
++ likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
++ complete value.
++
++   The first value always goes in FP_RETURN and the second in
++   FP_RETURN + 1.  */
++
++static rtx
++riscv_return_fpr_pair (enum machine_mode mode,
++ enum machine_mode mode1, HOST_WIDE_INT offset1,
++ enum machine_mode mode2, HOST_WIDE_INT offset2)
++{
++ return gen_rtx_PARALLEL
++ (mode,
++ gen_rtvec (2,
++ gen_rtx_EXPR_LIST (VOIDmode,
++ gen_rtx_REG (mode1, FP_RETURN),
++ GEN_INT (offset1)),
++ gen_rtx_EXPR_LIST (VOIDmode,
++ gen_rtx_REG (mode2, FP_RETURN + 1),
++ GEN_INT (offset2))));
++
++}
++
++/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
++ VALTYPE is the return type and MODE is VOIDmode. For libcalls,
++ VALTYPE is null and MODE is the mode of the return value. */
++
++rtx
++riscv_function_value (const_tree valtype, const_tree func, enum machine_mode mode)
++{
++ if (valtype)
++ {
++ tree fields[2];
++ int unsigned_p;
++
++ mode = TYPE_MODE (valtype);
++ unsigned_p = TYPE_UNSIGNED (valtype);
++
++      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
++	 return values, promote the mode here too.  */
++ mode = promote_function_mode (valtype, mode, &unsigned_p, func, 1);
++
++      /* Handle structures whose fields are returned in FPRs.  */
++ switch (riscv_fpr_return_fields (valtype, fields))
++ {
++ case 1:
++ return riscv_return_fpr_single (mode,
++ TYPE_MODE (TREE_TYPE (fields[0])));
++
++ case 2:
++ return riscv_return_fpr_pair (mode,
++ TYPE_MODE (TREE_TYPE (fields[0])),
++ int_byte_position (fields[0]),
++ TYPE_MODE (TREE_TYPE (fields[1])),
++ int_byte_position (fields[1]));
++ }
++
++ /* Only use FPRs for scalar, complex or vector types. */
++ if (!FLOAT_TYPE_P (valtype))
++ return gen_rtx_REG (mode, GP_RETURN);
++ }
++
++  /* Handle long doubles, which are returned in a pair of registers.  */
++ if (mode == TFmode)
++ return riscv_return_fpr_pair (mode,
++ DImode, 0,
++ DImode, GET_MODE_SIZE (mode) / 2);
++
++ if (riscv_return_mode_in_fpr_p (mode))
++ {
++ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++ return riscv_return_fpr_pair (mode,
++ GET_MODE_INNER (mode), 0,
++ GET_MODE_INNER (mode),
++ GET_MODE_SIZE (mode) / 2);
++ else
++ return gen_rtx_REG (mode, FP_RETURN);
++ }
++
++ return gen_rtx_REG (mode, GP_RETURN);
++}
++
++/* Implement TARGET_RETURN_IN_MEMORY. Scalars and small structures
++ that fit in two registers are returned in a0/a1. */
++
++static bool
++riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
++{
++ return !IN_RANGE (int_size_in_bytes (type), 0, 2 * UNITS_PER_WORD);
++}
++
++/* Implement TARGET_PASS_BY_REFERENCE. */
++
++static bool
++riscv_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
++ enum machine_mode mode, const_tree type,
++ bool named ATTRIBUTE_UNUSED)
++{
++ if (type && riscv_return_in_memory (type, NULL_TREE))
++ return true;
++ return targetm.calls.must_pass_in_stack (mode, type);
++}
++
++/* Implement TARGET_SETUP_INCOMING_VARARGS. */
++
++static void
++riscv_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
++ tree type, int *pretend_size ATTRIBUTE_UNUSED,
++ int no_rtl)
++{
++ CUMULATIVE_ARGS local_cum;
++ int gp_saved;
++
++ /* The caller has advanced CUM up to, but not beyond, the last named
++ argument. Advance a local copy of CUM past the last "real" named
++ argument, to find out how many registers are left over. */
++ local_cum = *get_cumulative_args (cum);
++ riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
++
++  /* Find out how many registers we need to save.  */
++ gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
++
++ if (!no_rtl && gp_saved > 0)
++ {
++ rtx ptr, mem;
++
++ ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
++ REG_PARM_STACK_SPACE (cfun->decl)
++ - gp_saved * UNITS_PER_WORD);
++ mem = gen_frame_mem (BLKmode, ptr);
++ set_mem_alias_set (mem, get_varargs_alias_set ());
++
++ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
++ mem, gp_saved);
++ }
++ if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
++ cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
++}
++
++/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
++
++static void
++riscv_va_start (tree valist, rtx nextarg)
++{
++ nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
++ std_expand_builtin_va_start (valist, nextarg);
++}
++
++/* Expand a call.  SIBCALL_P says whether the call is a sibling call.
++   RESULT is where the result will go (null for "call"s and "sibcall"s),
++   ADDR is the address of the function and ARGS_SIZE is the size of the
++   arguments.  Return the call itself.  */
++
++rtx
++riscv_expand_call (bool sibcall_p, rtx result, rtx addr, rtx args_size)
++{
++ rtx pattern;
++
++ if (!call_insn_operand (addr, VOIDmode))
++ {
++ rtx reg = RISCV_EPILOGUE_TEMP (Pmode);
++ riscv_emit_move (reg, addr);
++ addr = reg;
++ }
++
++ if (result == 0)
++ {
++ rtx (*fn) (rtx, rtx);
++
++ if (sibcall_p)
++ fn = gen_sibcall_internal;
++ else
++ fn = gen_call_internal;
++
++ pattern = fn (addr, args_size);
++ }
++ else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
++ {
++ /* Handle return values created by riscv_return_fpr_pair. */
++ rtx (*fn) (rtx, rtx, rtx, rtx);
++ rtx reg1, reg2;
++
++ if (sibcall_p)
++ fn = gen_sibcall_value_multiple_internal;
++ else
++ fn = gen_call_value_multiple_internal;
++
++ reg1 = XEXP (XVECEXP (result, 0, 0), 0);
++ reg2 = XEXP (XVECEXP (result, 0, 1), 0);
++ pattern = fn (reg1, addr, args_size, reg2);
++ }
++ else
++ {
++ rtx (*fn) (rtx, rtx, rtx);
++
++ if (sibcall_p)
++ fn = gen_sibcall_value_internal;
++ else
++ fn = gen_call_value_internal;
++
++ /* Handle return values created by riscv_return_fpr_single. */
++ if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 1)
++ result = XEXP (XVECEXP (result, 0, 0), 0);
++ pattern = fn (result, addr, args_size);
++ }
++
++ return emit_call_insn (pattern);
++}
++
++/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
++ Assume that the areas do not overlap. */
++
++static void
++riscv_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
++{
++ HOST_WIDE_INT offset, delta;
++ unsigned HOST_WIDE_INT bits;
++ int i;
++ enum machine_mode mode;
++ rtx *regs;
++
++  bits = MAX (BITS_PER_UNIT,
++	      MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest))));
++
++ mode = mode_for_size (bits, MODE_INT, 0);
++ delta = bits / BITS_PER_UNIT;
++
++ /* Allocate a buffer for the temporary registers. */
++ regs = XALLOCAVEC (rtx, length / delta);
++
++  /* Load as many BITS-sized chunks as possible; BITS was chosen above
++     so that every load is naturally aligned.  */
++ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
++ {
++ regs[i] = gen_reg_rtx (mode);
++ riscv_emit_move (regs[i], adjust_address (src, mode, offset));
++ }
++
++ /* Copy the chunks to the destination. */
++ for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
++ riscv_emit_move (adjust_address (dest, mode, offset), regs[i]);
++
++ /* Mop up any left-over bytes. */
++ if (offset < length)
++ {
++ src = adjust_address (src, BLKmode, offset);
++ dest = adjust_address (dest, BLKmode, offset);
++ move_by_pieces (dest, src, length - offset,
++ MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
++ }
++}
++
++/* Helper function for doing a loop-based block operation on memory
++ reference MEM. Each iteration of the loop will operate on LENGTH
++ bytes of MEM.
++
++ Create a new base register for use within the loop and point it to
++ the start of MEM. Create a new memory reference that uses this
++ register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
++
++static void
++riscv_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
++ rtx *loop_reg, rtx *loop_mem)
++{
++ *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
++
++ /* Although the new mem does not refer to a known location,
++ it does keep up to LENGTH bytes of alignment. */
++ *loop_mem = change_address (mem, BLKmode, *loop_reg);
++ set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
++}
++
++/* Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
++ bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
++ the memory regions do not overlap. */
++
++static void
++riscv_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
++ HOST_WIDE_INT bytes_per_iter)
++{
++ rtx label, src_reg, dest_reg, final_src, test;
++ HOST_WIDE_INT leftover;
++
++ leftover = length % bytes_per_iter;
++ length -= leftover;
++
++ /* Create registers and memory references for use within the loop. */
++ riscv_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
++ riscv_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
++
++ /* Calculate the value that SRC_REG should have after the last iteration
++ of the loop. */
++ final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
++ 0, 0, OPTAB_WIDEN);
++
++ /* Emit the start of the loop. */
++ label = gen_label_rtx ();
++ emit_label (label);
++
++ /* Emit the loop body. */
++ riscv_block_move_straight (dest, src, bytes_per_iter);
++
++ /* Move on to the next block. */
++ riscv_emit_move (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
++ riscv_emit_move (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
++
++ /* Emit the loop condition. */
++ test = gen_rtx_NE (VOIDmode, src_reg, final_src);
++ if (Pmode == DImode)
++ emit_jump_insn (gen_cbranchdi4 (test, src_reg, final_src, label));
++ else
++ emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
++
++ /* Mop up any left-over bytes. */
++ if (leftover)
++ riscv_block_move_straight (dest, src, leftover);
++}
++
++/* Expand a movmemsi instruction, which copies LENGTH bytes from
++ memory reference SRC to memory reference DEST. */
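++/* For example, with both buffers word-aligned, FACTOR is 1 and copies of
++   up to RISCV_MAX_MOVE_BYTES_STRAIGHT bytes are expanded inline; with
++   only byte alignment on RV64, FACTOR is 8 and the inline budget shrinks
++   by the same factor.  */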
++
++bool
++riscv_expand_block_move (rtx dest, rtx src, rtx length)
++{
++ if (CONST_INT_P (length))
++ {
++ HOST_WIDE_INT factor, align;
++
++ align = MIN (MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), BITS_PER_WORD);
++ factor = BITS_PER_WORD / align;
++
++ if (INTVAL (length) <= RISCV_MAX_MOVE_BYTES_STRAIGHT / factor)
++ {
++ riscv_block_move_straight (dest, src, INTVAL (length));
++ return true;
++ }
++ else if (optimize && align >= BITS_PER_WORD)
++ {
++ riscv_block_move_loop (dest, src, INTVAL (length),
++ RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER / factor);
++ return true;
++ }
++ }
++ return false;
++}
++
++/* (Re-)Initialize riscv_lo_relocs and riscv_hi_relocs. */
++
++static void
++riscv_init_relocs (void)
++{
++ memset (riscv_hi_relocs, '\0', sizeof (riscv_hi_relocs));
++ memset (riscv_lo_relocs, '\0', sizeof (riscv_lo_relocs));
++
++ if (!flag_pic)
++ {
++ riscv_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
++ riscv_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
++ }
++
++ if (!flag_pic || flag_pie)
++ {
++ riscv_hi_relocs[SYMBOL_TLS_LE] = "%tprel_hi(";
++ riscv_lo_relocs[SYMBOL_TLS_LE] = "%tprel_lo(";
++ }
++}
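++
++/* For example, with the SYMBOL_ABSOLUTE relocations above, an absolute
++   load is emitted as
++	lui	t0, %hi(sym)
++	lw	t1, %lo(sym)(t0)
++   and the assembler and linker fill in the split 20-bit/12-bit
++   immediates.  */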
++
++/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
++ in context CONTEXT. RELOCS is the array of relocations to use. */
++
++static void
++riscv_print_operand_reloc (FILE *file, rtx op, const char **relocs)
++{
++ enum riscv_symbol_type symbol_type;
++ const char *p;
++
++ symbol_type = riscv_classify_symbolic_expression (op);
++ gcc_assert (relocs[symbol_type]);
++
++ fputs (relocs[symbol_type], file);
++ output_addr_const (file, riscv_strip_unspec_address (op));
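++  /* Emit one closing ')' for each '(' in the relocation prefix.  */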
++ for (p = relocs[symbol_type]; *p != 0; p++)
++ if (*p == '(')
++ fputc (')', file);
++}
++
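++/* Return the assembler suffix (".aq", ".rl", ".sc" or "") that enforces
++   memory model MODEL on an atomic operation.  */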
++static const char *
++riscv_memory_model_suffix (enum memmodel model)
++{
++ switch (model)
++ {
++ case MEMMODEL_ACQ_REL:
++ case MEMMODEL_SEQ_CST:
++ return ".sc";
++ case MEMMODEL_ACQUIRE:
++ case MEMMODEL_CONSUME:
++ return ".aq";
++ case MEMMODEL_RELEASE:
++ return ".rl";
++ case MEMMODEL_RELAXED:
++ return "";
++    default:
++      gcc_unreachable ();
++ }
++}
++
++/* Implement TARGET_PRINT_OPERAND.  The RISC-V-specific operand codes are:
++
++   'h'  Print the high-part relocation associated with OP, after stripping
++        any outermost HIGH.
++   'R'  Print the low-part relocation associated with OP.
++   'C'  Print the integer branch condition for comparison OP.
++   'A'  Print the atomic operation suffix for memory model OP.
++   'y'  Print the base register of memory operand OP.
++   'z'  Print the zero register if OP is zero, otherwise print OP normally. */
++
++static void
++riscv_print_operand (FILE *file, rtx op, int letter)
++{
++ enum rtx_code code;
++
++ gcc_assert (op);
++ code = GET_CODE (op);
++
++ switch (letter)
++ {
++ case 'h':
++ if (code == HIGH)
++ op = XEXP (op, 0);
++ riscv_print_operand_reloc (file, op, riscv_hi_relocs);
++ break;
++
++ case 'R':
++ riscv_print_operand_reloc (file, op, riscv_lo_relocs);
++ break;
++
++ case 'C':
++ /* The RTL names match the instruction names. */
++ fputs (GET_RTX_NAME (code), file);
++ break;
++
++ case 'A':
++ fputs (riscv_memory_model_suffix ((enum memmodel)INTVAL (op)), file);
++ break;
++
++ default:
++ switch (code)
++ {
++ case REG:
++ if (letter && letter != 'z')
++ output_operand_lossage ("invalid use of '%%%c'", letter);
++ fprintf (file, "%s", reg_names[REGNO (op)]);
++ break;
++
++ case MEM:
++ if (letter == 'y')
++	fprintf (file, "%s", reg_names[REGNO (XEXP (op, 0))]);
++ else if (letter && letter != 'z')
++ output_operand_lossage ("invalid use of '%%%c'", letter);
++ else
++ output_address (XEXP (op, 0));
++ break;
++
++ default:
++ if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
++ fputs (reg_names[GP_REG_FIRST], file);
++ else if (letter && letter != 'z')
++ output_operand_lossage ("invalid use of '%%%c'", letter);
++ else
++ output_addr_const (file, riscv_strip_unspec_address (op));
++ break;
++ }
++ }
++}
++
++/* Implement TARGET_PRINT_OPERAND_ADDRESS. */
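++/* A register-plus-offset address prints as, e.g., "8(sp)"; a LO_SUM
++   address prints as "%lo(sym)(t0)".  */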
++
++static void
++riscv_print_operand_address (FILE *file, rtx x)
++{
++ struct riscv_address_info addr;
++
++ if (riscv_classify_address (&addr, x, word_mode, true))
++ switch (addr.type)
++ {
++ case ADDRESS_REG:
++ riscv_print_operand (file, addr.offset, 0);
++ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
++ return;
++
++ case ADDRESS_LO_SUM:
++ riscv_print_operand_reloc (file, addr.offset, riscv_lo_relocs);
++ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
++ return;
++
++ case ADDRESS_CONST_INT:
++ output_addr_const (file, x);
++ fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
++ return;
++
++ case ADDRESS_SYMBOLIC:
++ output_addr_const (file, riscv_strip_unspec_address (x));
++ return;
++ }
++ gcc_unreachable ();
++}
++
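++/* Return true if an object of SIZE bytes qualifies for the small data
++   section under the -G (g_switch_value) threshold.  */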
++static bool
++riscv_size_ok_for_small_data_p (int size)
++{
++ return g_switch_value && IN_RANGE (size, 1, g_switch_value);
++}
++
++/* Return true if EXP should be placed in the small data section. */
++
++static bool
++riscv_in_small_data_p (const_tree x)
++{
++ if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
++ return false;
++
++ if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
++ {
++ const char *sec = TREE_STRING_POINTER (DECL_SECTION_NAME (x));
++ return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
++ }
++
++ return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
++}
++
++/* Return a section for X, handling small data. */
++
++static section *
++riscv_elf_select_rtx_section (enum machine_mode mode, rtx x,
++ unsigned HOST_WIDE_INT align)
++{
++ section *s = default_elf_select_rtx_section (mode, x, align);
++
++ if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
++ {
++ if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
++ {
++ /* Rename .rodata.cst* to .srodata.cst*. */
++ char name[32];
++ sprintf (name, ".s%s", s->named.name + 1);
++ return get_section (name, s->named.common.flags, NULL);
++ }
++
++ if (s == data_section)
++ return sdata_section;
++ }
++
++ return s;
++}
++
++/* Implement TARGET_ASM_OUTPUT_DWARF_DTPREL. */
++
++static void ATTRIBUTE_UNUSED
++riscv_output_dwarf_dtprel (FILE *file, int size, rtx x)
++{
++ switch (size)
++ {
++ case 4:
++ fputs ("\t.dtprelword\t", file);
++ break;
++
++ case 8:
++ fputs ("\t.dtpreldword\t", file);
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++ output_addr_const (file, x);
++ fputs ("+0x800", file);
++}
++
++/* Make the last instruction frame-related and note that it performs
++ the operation described by FRAME_PATTERN. */
++
++static void
++riscv_set_frame_expr (rtx frame_pattern)
++{
++ rtx insn;
++
++ insn = get_last_insn ();
++ RTX_FRAME_RELATED_P (insn) = 1;
++ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
++ frame_pattern,
++ REG_NOTES (insn));
++}
++
++/* Return a frame-related rtx that stores REG at MEM.
++ REG must be a single register. */
++
++static rtx
++riscv_frame_set (rtx mem, rtx reg)
++{
++ rtx set;
++
++ set = gen_rtx_SET (VOIDmode, mem, reg);
++ RTX_FRAME_RELATED_P (set) = 1;
++
++ return set;
++}
++
++/* Return true if the current function must save register REGNO. */
++
++static bool
++riscv_save_reg_p (unsigned int regno)
++{
++ bool call_saved = !global_regs[regno] && !call_really_used_regs[regno];
++ bool might_clobber = crtl->saves_all_registers
++ || df_regs_ever_live_p (regno)
++ || (regno == HARD_FRAME_POINTER_REGNUM
++ && frame_pointer_needed);
++
++ return (call_saved && might_clobber)
++ || (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return);
++}
++
++/* Populate the current function's riscv_frame_info structure.
++
++   RISC-V stack frames grow downward.  High addresses are at the top.
++
++ +-------------------------------+
++ | |
++ | incoming stack arguments |
++ | |
++ +-------------------------------+ <-- incoming stack pointer
++ | |
++ | callee-allocated save area |
++ | for arguments that are |
++ | split between registers and |
++ | the stack |
++ | |
++ +-------------------------------+ <-- arg_pointer_rtx
++ | |
++ | callee-allocated save area |
++ | for register varargs |
++ | |
++ +-------------------------------+ <-- hard_frame_pointer_rtx;
++ | | stack_pointer_rtx + gp_sp_offset
++ | GPR save area | + UNITS_PER_WORD
++ | |
++ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
++   |                               |       + UNITS_PER_HWFPVALUE
++ | FPR save area |
++ | |
++ +-------------------------------+ <-- frame_pointer_rtx (virtual)
++ | |
++ | local variables |
++ | |
++ P +-------------------------------+
++ | |
++ | outgoing stack arguments |
++ | |
++ +-------------------------------+ <-- stack_pointer_rtx
++
++ Dynamic stack allocations such as alloca insert data at point P.
++ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
++ hard_frame_pointer_rtx unchanged. */
++
++static void
++riscv_compute_frame_info (void)
++{
++ struct riscv_frame_info *frame;
++ HOST_WIDE_INT offset;
++ unsigned int regno, i;
++
++ frame = &cfun->machine->frame;
++ memset (frame, 0, sizeof (*frame));
++
++ /* Find out which GPRs we need to save. */
++ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
++ if (riscv_save_reg_p (regno))
++ frame->mask |= 1 << (regno - GP_REG_FIRST);
++
++ /* If this function calls eh_return, we must also save and restore the
++ EH data registers. */
++ if (crtl->calls_eh_return)
++ for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
++ frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
++
++ /* Find out which FPRs we need to save. This loop must iterate over
++ the same space as its companion in riscv_for_each_saved_gpr_and_fpr. */
++ if (TARGET_HARD_FLOAT)
++ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++ if (riscv_save_reg_p (regno))
++ frame->fmask |= 1 << (regno - FP_REG_FIRST);
++
++ /* At the bottom of the frame are any outgoing stack arguments. */
++ offset = crtl->outgoing_args_size;
++ /* Next are local stack variables. */
++ offset += RISCV_STACK_ALIGN (get_frame_size ());
++ /* The virtual frame pointer points above the local variables. */
++ frame->frame_pointer_offset = offset;
++ /* Next are the callee-saved FPRs. */
++ if (frame->fmask)
++ {
++      unsigned num_saved = __builtin_popcount (frame->fmask);
++ offset += RISCV_STACK_ALIGN (num_saved * UNITS_PER_FPREG);
++ frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
++ }
++ /* Next are the callee-saved GPRs. */
++ if (frame->mask)
++ {
++      unsigned num_saved = __builtin_popcount (frame->mask);
++ offset += RISCV_STACK_ALIGN (num_saved * UNITS_PER_WORD);
++ frame->gp_sp_offset = offset - UNITS_PER_WORD;
++ }
++ /* The hard frame pointer points above the callee-saved GPRs. */
++ frame->hard_frame_pointer_offset = offset;
++  /* Above the hard frame pointer is the callee-allocated varargs save area.  */
++ offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
++ frame->arg_pointer_offset = offset;
++ /* Next is the callee-allocated area for pretend stack arguments. */
++ offset += crtl->args.pretend_args_size;
++ frame->total_size = offset;
++  /* The incoming stack pointer and any incoming arguments sit just above.  */
++}
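++
++/* For example (assuming RISCV_STACK_ALIGN rounds up to 16 bytes), an RV64
++   function with no outgoing arguments, 16 bytes of locals and two saved
++   GPRs gets frame_pointer_offset = 16, gp_sp_offset = 24 (the highest
++   GPR save slot), hard_frame_pointer_offset = 32 and total_size = 32.  */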
++
++/* Make sure that we're not trying to eliminate to the wrong hard frame
++ pointer. */
++
++static bool
++riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
++{
++ return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
++}
++
++/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
++ or argument pointer. TO is either the stack pointer or hard frame
++ pointer. */
++
++HOST_WIDE_INT
++riscv_initial_elimination_offset (int from, int to)
++{
++ HOST_WIDE_INT src, dest;
++
++ riscv_compute_frame_info ();
++
++ if (to == HARD_FRAME_POINTER_REGNUM)
++ dest = cfun->machine->frame.hard_frame_pointer_offset;
++ else if (to == STACK_POINTER_REGNUM)
++    dest = 0; /* The stack pointer is the base of all offsets.  */
++ else
++ gcc_unreachable ();
++
++ if (from == FRAME_POINTER_REGNUM)
++ src = cfun->machine->frame.frame_pointer_offset;
++ else if (from == ARG_POINTER_REGNUM)
++ src = cfun->machine->frame.arg_pointer_offset;
++ else
++ gcc_unreachable ();
++
++ return src - dest;
++}
++
++/* Implement RETURN_ADDR_RTX. We do not support moving back to a
++ previous frame. */
++
++rtx
++riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
++{
++ if (count != 0)
++ return const0_rtx;
++
++ return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
++}
++
++/* Emit code to change the current function's return address to
++ ADDRESS. SCRATCH is available as a scratch register, if needed.
++ ADDRESS and SCRATCH are both word-mode GPRs. */
++
++void
++riscv_set_return_address (rtx address, rtx scratch)
++{
++ rtx slot_address;
++
++ gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
++ slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
++ cfun->machine->frame.gp_sp_offset);
++ riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
++}
++
++/* A function to save or store a register. The first argument is the
++ register and the second is the stack slot. */
++typedef void (*riscv_save_restore_fn) (rtx, rtx);
++
++/* Use FN to save or restore register REGNO. MODE is the register's
++ mode and OFFSET is the offset of its save slot from the current
++ stack pointer. */
++
++static void
++riscv_save_restore_reg (enum machine_mode mode, int regno,
++ HOST_WIDE_INT offset, riscv_save_restore_fn fn)
++{
++ rtx mem;
++
++ mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
++ fn (gen_rtx_REG (mode, regno), mem);
++}
++
++/* Call FN for each register that is saved by the current function.
++ SP_OFFSET is the offset of the current stack pointer from the start
++ of the frame. */
++
++static void
++riscv_for_each_saved_gpr_and_fpr (HOST_WIDE_INT sp_offset,
++ riscv_save_restore_fn fn)
++{
++ HOST_WIDE_INT offset;
++ int regno;
++
++ /* Save the link register and s-registers. */
++ offset = cfun->machine->frame.gp_sp_offset - sp_offset;
++ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
++ {
++ riscv_save_restore_reg (word_mode, regno, offset, fn);
++ offset -= UNITS_PER_WORD;
++ }
++
++ /* This loop must iterate over the same space as its companion in
++ riscv_compute_frame_info. */
++ offset = cfun->machine->frame.fp_sp_offset - sp_offset;
++ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
++ {
++ riscv_save_restore_reg (DFmode, regno, offset, fn);
++ offset -= GET_MODE_SIZE (DFmode);
++ }
++}
++
++/* Emit a move from SRC to DEST, given that one of them is a register
++ save slot and that the other is a register. TEMP is a temporary
++ GPR of the same mode that is available if need be. */
++
++static void
++riscv_emit_save_slot_move (rtx dest, rtx src, rtx temp)
++{
++ unsigned int regno;
++ rtx mem;
++ enum reg_class rclass;
++
++ if (REG_P (src))
++ {
++ regno = REGNO (src);
++ mem = dest;
++ }
++ else
++ {
++ regno = REGNO (dest);
++ mem = src;
++ }
++
++ rclass = riscv_secondary_reload_class (REGNO_REG_CLASS (regno),
++ GET_MODE (mem), mem, mem == src);
++
++ if (rclass == NO_REGS)
++ riscv_emit_move (dest, src);
++ else
++ {
++ gcc_assert (!reg_overlap_mentioned_p (dest, temp));
++ riscv_emit_move (temp, src);
++ riscv_emit_move (dest, temp);
++ }
++ if (MEM_P (dest))
++ riscv_set_frame_expr (riscv_frame_set (dest, src));
++}
++
++/* Save register REG to MEM. Make the instruction frame-related. */
++
++static void
++riscv_save_reg (rtx reg, rtx mem)
++{
++ riscv_emit_save_slot_move (mem, reg, RISCV_PROLOGUE_TEMP (GET_MODE (reg)));
++}
++
++
++/* Expand the "prologue" pattern. */
++
++void
++riscv_expand_prologue (void)
++{
++ const struct riscv_frame_info *frame;
++ HOST_WIDE_INT size;
++ rtx insn;
++
++ frame = &cfun->machine->frame;
++ size = frame->total_size;
++
++ if (flag_stack_usage_info)
++ current_function_static_stack_size = size;
++
++ /* Save the registers. Allocate up to RISCV_MAX_FIRST_STACK_STEP
++ bytes beforehand; this is enough to cover the register save area
++ without going out of range. */
++ if ((frame->mask | frame->fmask) != 0)
++ {
++ HOST_WIDE_INT step1;
++
++ step1 = MIN (size, RISCV_MAX_FIRST_STACK_STEP);
++ insn = gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ GEN_INT (-step1));
++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++ size -= step1;
++ riscv_for_each_saved_gpr_and_fpr (size, riscv_save_reg);
++ }
++
++ /* Set up the frame pointer, if we're using one. */
++ if (frame_pointer_needed)
++ {
++ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
++ GEN_INT (frame->hard_frame_pointer_offset - size));
++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++ }
++
++ /* Allocate the rest of the frame. */
++ if (size > 0)
++ {
++ if (SMALL_OPERAND (-size))
++ RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ GEN_INT (-size)))) = 1;
++ else
++ {
++ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (size));
++ emit_insn (gen_sub3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ RISCV_PROLOGUE_TEMP (Pmode)));
++
++ /* Describe the combined effect of the previous instructions. */
++ riscv_set_frame_expr
++ (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
++ plus_constant (Pmode, stack_pointer_rtx, -size)));
++ }
++ }
++}
++
++/* Emit instructions to restore register REG from slot MEM. */
++
++static void
++riscv_restore_reg (rtx reg, rtx mem)
++{
++ riscv_emit_save_slot_move (reg, mem, RISCV_EPILOGUE_TEMP (GET_MODE (reg)));
++}
++
++/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
++ says which. */
++
++void
++riscv_expand_epilogue (bool sibcall_p)
++{
++ const struct riscv_frame_info *frame;
++ HOST_WIDE_INT step1, step2;
++
++ if (!sibcall_p && riscv_can_use_return_insn ())
++ {
++ emit_jump_insn (gen_return ());
++ return;
++ }
++
++ /* Split the frame into two. STEP1 is the amount of stack we should
++ deallocate before restoring the registers. STEP2 is the amount we
++ should deallocate afterwards.
++
++ Start off by assuming that no registers need to be restored. */
++ frame = &cfun->machine->frame;
++ step1 = frame->total_size;
++ step2 = 0;
++
++ /* Move past any dynamic stack allocations. */
++ if (cfun->calls_alloca)
++ {
++ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
++ if (!SMALL_INT (adjust))
++ {
++ riscv_emit_move (RISCV_EPILOGUE_TEMP (Pmode), adjust);
++ adjust = RISCV_EPILOGUE_TEMP (Pmode);
++ }
++
++ emit_insn (gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, adjust));
++ }
++
++ /* If we need to restore registers, deallocate as much stack as
++ possible in the second step without going out of range. */
++ if ((frame->mask | frame->fmask) != 0)
++ {
++ step2 = MIN (step1, RISCV_MAX_FIRST_STACK_STEP);
++ step1 -= step2;
++ }
++
++ /* Set TARGET to BASE + STEP1. */
++ if (step1 > 0)
++ {
++ /* Get an rtx for STEP1 that we can add to BASE. */
++ rtx adjust = GEN_INT (step1);
++ if (!SMALL_OPERAND (step1))
++ {
++ riscv_emit_move (RISCV_EPILOGUE_TEMP (Pmode), adjust);
++ adjust = RISCV_EPILOGUE_TEMP (Pmode);
++ }
++
++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
++ }
++
++ /* Restore the registers. */
++ riscv_for_each_saved_gpr_and_fpr (frame->total_size - step2,
++ riscv_restore_reg);
++
++ /* Deallocate the final bit of the frame. */
++ if (step2 > 0)
++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ GEN_INT (step2)));
++
++ /* Add in the __builtin_eh_return stack adjustment. */
++ if (crtl->calls_eh_return)
++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ EH_RETURN_STACKADJ_RTX));
++
++ if (!sibcall_p)
++ {
++ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
++ emit_jump_insn (gen_simple_return_internal (ra));
++ }
++}
++
++/* Return nonzero if this function is known to have a null epilogue.
++ This allows the optimizer to omit jumps to jumps if no stack
++ was created. */
++
++bool
++riscv_can_use_return_insn (void)
++{
++ return reload_completed && cfun->machine->frame.total_size == 0;
++}
++
++/* Return true if register REGNO can store a value of mode MODE.
++ The result of this function is cached in riscv_hard_regno_mode_ok. */
++
++static bool
++riscv_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
++{
++ unsigned int size = GET_MODE_SIZE (mode);
++ enum mode_class mclass = GET_MODE_CLASS (mode);
++
++  /* This is bogus, but ira_build segfaults on RV32 without it.  */
++ if (VECTOR_MODE_P (mode))
++ return true;
++
++ if (GP_REG_P (regno))
++ {
++ if (size <= UNITS_PER_WORD)
++ return true;
++
++ /* Double-word values must be even-register-aligned. */
++ if (size <= 2 * UNITS_PER_WORD)
++ return regno % 2 == 0;
++ }
++
++ if (FP_REG_P (regno))
++ {
++ if (mclass == MODE_FLOAT
++ || mclass == MODE_COMPLEX_FLOAT
++ || mclass == MODE_VECTOR_FLOAT)
++ return size <= UNITS_PER_FPVALUE;
++ }
++
++ return false;
++}
++
++/* Implement HARD_REGNO_NREGS. */
++
++unsigned int
++riscv_hard_regno_nregs (int regno, enum machine_mode mode)
++{
++ if (FP_REG_P (regno))
++ return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
++
++ /* All other registers are word-sized. */
++ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++}
++
++/* Implement CLASS_MAX_NREGS, taking the maximum of the cases
++ in riscv_hard_regno_nregs. */
++
++int
++riscv_class_max_nregs (enum reg_class rclass, enum machine_mode mode)
++{
++ int size;
++ HARD_REG_SET left;
++
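++  /* Start with a unit size larger than any register; it is clamped below
++     to the smallest per-register size among the kinds of registers that
++     RCLASS contains, which maximizes the register count.  */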
++ size = 0x8000;
++ COPY_HARD_REG_SET (left, reg_class_contents[(int) rclass]);
++ if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
++ {
++ size = MIN (size, UNITS_PER_FPREG);
++ AND_COMPL_HARD_REG_SET (left, reg_class_contents[(int) FP_REGS]);
++ }
++ if (!hard_reg_set_empty_p (left))
++ size = MIN (size, UNITS_PER_WORD);
++ return (GET_MODE_SIZE (mode) + size - 1) / size;
++}
++
++/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
++
++static reg_class_t
++riscv_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
++{
++ return reg_class_subset_p (FP_REGS, rclass) ? FP_REGS :
++ reg_class_subset_p (GR_REGS, rclass) ? GR_REGS :
++ rclass;
++}
++
++/* RCLASS is a class involved in a REGISTER_MOVE_COST calculation.
++ Return a "canonical" class to represent it in later calculations. */
++
++static reg_class_t
++riscv_canonicalize_move_class (reg_class_t rclass)
++{
++ if (reg_class_subset_p (rclass, GENERAL_REGS))
++ rclass = GENERAL_REGS;
++
++ return rclass;
++}
++
++/* Implement TARGET_REGISTER_MOVE_COST. Return 0 for classes that are the
++ maximum of the move costs for subclasses; regclass will work out
++ the maximum for us. */
++
++static int
++riscv_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
++ reg_class_t from, reg_class_t to)
++{
++ from = riscv_canonicalize_move_class (from);
++ to = riscv_canonicalize_move_class (to);
++
++ if ((from == GENERAL_REGS && to == GENERAL_REGS)
++ || (from == GENERAL_REGS && to == FP_REGS)
++ || (from == FP_REGS && to == FP_REGS))
++ return COSTS_N_INSNS (1);
++
++ if (from == FP_REGS && to == GENERAL_REGS)
++ return tune_info->fp_to_int_cost;
++
++ return 0;
++}
++
++/* Implement TARGET_MEMORY_MOVE_COST. */
++
++static int
++riscv_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
++{
++ return (tune_info->memory_cost
++ + memory_move_secondary_cost (mode, rclass, in));
++}
++
++/* Return the register class required for a secondary register when
++ copying between one of the registers in RCLASS and value X, which
++ has mode MODE. X is the source of the move if IN_P, otherwise it
++ is the destination. Return NO_REGS if no secondary register is
++ needed. */
++
++enum reg_class
++riscv_secondary_reload_class (enum reg_class rclass,
++ enum machine_mode mode, rtx x,
++ bool in_p ATTRIBUTE_UNUSED)
++{
++ int regno;
++
++ regno = true_regnum (x);
++
++ if (reg_class_subset_p (rclass, FP_REGS))
++ {
++ if (MEM_P (x) && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
++ /* We can use flw/fld/fsw/fsd. */
++ return NO_REGS;
++
++ if (GP_REG_P (regno) || x == CONST0_RTX (mode))
++ /* We can use fmv or go through memory when mode > Pmode. */
++ return NO_REGS;
++
++ if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (mode, x))
++ /* We can force the constant to memory and use flw/fld. */
++ return NO_REGS;
++
++ if (FP_REG_P (regno))
++ /* We can use fmv.fmt. */
++ return NO_REGS;
++
++ /* Otherwise, we need to reload through an integer register. */
++ return GR_REGS;
++ }
++ if (FP_REG_P (regno))
++ return reg_class_subset_p (rclass, GR_REGS) ? NO_REGS : GR_REGS;
++
++ return NO_REGS;
++}
++
++/* Implement TARGET_MODE_REP_EXTENDED. */
++
++static int
++riscv_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
++{
++ /* On 64-bit targets, SImode register values are sign-extended to DImode. */
++ if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
++ return SIGN_EXTEND;
++
++ return UNKNOWN;
++}
++
++/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
++
++static bool
++riscv_scalar_mode_supported_p (enum machine_mode mode)
++{
++ if (ALL_FIXED_POINT_MODE_P (mode)
++ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
++ return true;
++
++ return default_scalar_mode_supported_p (mode);
++}
++
++/* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
++ dependencies have no cost. */
++
++static int
++riscv_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
++ rtx dep ATTRIBUTE_UNUSED, int cost)
++{
++ if (REG_NOTE_KIND (link) != 0)
++ return 0;
++ return cost;
++}
++
++/* Return the number of instructions that can be issued per cycle. */
++
++static int
++riscv_issue_rate (void)
++{
++ return tune_info->issue_rate;
++}
++
++/* This structure describes a single built-in function. */
++struct riscv_builtin_description {
++ /* The code of the main .md file instruction. See riscv_builtin_type
++ for more information. */
++ enum insn_code icode;
++
++ /* The name of the built-in function. */
++ const char *name;
++
++ /* Specifies how the function should be expanded. */
++ enum riscv_builtin_type builtin_type;
++
++ /* The function's prototype. */
++ enum riscv_function_type function_type;
++
++ /* Whether the function is available. */
++ unsigned int (*avail) (void);
++};
++
++static unsigned int
++riscv_builtin_avail_riscv (void)
++{
++ return 1;
++}
++
++/* Construct a riscv_builtin_description from the given arguments.
++
++ INSN is the name of the associated instruction pattern, without the
++ leading CODE_FOR_riscv_.
++
++ NAME is the name of the function itself, without the leading
++ "__builtin_riscv_".
++
++ BUILTIN_TYPE and FUNCTION_TYPE are riscv_builtin_description fields.
++
++ AVAIL is the name of the availability predicate, without the leading
++ riscv_builtin_avail_. */
++#define RISCV_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \
++ { CODE_FOR_ ## INSN, "__builtin_riscv_" NAME, \
++ BUILTIN_TYPE, FUNCTION_TYPE, riscv_builtin_avail_ ## AVAIL }
++
++/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT function
++ mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE and AVAIL
++ are as for RISCV_BUILTIN. */
++#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
++ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
++
++/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT_NO_TARGET
++ function mapped to instruction CODE_FOR_<INSN>, FUNCTION_TYPE
++ and AVAIL are as for RISCV_BUILTIN. */
++#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
++ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT_NO_TARGET, \
++ FUNCTION_TYPE, AVAIL)
++
++static const struct riscv_builtin_description riscv_builtins[] = {
++ DIRECT_NO_TARGET_BUILTIN (nop, RISCV_VOID_FTYPE_VOID, riscv),
++};
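++
++/* For example, the entry above provides __builtin_riscv_nop (), which
++   expands directly to the "nop" pattern (CODE_FOR_nop) and returns no
++   value.  */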
++
++/* Index I is the function declaration for riscv_builtins[I], or null if the
++ function isn't defined on this target. */
++static GTY(()) tree riscv_builtin_decls[ARRAY_SIZE (riscv_builtins)];
++
++
++/* Source-level argument types. */
++#define RISCV_ATYPE_VOID void_type_node
++#define RISCV_ATYPE_INT integer_type_node
++#define RISCV_ATYPE_POINTER ptr_type_node
++#define RISCV_ATYPE_CPOINTER const_ptr_type_node
++
++/* Standard mode-based argument types. */
++#define RISCV_ATYPE_UQI unsigned_intQI_type_node
++#define RISCV_ATYPE_SI intSI_type_node
++#define RISCV_ATYPE_USI unsigned_intSI_type_node
++#define RISCV_ATYPE_DI intDI_type_node
++#define RISCV_ATYPE_UDI unsigned_intDI_type_node
++#define RISCV_ATYPE_SF float_type_node
++#define RISCV_ATYPE_DF double_type_node
++
++/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
++ their associated RISCV_ATYPEs. */
++#define RISCV_FTYPE_ATYPES1(A, B) \
++ RISCV_ATYPE_##A, RISCV_ATYPE_##B
++
++#define RISCV_FTYPE_ATYPES2(A, B, C) \
++ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C
++
++#define RISCV_FTYPE_ATYPES3(A, B, C, D) \
++ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D
++
++#define RISCV_FTYPE_ATYPES4(A, B, C, D, E) \
++ RISCV_ATYPE_##A, RISCV_ATYPE_##B, RISCV_ATYPE_##C, RISCV_ATYPE_##D, \
++ RISCV_ATYPE_##E
++
++/* Return the function type associated with function prototype TYPE. */
++
++static tree
++riscv_build_function_type (enum riscv_function_type type)
++{
++ static tree types[(int) RISCV_MAX_FTYPE_MAX];
++
++ if (types[(int) type] == NULL_TREE)
++ switch (type)
++ {
++#define DEF_RISCV_FTYPE(NUM, ARGS) \
++ case RISCV_FTYPE_NAME##NUM ARGS: \
++ types[(int) type] \
++ = build_function_type_list (RISCV_FTYPE_ATYPES##NUM ARGS, \
++ NULL_TREE); \
++ break;
++#include "config/riscv/riscv-ftypes.def"
++#undef DEF_RISCV_FTYPE
++ default:
++ gcc_unreachable ();
++ }
++
++ return types[(int) type];
++}
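++
++/* For example, DEF_RISCV_FTYPE (1, (VOID, VOID)) in riscv-ftypes.def
++   produces the case RISCV_FTYPE_NAME1 (VOID, VOID) above and builds
++   the C prototype "void (void)".  */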
++
++/* Implement TARGET_INIT_BUILTINS. */
++
++static void
++riscv_init_builtins (void)
++{
++ const struct riscv_builtin_description *d;
++ unsigned int i;
++
++ /* Iterate through all of the bdesc arrays, initializing all of the
++ builtin functions. */
++ for (i = 0; i < ARRAY_SIZE (riscv_builtins); i++)
++ {
++ d = &riscv_builtins[i];
++ if (d->avail ())
++ riscv_builtin_decls[i]
++ = add_builtin_function (d->name,
++ riscv_build_function_type (d->function_type),
++ i, BUILT_IN_MD, NULL, NULL);
++ }
++}
++
++/* Implement TARGET_BUILTIN_DECL. */
++
++static tree
++riscv_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
++{
++ if (code >= ARRAY_SIZE (riscv_builtins))
++ return error_mark_node;
++ return riscv_builtin_decls[code];
++}
++
++/* Take argument ARGNO from EXP's argument list and convert it into a
++ form suitable for input operand OPNO of instruction ICODE. Return the
++ value. */
++
++static rtx
++riscv_prepare_builtin_arg (enum insn_code icode,
++ unsigned int opno, tree exp, unsigned int argno)
++{
++ tree arg;
++ rtx value;
++ enum machine_mode mode;
++
++ arg = CALL_EXPR_ARG (exp, argno);
++ value = expand_normal (arg);
++ mode = insn_data[icode].operand[opno].mode;
++ if (!insn_data[icode].operand[opno].predicate (value, mode))
++ {
++ /* We need to get the mode from ARG for two reasons:
++
++ - to cope with address operands, where MODE is the mode of the
++ memory, rather than of VALUE itself.
++
++ - to cope with special predicates like pmode_register_operand,
++ where MODE is VOIDmode. */
++ value = copy_to_mode_reg (TYPE_MODE (TREE_TYPE (arg)), value);
++
++ /* Check the predicate again. */
++ if (!insn_data[icode].operand[opno].predicate (value, mode))
++ {
++ error ("invalid argument to built-in function");
++ return const0_rtx;
++ }
++ }
++
++ return value;
++}
++
++/* Return an rtx suitable for output operand OP of instruction ICODE.
++ If TARGET is non-null, try to use it where possible. */
++
++static rtx
++riscv_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
++{
++ enum machine_mode mode;
++
++ mode = insn_data[icode].operand[op].mode;
++ if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
++ target = gen_reg_rtx (mode);
++
++ return target;
++}
++
++/* Expand a RISCV_BUILTIN_DIRECT or RISCV_BUILTIN_DIRECT_NO_TARGET function;
++ HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
++ and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
++ suggests a good place to put the result. */
++
++static rtx
++riscv_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
++ bool has_target_p)
++{
++ rtx ops[MAX_RECOG_OPERANDS];
++ int opno, argno;
++
++ /* Map any target to operand 0. */
++ opno = 0;
++ if (has_target_p)
++ {
++ target = riscv_prepare_builtin_target (icode, opno, target);
++ ops[opno] = target;
++ opno++;
++ }
++
++ /* Map the arguments to the other operands. The n_operands value
++ for an expander includes match_dups and match_scratches as well as
++ match_operands, so n_operands is only an upper bound on the number
++ of arguments to the expander function. */
++ gcc_assert (opno + call_expr_nargs (exp) <= insn_data[icode].n_operands);
++ for (argno = 0; argno < call_expr_nargs (exp); argno++, opno++)
++ ops[opno] = riscv_prepare_builtin_arg (icode, opno, exp, argno);
++
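++  /* GEN_FCN yields a generator that takes a fixed number of operands,
++     so dispatch on the number of operands collected above.  */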
++ switch (opno)
++ {
++ case 2:
++ emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
++ break;
++
++ case 3:
++ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
++ break;
++
++ case 4:
++ emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++ return target;
++}
++
++/* Implement TARGET_EXPAND_BUILTIN. */
++
++static rtx
++riscv_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree fndecl;
++ unsigned int fcode, avail;
++ const struct riscv_builtin_description *d;
++
++ fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
++ fcode = DECL_FUNCTION_CODE (fndecl);
++ gcc_assert (fcode < ARRAY_SIZE (riscv_builtins));
++ d = &riscv_builtins[fcode];
++ avail = d->avail ();
++ gcc_assert (avail != 0);
++ switch (d->builtin_type)
++ {
++ case RISCV_BUILTIN_DIRECT:
++ return riscv_expand_builtin_direct (d->icode, target, exp, true);
++
++ case RISCV_BUILTIN_DIRECT_NO_TARGET:
++ return riscv_expand_builtin_direct (d->icode, target, exp, false);
++ }
++ gcc_unreachable ();
++}
++
++/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
++ in order to avoid duplicating too much logic from elsewhere. */
++
++static void
++riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
++ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
++ tree function)
++{
++ rtx this_rtx, temp1, temp2, insn, fnaddr;
++ bool use_sibcall_p;
++
++ /* Pretend to be a post-reload pass while generating rtl. */
++ reload_completed = 1;
++
++ /* Mark the end of the (empty) prologue. */
++ emit_note (NOTE_INSN_PROLOGUE_END);
++
++ /* Determine if we can use a sibcall to call FUNCTION directly. */
++ fnaddr = XEXP (DECL_RTL (function), 0);
++ use_sibcall_p = absolute_symbolic_operand (fnaddr, Pmode);
++
++ /* We need two temporary registers in some cases. */
++ temp1 = gen_rtx_REG (Pmode, GP_TEMP_FIRST);
++ temp2 = gen_rtx_REG (Pmode, GP_TEMP_FIRST + 1);
++
++ /* Find out which register contains the "this" pointer. */
++ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
++ else
++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
++
++ /* Add DELTA to THIS_RTX. */
++ if (delta != 0)
++ {
++ rtx offset = GEN_INT (delta);
++ if (!SMALL_OPERAND (delta))
++ {
++ riscv_emit_move (temp1, offset);
++ offset = temp1;
++ }
++ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
++ }
++
++ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
++ if (vcall_offset != 0)
++ {
++ rtx addr;
++
++ /* Set TEMP1 to *THIS_RTX. */
++ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
++
++ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
++ addr = riscv_add_offset (temp2, temp1, vcall_offset);
++
++ /* Load the offset and add it to THIS_RTX. */
++ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
++ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
++ }
++
++ /* Jump to the target function. Use a sibcall if direct jumps are
++ allowed, otherwise load the address into a register first. */
++ if (use_sibcall_p)
++ {
++ insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
++ SIBLING_CALL_P (insn) = 1;
++ }
++ else
++ {
++      riscv_emit_move (temp1, fnaddr);
++ emit_jump_insn (gen_indirect_jump (temp1));
++ }
++
++ /* Run just enough of rest_of_compilation. This sequence was
++ "borrowed" from alpha.c. */
++ insn = get_insns ();
++ split_all_insns_noflow ();
++ shorten_branches (insn);
++ final_start_function (insn, file, 1);
++ final (insn, file, 1);
++ final_end_function ();
++
++ /* Clean up the vars set above. Note that final_end_function resets
++ the global pointer for us. */
++ reload_completed = 0;
++}
++
++/* Allocate a chunk of memory for per-function machine-dependent data. */
++
++static struct machine_function *
++riscv_init_machine_status (void)
++{
++ return ggc_alloc_cleared_machine_function ();
++}
++
++/* Implement TARGET_OPTION_OVERRIDE. */
++
++static void
++riscv_option_override (void)
++{
++ int regno, mode;
++ const struct riscv_cpu_info *cpu;
++
++#ifdef SUBTARGET_OVERRIDE_OPTIONS
++ SUBTARGET_OVERRIDE_OPTIONS;
++#endif
++
++ flag_pcc_struct_return = 0;
++
++ if (flag_pic)
++ g_switch_value = 0;
++
++ /* Prefer a call to memcpy over inline code when optimizing for size,
++ though see MOVE_RATIO in riscv.h. */
++ if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
++ target_flags |= MASK_MEMCPY;
++
++ /* Handle -mtune. */
++ cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
++ RISCV_TUNE_STRING_DEFAULT);
++ tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
++
++ /* If the user hasn't specified a branch cost, use the processor's
++ default. */
++ if (riscv_branch_cost == 0)
++ riscv_branch_cost = tune_info->branch_cost;
++
++ /* Set up riscv_hard_regno_mode_ok. */
++ for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
++ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
++ riscv_hard_regno_mode_ok[mode][regno]
++ = riscv_hard_regno_mode_ok_p (regno, (enum machine_mode) mode);
++
++ /* Function to allocate machine-dependent function status. */
++ init_machine_status = &riscv_init_machine_status;
++
++ riscv_init_relocs ();
++}
++
++/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
++
++static void
++riscv_conditional_register_usage (void)
++{
++ int regno;
++
++ if (!TARGET_HARD_FLOAT)
++ {
++ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++ fixed_regs[regno] = call_used_regs[regno] = 1;
++ }
++}
++
++/* Implement TARGET_TRAMPOLINE_INIT. */
++
++static void
++riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
++{
++ rtx addr, end_addr, mem;
++ rtx trampoline[4];
++ unsigned int i;
++ HOST_WIDE_INT static_chain_offset, target_function_offset;
++
++ /* Work out the offsets of the pointers from the start of the
++ trampoline code. */
++ gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
++ static_chain_offset = TRAMPOLINE_CODE_SIZE;
++ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
++
++ /* Get pointers to the beginning and end of the code block. */
++ addr = force_reg (Pmode, XEXP (m_tramp, 0));
++ end_addr = riscv_force_binary (Pmode, PLUS, addr, GEN_INT (TRAMPOLINE_CODE_SIZE));
++
++#define OP(X) gen_int_mode (X, SImode)
++#define MATCH_LREG ((Pmode) == DImode ? MATCH_LD : MATCH_LW)
++
++  /* auipc   $static_chain, 0
++     l[wd]   $tmp, target_function_offset($static_chain)
++     l[wd]   $static_chain, static_chain_offset($static_chain)
++     jr      $tmp
++
++     where $tmp is RISCV_PROLOGUE_TEMP_REGNUM.  */
++
++ trampoline[0] = OP (RISCV_UTYPE (AUIPC, STATIC_CHAIN_REGNUM, 0));
++ trampoline[1] = OP (RISCV_ITYPE (LREG, RISCV_PROLOGUE_TEMP_REGNUM,
++ STATIC_CHAIN_REGNUM, target_function_offset));
++ trampoline[2] = OP (RISCV_ITYPE (LREG, STATIC_CHAIN_REGNUM,
++ STATIC_CHAIN_REGNUM, static_chain_offset));
++ trampoline[3] = OP (RISCV_ITYPE (JALR, 0, RISCV_PROLOGUE_TEMP_REGNUM, 0));
++
++#undef MATCH_LREG
++#undef OP
++
++ /* Copy the trampoline code. Leave any padding uninitialized. */
++ for (i = 0; i < ARRAY_SIZE (trampoline); i++)
++ {
++ mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
++ riscv_emit_move (mem, trampoline[i]);
++ }
++
++ /* Set up the static chain pointer field. */
++ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
++ riscv_emit_move (mem, chain_value);
++
++ /* Set up the target function field. */
++ mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
++ riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
++
++ /* Flush the code part of the trampoline. */
++ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
++ emit_insn (gen_clear_cache (addr, end_addr));
++}
++
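++/* Implement TARGET_LRA_P.  Use LRA instead of classic reload when
++   riscv_lra_flag is set.  */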
++static bool
++riscv_lra_p (void)
++{
++ return riscv_lra_flag;
++}
++
++/* Initialize the GCC target structure. */
++#undef TARGET_ASM_ALIGNED_HI_OP
++#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
++#undef TARGET_ASM_ALIGNED_SI_OP
++#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
++#undef TARGET_ASM_ALIGNED_DI_OP
++#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
++
++#undef TARGET_OPTION_OVERRIDE
++#define TARGET_OPTION_OVERRIDE riscv_option_override
++
++#undef TARGET_LEGITIMIZE_ADDRESS
++#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
++
++#undef TARGET_SCHED_ADJUST_COST
++#define TARGET_SCHED_ADJUST_COST riscv_adjust_cost
++#undef TARGET_SCHED_ISSUE_RATE
++#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
++
++#undef TARGET_FUNCTION_OK_FOR_SIBCALL
++#define TARGET_FUNCTION_OK_FOR_SIBCALL hook_bool_tree_tree_true
++
++#undef TARGET_REGISTER_MOVE_COST
++#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
++#undef TARGET_MEMORY_MOVE_COST
++#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
++#undef TARGET_RTX_COSTS
++#define TARGET_RTX_COSTS riscv_rtx_costs
++#undef TARGET_ADDRESS_COST
++#define TARGET_ADDRESS_COST riscv_address_cost
++
++#undef TARGET_PREFERRED_RELOAD_CLASS
++#define TARGET_PREFERRED_RELOAD_CLASS riscv_preferred_reload_class
++
++#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
++#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
++
++#undef TARGET_EXPAND_BUILTIN_VA_START
++#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
++
++#undef TARGET_PROMOTE_FUNCTION_MODE
++#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
++
++#undef TARGET_RETURN_IN_MEMORY
++#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
++
++#undef TARGET_ASM_OUTPUT_MI_THUNK
++#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
++
++#undef TARGET_PRINT_OPERAND
++#define TARGET_PRINT_OPERAND riscv_print_operand
++#undef TARGET_PRINT_OPERAND_ADDRESS
++#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
++
++#undef TARGET_SETUP_INCOMING_VARARGS
++#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
++#undef TARGET_STRICT_ARGUMENT_NAMING
++#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
++#undef TARGET_MUST_PASS_IN_STACK
++#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
++#undef TARGET_PASS_BY_REFERENCE
++#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
++#undef TARGET_ARG_PARTIAL_BYTES
++#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
++#undef TARGET_FUNCTION_ARG
++#define TARGET_FUNCTION_ARG riscv_function_arg
++#undef TARGET_FUNCTION_ARG_ADVANCE
++#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
++#undef TARGET_FUNCTION_ARG_BOUNDARY
++#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
++
++#undef TARGET_MODE_REP_EXTENDED
++#define TARGET_MODE_REP_EXTENDED riscv_mode_rep_extended
++
++#undef TARGET_SCALAR_MODE_SUPPORTED_P
++#define TARGET_SCALAR_MODE_SUPPORTED_P riscv_scalar_mode_supported_p
++
++#undef TARGET_INIT_BUILTINS
++#define TARGET_INIT_BUILTINS riscv_init_builtins
++#undef TARGET_BUILTIN_DECL
++#define TARGET_BUILTIN_DECL riscv_builtin_decl
++#undef TARGET_EXPAND_BUILTIN
++#define TARGET_EXPAND_BUILTIN riscv_expand_builtin
++
++#undef TARGET_HAVE_TLS
++#define TARGET_HAVE_TLS HAVE_AS_TLS
++
++#undef TARGET_CANNOT_FORCE_CONST_MEM
++#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
++
++#undef TARGET_LEGITIMATE_CONSTANT_P
++#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
++
++#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
++#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
++
++#ifdef HAVE_AS_DTPRELWORD
++#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
++#define TARGET_ASM_OUTPUT_DWARF_DTPREL riscv_output_dwarf_dtprel
++#endif
++
++#undef TARGET_LEGITIMATE_ADDRESS_P
++#define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
++
++#undef TARGET_CAN_ELIMINATE
++#define TARGET_CAN_ELIMINATE riscv_can_eliminate
++
++#undef TARGET_CONDITIONAL_REGISTER_USAGE
++#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
++
++#undef TARGET_TRAMPOLINE_INIT
++#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
++
++#undef TARGET_IN_SMALL_DATA_P
++#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
++
++#undef TARGET_ASM_SELECT_RTX_SECTION
++#define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
++
++#undef TARGET_MIN_ANCHOR_OFFSET
++#define TARGET_MIN_ANCHOR_OFFSET (-RISCV_IMM_REACH/2)
++
++#undef TARGET_MAX_ANCHOR_OFFSET
++#define TARGET_MAX_ANCHOR_OFFSET (RISCV_IMM_REACH/2-1)
++
++#undef TARGET_LRA_P
++#define TARGET_LRA_P riscv_lra_p
++
++struct gcc_target targetm = TARGET_INITIALIZER;
++
++#include "gt-riscv.h"
+diff -urN original-gcc/gcc/config/riscv/riscv-ftypes.def gcc/gcc/config/riscv/riscv-ftypes.def
+--- original-gcc/gcc/config/riscv/riscv-ftypes.def 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/riscv-ftypes.def 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,39 @@
++/* Definitions of prototypes for RISC-V built-in functions.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++/* Invoke DEF_RISCV_FTYPE (NARGS, LIST) for each prototype used by
++   RISC-V built-in functions, where:
++
++ NARGS is the number of arguments.
++ LIST contains the return-type code followed by the codes for each
++ argument type.
++
++ Argument- and return-type codes are either modes or one of the following:
++
++ VOID for void_type_node
++ INT for integer_type_node
++ POINTER for ptr_type_node
++
++   (we don't use PTR because that's an ANSI-compatibility macro).
++
++ Please keep this list lexicographically sorted by the LIST argument. */
++
++DEF_RISCV_FTYPE (1, (VOID, VOID))
+diff -urN original-gcc/gcc/config/riscv/riscv.h gcc/gcc/config/riscv/riscv.h
+--- original-gcc/gcc/config/riscv/riscv.h 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/riscv.h 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,1127 @@
++/* Definition of RISC-V target for GNU compiler.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++/* TARGET_HARD_FLOAT and TARGET_SOFT_FLOAT reflect whether the FPU is
++ directly accessible, while the command-line options select
++ TARGET_HARD_FLOAT_ABI and TARGET_SOFT_FLOAT_ABI to reflect the ABI
++ in use. */
++#define TARGET_HARD_FLOAT TARGET_HARD_FLOAT_ABI
++#define TARGET_SOFT_FLOAT TARGET_SOFT_FLOAT_ABI
++
++/* Target CPU builtins. */
++#define TARGET_CPU_CPP_BUILTINS() \
++ do \
++ { \
++ builtin_assert ("machine=riscv"); \
++ \
++ builtin_assert ("cpu=riscv"); \
++ builtin_define ("__riscv__"); \
++ builtin_define ("__riscv"); \
++ builtin_define ("_riscv"); \
++ \
++ if (TARGET_64BIT) \
++ { \
++ builtin_define ("__riscv64"); \
++ builtin_define ("_RISCV_SIM=_ABI64"); \
++ } \
++ else \
++ builtin_define ("_RISCV_SIM=_ABI32"); \
++ \
++ builtin_define ("_ABI32=1"); \
++ builtin_define ("_ABI64=3"); \
++ \
++ \
++ builtin_define_with_int_value ("_RISCV_SZINT", INT_TYPE_SIZE); \
++ builtin_define_with_int_value ("_RISCV_SZLONG", LONG_TYPE_SIZE); \
++ builtin_define_with_int_value ("_RISCV_SZPTR", POINTER_SIZE); \
++ builtin_define_with_int_value ("_RISCV_FPSET", 32); \
++ \
++ if (TARGET_ATOMIC) { \
++ builtin_define ("__riscv_atomic"); \
++ } \
++ \
++ /* These defines reflect the ABI in use, not whether the \
++ FPU is directly accessible. */ \
++ if (TARGET_HARD_FLOAT_ABI) { \
++ builtin_define ("__riscv_hard_float"); \
++ if (TARGET_FDIV) { \
++ builtin_define ("__riscv_fdiv"); \
++ builtin_define ("__riscv_fsqrt"); \
++ } \
++ } else \
++ builtin_define ("__riscv_soft_float"); \
++ \
++ /* The base RISC-V ISA is always little-endian. */ \
++ builtin_define_std ("RISCVEL"); \
++ builtin_define ("_RISCVEL"); \
++ \
++ /* Macros dependent on the C dialect. */ \
++ if (preprocessing_asm_p ()) \
++ { \
++ builtin_define_std ("LANGUAGE_ASSEMBLY"); \
++ builtin_define ("_LANGUAGE_ASSEMBLY"); \
++ } \
++ else if (c_dialect_cxx ()) \
++ { \
++ builtin_define ("_LANGUAGE_C_PLUS_PLUS"); \
++ builtin_define ("__LANGUAGE_C_PLUS_PLUS"); \
++ builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); \
++ } \
++ else \
++ { \
++ builtin_define_std ("LANGUAGE_C"); \
++ builtin_define ("_LANGUAGE_C"); \
++ } \
++ if (c_dialect_objc ()) \
++ { \
++ builtin_define ("_LANGUAGE_OBJECTIVE_C"); \
++ builtin_define ("__LANGUAGE_OBJECTIVE_C"); \
++ /* Bizarre, but needed at least for Irix. */ \
++ builtin_define_std ("LANGUAGE_C"); \
++ builtin_define ("_LANGUAGE_C"); \
++ } \
++ } \
++ while (0)
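++
++/* For example, an RV64 hard-float compilation defines __riscv, __riscv64,
++   __riscv_hard_float and _RISCV_SIM=_ABI64; enabling the atomics
++   extension also defines __riscv_atomic.  */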
++
++/* Default target_flags if no switches are specified.  */
++
++#ifndef TARGET_DEFAULT
++#define TARGET_DEFAULT (TARGET_ATOMIC |
++#endif
++
++#ifndef RISCV_ARCH_STRING_DEFAULT
++#define RISCV_ARCH_STRING_DEFAULT "IMAFD"
++#endif
++
++#ifndef RISCV_TUNE_STRING_DEFAULT
++#define RISCV_TUNE_STRING_DEFAULT "rocket"
++#endif
++
++#ifndef TARGET_64BIT_DEFAULT
++#define TARGET_64BIT_DEFAULT 1
++#endif
++
++#if TARGET_64BIT_DEFAULT
++# define MULTILIB_ARCH_DEFAULT "m64"
++# define OPT_ARCH64 "!m32"
++# define OPT_ARCH32 "m32"
++#else
++# define MULTILIB_ARCH_DEFAULT "m32"
++# define OPT_ARCH64 "m64"
++# define OPT_ARCH32 "!m64"
++#endif
++
++#ifndef MULTILIB_DEFAULTS
++#define MULTILIB_DEFAULTS \
++ { MULTILIB_ARCH_DEFAULT }
++#endif
++
++
++/* Support for a compile-time default CPU, et cetera. The rules are:
++ --with-arch is ignored if -march is specified.
++ --with-tune is ignored if -mtune is specified.
++ --with-float is ignored if -mhard-float or -msoft-float are specified. */
++#define OPTION_DEFAULT_SPECS \
++ {"arch_32", "%{" OPT_ARCH32 ":%{m32}}" }, \
++ {"arch_64", "%{" OPT_ARCH64 ":%{m64}}" }, \
++ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
++ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }, \
++
++#define DRIVER_SELF_SPECS ""
++
++#ifdef IN_LIBGCC2
++#undef TARGET_64BIT
++/* Make this compile time constant for libgcc2 */
++#ifdef __riscv64
++#define TARGET_64BIT 1
++#else
++#define TARGET_64BIT 0
++#endif
++#endif /* IN_LIBGCC2 */
++
++/* Tell collect what flags to pass to nm. */
++#ifndef NM_FLAGS
++#define NM_FLAGS "-Bn"
++#endif
++
++#undef ASM_SPEC
++#define ASM_SPEC "\
++%(subtarget_asm_debugging_spec) \
++%{m32} %{m64} %{!m32:%{!m64: %(asm_abi_default_spec)}} \
++%{fPIC|fpic|fPIE|fpie:-fpic} \
++%{march=*} \
++%(subtarget_asm_spec)"
++
++/* Extra switches sometimes passed to the linker. */
++
++#ifndef LINK_SPEC
++#define LINK_SPEC "\
++%{!T:-dT riscv.ld} \
++%{m64:-melf64lriscv} \
++%{m32:-melf32lriscv} \
++%{shared}"
++#endif /* LINK_SPEC defined */
++
++/* This macro defines names of additional specifications to put in the specs
++ that can be used in various specifications like CC1_SPEC. Its definition
++ is an initializer with a subgrouping for each command option.
++
++ Each subgrouping contains a string constant, that defines the
++ specification name, and a string constant that used by the GCC driver
++ program.
++
++ Do not define this macro if it does not need to do anything. */
++
++#define EXTRA_SPECS \
++ { "asm_abi_default_spec", "-" MULTILIB_ARCH_DEFAULT }, \
++ SUBTARGET_EXTRA_SPECS
++
++#ifndef SUBTARGET_EXTRA_SPECS
++#define SUBTARGET_EXTRA_SPECS
++#endif
++
++/* By default, turn on GDB extensions. */
++#define DEFAULT_GDB_EXTENSIONS 1
++
++#define LOCAL_LABEL_PREFIX "."
++#define USER_LABEL_PREFIX ""
++
++#define DWARF2_DEBUGGING_INFO 1
++#define DWARF2_ASM_LINE_DEBUG_INFO 0
++
++/* The mapping from gcc register number to DWARF 2 CFA column number. */
++#define DWARF_FRAME_REGNUM(REGNO) \
++ (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
++
++/* The DWARF 2 CFA column which tracks the return address. */
++#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
++
++/* Don't emit .cfi_sections, as it does not work.  */
++#undef HAVE_GAS_CFI_SECTIONS_DIRECTIVE
++#define HAVE_GAS_CFI_SECTIONS_DIRECTIVE 0
++
++/* Before the prologue, RA lives in x1.  */
++#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
++
++/* Describe how we implement __builtin_eh_return. */
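++/* The EH data live in the first four argument registers; the stack
++   adjustment lives in the fifth.  */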
++#define EH_RETURN_DATA_REGNO(N) \
++ ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
++
++#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
++
++/* Target machine storage layout */
++
++#define BITS_BIG_ENDIAN 0
++#define BYTES_BIG_ENDIAN 0
++#define WORDS_BIG_ENDIAN 0
++
++#define MAX_BITS_PER_WORD 64
++
++/* Width of a word, in units (bytes). */
++#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
++#ifndef IN_LIBGCC2
++#define MIN_UNITS_PER_WORD 4
++#endif
++
++/* We currently require both or neither of the `F' and `D' extensions. */
++#define UNITS_PER_FPREG 8
++
++/* If FP regs aren't wide enough for a given FP argument, it is passed in
++ integer registers. */
++#define MIN_FPRS_PER_FMT 1
++
++/* The largest size of value that can be held in floating-point
++ registers and moved with a single instruction. */
++#define UNITS_PER_HWFPVALUE \
++ (TARGET_SOFT_FLOAT_ABI ? 0 : UNITS_PER_FPREG)
++
++/* The largest size of value that can be held in floating-point
++ registers. */
++#define UNITS_PER_FPVALUE \
++ (TARGET_SOFT_FLOAT_ABI ? 0 \
++ : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)
++
++/* The number of bytes in a double. */
++#define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
++
++/* Set the sizes of the core types. */
++#define SHORT_TYPE_SIZE 16
++#define INT_TYPE_SIZE 32
++#define LONG_TYPE_SIZE (TARGET_64BIT ? 64 : 32)
++#define LONG_LONG_TYPE_SIZE 64
++
++#define FLOAT_TYPE_SIZE 32
++#define DOUBLE_TYPE_SIZE 64
++/* XXX The ABI says long doubles are IEEE-754-2008 float128s. */
++#define LONG_DOUBLE_TYPE_SIZE 64
++
++#ifdef IN_LIBGCC2
++# define LIBGCC2_LONG_DOUBLE_TYPE_SIZE LONG_DOUBLE_TYPE_SIZE
++#endif
++
++/* Allocation boundary (in *bits*) for storing arguments in argument list. */
++#define PARM_BOUNDARY BITS_PER_WORD
++
++/* Allocation boundary (in *bits*) for the code of a function. */
++#define FUNCTION_BOUNDARY 32
++
++/* There is no point aligning anything to a rounder boundary than this. */
++#define BIGGEST_ALIGNMENT 128
++
++/* All accesses must be aligned. */
++#define STRICT_ALIGNMENT 1
++
++/* Define this if you wish to imitate the way many other C compilers
++ handle alignment of bitfields and the structures that contain
++ them.
++
++ The behavior is that the type written for a bit-field (`int',
++ `short', or other integer type) imposes an alignment for the
++ entire structure, as if the structure really did contain an
++ ordinary field of that type. In addition, the bit-field is placed
++ within the structure so that it would fit within such a field,
++ not crossing a boundary for it.
++
++ Thus, on most machines, a bit-field whose type is written as `int'
++ would not cross a four-byte boundary, and would force four-byte
++ alignment for the whole structure. (The alignment used may not
++ be four bytes; it is controlled by the other alignment
++ parameters.)
++
++ If the macro is defined, its definition should be a C expression;
++ a nonzero value for the expression enables this behavior. */
++
++#define PCC_BITFIELD_TYPE_MATTERS 1
++
++/* If defined, a C expression to compute the alignment given to a
++ constant that is being placed in memory. CONSTANT is the constant
++ and ALIGN is the alignment that the object would ordinarily have.
++ The value of this macro is used instead of that alignment to align
++ the object.
++
++ If this macro is not defined, then ALIGN is used.
++
++ The typical use of this macro is to increase alignment for string
++ constants to be word aligned so that `strcpy' calls that copy
++ constants can be done inline. */
++
++#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
++ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
++ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
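++
++/* For instance, a STRING_CST such as "abc" that would otherwise be
++   byte-aligned is raised to BITS_PER_WORD here, so word-at-a-time
++   strcpy copies stay aligned. */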
++
++/* If defined, a C expression to compute the alignment for a static
++ variable. TYPE is the data type, and ALIGN is the alignment that
++ the object would ordinarily have. The value of this macro is used
++ instead of that alignment to align the object.
++
++ If this macro is not defined, then ALIGN is used.
++
++ One use of this macro is to increase alignment of medium-size
++ data to make it all fit in fewer cache lines. Another is to
++ cause character arrays to be word-aligned so that `strcpy' calls
++ that copy constants to character arrays can be done inline. */
++
++#undef DATA_ALIGNMENT
++#define DATA_ALIGNMENT(TYPE, ALIGN) \
++ ((((ALIGN) < BITS_PER_WORD) \
++ && (TREE_CODE (TYPE) == ARRAY_TYPE \
++ || TREE_CODE (TYPE) == UNION_TYPE \
++ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
++
++/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
++ character arrays to be word-aligned so that `strcpy' calls that copy
++ constants to character arrays can be done inline, and 'strcmp' can be
++ optimised to use word loads. */
++#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
++ DATA_ALIGNMENT (TYPE, ALIGN)
++
++/* Define if operations between registers always perform the operation
++ on the full register even if a narrower mode is specified. */
++#define WORD_REGISTER_OPERATIONS
++
++/* When in 64-bit mode, move insns will sign extend SImode and CCmode
++ moves. All other references are zero extended. */
++#define LOAD_EXTEND_OP(MODE) \
++ (TARGET_64BIT && ((MODE) == SImode || (MODE) == CCmode) \
++ ? SIGN_EXTEND : ZERO_EXTEND)
++
++/* Define this macro if it is advisable to hold scalars in registers
++ in a wider mode than that declared by the program. In such cases,
++ the value is constrained to be within the bounds of the declared
++ type, but kept valid in the wider mode. The signedness of the
++ extension may differ from that of the type. */
++
++#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
++ if (GET_MODE_CLASS (MODE) == MODE_INT \
++ && GET_MODE_SIZE (MODE) < 4) \
++ { \
++ (MODE) = Pmode; \
++ }
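++
++/* Thus QImode and HImode scalars are held in Pmode registers (SImode
++   on RV32, DImode on RV64); e.g. a `short' is operated on in a
++   full-width register while its value stays within the declared
++   16-bit range. */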
++
++/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
++ Extensions of pointers to word_mode must be signed. */
++#define POINTERS_EXTEND_UNSIGNED false
++
++/* RV32 double-precision FP <-> integer moves go through memory */
++#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
++ (!TARGET_64BIT && GET_MODE_SIZE (MODE) == 8 && \
++ (((CLASS1) == FP_REGS && (CLASS2) != FP_REGS) \
++ || ((CLASS2) == FP_REGS && (CLASS1) != FP_REGS)))
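++
++/* For example, RV32 has no 64-bit GPR<->FPR move instruction, so a
++   DFmode value is bounced through a stack slot (e.g. an fsd store
++   followed by two lw loads, or the reverse). */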
++
++/* Define if loading short immediate values into registers sign extends. */
++#define SHORT_IMMEDIATES_SIGN_EXTEND
++
++/* Standard register usage. */
++
++/* Number of hardware registers. We have:
++
++ - 32 integer registers
++ - 32 floating point registers
++ - 2 fake registers:
++ - ARG_POINTER_REGNUM
++ - FRAME_POINTER_REGNUM */
++
++#define FIRST_PSEUDO_REGISTER 66
++
++/* x0, sp, gp, and tp are fixed. */
++
++#define FIXED_REGISTERS \
++{ /* General registers. */ \
++ 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ /* Floating-point registers. */ \
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ /* Others. */ \
++ 1, 1 \
++}
++
++
++/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
++ The call RTLs themselves clobber ra. */
++
++#define CALL_USED_REGISTERS \
++{ /* General registers. */ \
++ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Floating-point registers. */ \
++ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Others. */ \
++ 1, 1 \
++}
++
++#define CALL_REALLY_USED_REGISTERS \
++{ /* General registers. */ \
++ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Floating-point registers. */ \
++ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Others. */ \
++ 1, 1 \
++}
++
++/* Internal macros to classify an ISA register's type. */
++
++#define GP_REG_FIRST 0
++#define GP_REG_LAST 31
++#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
++
++#define FP_REG_FIRST 32
++#define FP_REG_LAST 63
++#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
++
++/* The DWARF 2 CFA column which tracks the return address from a
++ signal handler context. This means that to maintain backwards
++ compatibility, no hard register can be assigned this column if it
++ would need to be handled by the DWARF unwinder. */
++#define DWARF_ALT_FRAME_RETURN_COLUMN 64
++
++#define GP_REG_P(REGNO) \
++ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
++#define FP_REG_P(REGNO) \
++ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
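++
++/* For instance, GP_REG_P (10) holds for a0, and FP_REG_P (42) holds
++   for fa0 (hard register 32 + 10). */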
++
++#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
++
++/* Return coprocessor number from register number. */
++
++#define COPNUM_AS_CHAR_FROM_REGNUM(REGNO) \
++ (COP0_REG_P (REGNO) ? '0' : COP2_REG_P (REGNO) ? '2' \
++ : COP3_REG_P (REGNO) ? '3' : '?')
++
++
++#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
++
++#define HARD_REGNO_MODE_OK(REGNO, MODE) \
++ riscv_hard_regno_mode_ok[ (int)(MODE) ][ (REGNO) ]
++
++#define MODES_TIEABLE_P(MODE1, MODE2) \
++ ((MODE1) == (MODE2) || (GET_MODE_CLASS (MODE1) == MODE_INT \
++ && GET_MODE_CLASS (MODE2) == MODE_INT))
++
++/* Use s0 as the frame pointer if it is so requested. */
++#define HARD_FRAME_POINTER_REGNUM 8
++#define STACK_POINTER_REGNUM 2
++#define THREAD_POINTER_REGNUM 4
++
++/* These two registers don't really exist: they get eliminated to either
++ the stack or hard frame pointer. */
++#define ARG_POINTER_REGNUM 64
++#define FRAME_POINTER_REGNUM 65
++
++#define HARD_FRAME_POINTER_IS_FRAME_POINTER 0
++#define HARD_FRAME_POINTER_IS_ARG_POINTER 0
++
++/* Register in which static-chain is passed to a function. */
++#define STATIC_CHAIN_REGNUM GP_TEMP_FIRST
++
++/* Registers used as temporaries in prologue/epilogue code.
++
++ The prologue registers mustn't conflict with any
++ incoming arguments, the static chain pointer, or the frame pointer.
++ The epilogue temporary mustn't conflict with the return registers,
++ the frame pointer, the EH stack adjustment, or the EH data registers. */
++
++#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
++#define RISCV_EPILOGUE_TEMP_REGNUM RISCV_PROLOGUE_TEMP_REGNUM
++
++#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
++#define RISCV_EPILOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_EPILOGUE_TEMP_REGNUM)
++
++#define FUNCTION_PROFILER(STREAM, LABELNO) \
++{ \
++ sorry ("profiler support for RISC-V"); \
++}
++
++/* Define this macro if it is as good or better to call a constant
++ function address than to call an address kept in a register. */
++#define NO_FUNCTION_CSE 1
++
++/* Define the classes of registers for register constraints in the
++ machine description. Also define ranges of constants.
++
++ One of the classes must always be named ALL_REGS and include all hard regs.
++ If there is more than one class, another class must be named NO_REGS
++ and contain no registers.
++
++ The name GENERAL_REGS must be the name of a class (or an alias for
++ another name such as ALL_REGS). This is the class of registers
++ that is allowed by "g" or "r" in a register constraint.
++ Also, registers outside this class are allocated only when
++ instructions express preferences for them.
++
++ The classes must be numbered in nondecreasing order; that is,
++ a larger-numbered class must never be contained completely
++ in a smaller-numbered class.
++
++ For any two classes, it is very desirable that there be another
++ class that represents their union. */
++
++enum reg_class
++{
++ NO_REGS, /* no registers in set */
++ T_REGS, /* registers used by indirect sibcalls */
++ GR_REGS, /* integer registers */
++ FP_REGS, /* floating point registers */
++ FRAME_REGS, /* $arg and $frame */
++ ALL_REGS, /* all registers */
++ LIM_REG_CLASSES /* max value + 1 */
++};
++
++#define N_REG_CLASSES (int) LIM_REG_CLASSES
++
++#define GENERAL_REGS GR_REGS
++
++/* An initializer containing the names of the register classes as C
++ string constants. These names are used in writing some of the
++ debugging dumps. */
++
++#define REG_CLASS_NAMES \
++{ \
++ "NO_REGS", \
++ "T_REGS", \
++ "GR_REGS", \
++ "FP_REGS", \
++ "FRAME_REGS", \
++ "ALL_REGS" \
++}
++
++/* An initializer containing the contents of the register classes,
++ as integers which are bit masks. The Nth integer specifies the
++ contents of class N. The way the integer MASK is interpreted is
++ that register R is in the class if `MASK & (1 << R)' is 1.
++
++ When the machine has more than 32 registers, an integer does not
++ suffice. Then the integers are replaced by sub-initializers,
++ braced groupings containing several integers. Each
++ sub-initializer must be suitable as an initializer for the type
++ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */
++
++#define REG_CLASS_CONTENTS \
++{ \
++ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
++ { 0xf00000e0, 0x00000000, 0x00000000 }, /* T_REGS */ \
++ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \
++ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
++ { 0x00000000, 0x00000000, 0x00000003 }, /* FRAME_REGS */ \
++ { 0xffffffff, 0xffffffff, 0x00000003 } /* ALL_REGS */ \
++}
++
++/* A C expression whose value is a register class containing hard
++   register REGNO.  In general there is more than one such class;
++ choose a class which is "minimal", meaning that no smaller class
++ also contains the register. */
++
++#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
++
++/* A macro whose definition is the name of the class to which a
++ valid base register must belong. A base register is one used in
++ an address which is the register value plus a displacement. */
++
++#define BASE_REG_CLASS GR_REGS
++
++/* A macro whose definition is the name of the class to which a
++ valid index register must belong. An index register is one used
++ in an address where its value is either multiplied by a scale
++ factor or added to another register (as well as added to a
++ displacement). */
++
++#define INDEX_REG_CLASS NO_REGS
++
++/* We generally want to put call-clobbered registers ahead of
++ call-saved ones. (IRA expects this.) */
++
++#define REG_ALLOC_ORDER \
++{ \
++ /* Call-clobbered GPRs. */ \
++ 15, 14, 13, 12, 11, 10, 16, 17, 5, 6, 7, 28, 29, 30, 31, 1, \
++ /* Call-saved GPRs. */ \
++ 8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, \
++ /* GPRs that can never be exposed to the register allocator. */ \
++ 0, 2, 3, 4, \
++ /* Call-clobbered FPRs. */ \
++ 32, 33, 34, 35, 36, 37, 38, 39, 42, 43, 44, 45, 46, 47, 48, 49, \
++ 60, 61, 62, 63, \
++ /* Call-saved FPRs. */ \
++ 40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, \
++ /* None of the remaining classes have defined call-saved \
++ registers. */ \
++ 64, 65 \
++}
++
++/* True if VALUE fits in a signed 12-bit immediate operand. */
++
++#include "opcode-riscv.h"
++#define SMALL_OPERAND(VALUE) \
++ ((unsigned HOST_WIDE_INT) (VALUE) + RISCV_IMM_REACH/2 < RISCV_IMM_REACH)
++
++/* True if VALUE can be loaded into a register using LUI. */
++
++#define LUI_OPERAND(VALUE) \
++ (((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) == ((1UL<<31) - RISCV_IMM_REACH) \
++ || ((VALUE) | ((1UL<<31) - RISCV_IMM_REACH)) + RISCV_IMM_REACH == 0)
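++
++/* E.g. 0x7ff satisfies SMALL_OPERAND (it fits a signed 12-bit
++   immediate), and 0x12345000, whose low 12 bits are clear, satisfies
++   LUI_OPERAND and loads with a single lui; 0x12345678 needs a
++   lui/addi pair. */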
++
++/* Like SMALL_OPERAND and LUI_OPERAND, but applied to the INTVAL of an
++   rtx constant. */
++
++#define SMALL_INT(X) SMALL_OPERAND (INTVAL (X))
++#define LUI_INT(X) LUI_OPERAND (INTVAL (X))
++
++/* Some reloads cannot be performed directly and must go through an
++   intermediate register class; riscv_secondary_reload_class makes
++   that decision. */
++
++#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, X) \
++ riscv_secondary_reload_class (CLASS, MODE, X, true)
++#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, X) \
++ riscv_secondary_reload_class (CLASS, MODE, X, false)
++
++/* Return the maximum number of consecutive registers
++ needed to represent mode MODE in a register of class CLASS. */
++
++#define CLASS_MAX_NREGS(CLASS, MODE) riscv_class_max_nregs (CLASS, MODE)
++
++/* It is undefined to interpret an FP register in a different format than
++ that which it was created to be. */
++
++#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
++ reg_classes_intersect_p (FP_REGS, CLASS)
++
++/* Stack layout; function entry, exit and calling. */
++
++#define STACK_GROWS_DOWNWARD
++
++#define FRAME_GROWS_DOWNWARD 1
++
++#define STARTING_FRAME_OFFSET 0
++
++#define RETURN_ADDR_RTX riscv_return_addr
++
++#define ELIMINABLE_REGS \
++{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
++ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
++ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
++ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
++
++#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
++ (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
++
++/* Allocate stack space for arguments at the beginning of each function. */
++#define ACCUMULATE_OUTGOING_ARGS 1
++
++/* The argument pointer always points to the first argument. */
++#define FIRST_PARM_OFFSET(FNDECL) 0
++
++#define REG_PARM_STACK_SPACE(FNDECL) 0
++
++/* Define this if it is the responsibility of the caller to
++ allocate the area reserved for arguments passed in registers.
++ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
++ of this macro is to determine whether the space is included in
++ `crtl->outgoing_args_size'. */
++#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
++
++#define STACK_BOUNDARY 128
++
++/* Symbolic macros for the registers used to return integer and floating
++ point values. */
++
++#define GP_RETURN GP_ARG_FIRST
++#define FP_RETURN ((TARGET_SOFT_FLOAT) ? GP_RETURN : FP_ARG_FIRST)
++
++#define MAX_ARGS_IN_REGISTERS 8
++
++/* Symbolic macros for the first/last argument registers. */
++
++#define GP_ARG_FIRST (GP_REG_FIRST + 10)
++#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
++#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
++#define FP_ARG_FIRST (FP_REG_FIRST + 10)
++#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
++
++#define LIBCALL_VALUE(MODE) \
++ riscv_function_value (NULL_TREE, NULL_TREE, MODE)
++
++#define FUNCTION_VALUE(VALTYPE, FUNC) \
++ riscv_function_value (VALTYPE, FUNC, VOIDmode)
++
++#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
++
++/* 1 if N is a possible register number for function argument passing.
++   There are no FP argument registers when soft-float. */
++
++/* Accept arguments in a0-a7 and/or fa0-fa7. */
++#define FUNCTION_ARG_REGNO_P(N) \
++ (IN_RANGE((N), GP_ARG_FIRST, GP_ARG_LAST) \
++ || IN_RANGE((N), FP_ARG_FIRST, FP_ARG_LAST))
++
++/* The ABI views the arguments as a structure, of which the first 8
++   words go in registers and the rest go on the stack. If I < 8, the
++   Ith word might go in the Ith integer argument register or the Ith
++   floating-point argument register. */
++
++typedef struct {
++ /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
++ unsigned int num_gprs;
++
++ /* Number of words passed on the stack. */
++ unsigned int stack_words;
++} CUMULATIVE_ARGS;
++
++/* Initialize a variable CUM of type CUMULATIVE_ARGS
++ for a call to a function whose data type is FNTYPE.
++ For a library call, FNTYPE is 0. */
++
++#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
++ memset (&(CUM), 0, sizeof (CUM))
++
++#define EPILOGUE_USES(REGNO) ((REGNO) == RETURN_ADDR_REGNUM)
++
++/* The ABI requires 16-byte stack alignment, even on RV32. */
++#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
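++
++/* E.g. RISCV_STACK_ALIGN (20) == 32: the frame size is rounded up to
++   the next multiple of 16 bytes. */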
++
++#define NO_PROFILE_COUNTERS 1
++
++/* Define this macro if the code for function profiling should come
++ before the function prologue. Normally, the profiling code comes
++ after. */
++
++/* #define PROFILE_BEFORE_PROLOGUE */
++
++/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
++ the stack pointer does not matter. The value is tested only in
++ functions that have frame pointers.
++ No definition is equivalent to always zero. */
++
++#define EXIT_IGNORE_STACK 1
++
++
++/* Trampolines are a block of code followed by two pointers. */
++
++#define TRAMPOLINE_CODE_SIZE 16
++#define TRAMPOLINE_SIZE (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2)
++#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
++
++/* Addressing modes, and classification of registers for them. */
++
++#define REGNO_OK_FOR_INDEX_P(REGNO) 0
++#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
++ riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
++
++/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
++ and check its validity for a certain class.
++ We have two alternate definitions for each of them.
++ The usual definition accepts all pseudo regs; the other rejects them all.
++ The symbol REG_OK_STRICT causes the latter definition to be used.
++
++ Most source files want to accept pseudo regs in the hope that
++ they will get allocated to the class that the insn wants them to be in.
++ Some source files that are used after register allocation
++ need to be strict. */
++
++#ifndef REG_OK_STRICT
++#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
++ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
++#else
++#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
++ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
++#endif
++
++#define REG_OK_FOR_INDEX_P(X) 0
++
++
++/* Maximum number of registers that can appear in a valid memory address. */
++
++#define MAX_REGS_PER_ADDRESS 1
++
++#define CONSTANT_ADDRESS_P(X) \
++ (CONSTANT_P (X) && memory_address_p (SImode, X))
++
++/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
++ 'the start of the function that this code is output in'. */
++
++#define ASM_OUTPUT_LABELREF(FILE,NAME) \
++ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
++ asm_fprintf ((FILE), "%U%s", \
++ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
++ else \
++ asm_fprintf ((FILE), "%U%s", (NAME))
++
++/* This flag marks functions that cannot be lazily bound. */
++#define SYMBOL_FLAG_BIND_NOW (SYMBOL_FLAG_MACH_DEP << 1)
++#define SYMBOL_REF_BIND_NOW_P(RTX) \
++ ((SYMBOL_REF_FLAGS (RTX) & SYMBOL_FLAG_BIND_NOW) != 0)
++
++#define JUMP_TABLES_IN_TEXT_SECTION 0
++#define CASE_VECTOR_MODE SImode
++
++/* Define this as 1 if `char' should by default be signed; else as 0. */
++#define DEFAULT_SIGNED_CHAR 0
++
++/* Consider using fld/fsd to move 8 bytes at a time for RV32IFD. */
++#define MOVE_MAX UNITS_PER_WORD
++#define MAX_MOVE_MAX 8
++
++#define SLOW_BYTE_ACCESS 0
++
++#define SHIFT_COUNT_TRUNCATED 1
++
++/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
++ is done just by pretending it is already truncated. */
++#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) \
++ (TARGET_64BIT ? ((INPREC) <= 32 || (OUTPREC) < 32) : 1)
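++
++/* On RV64, truncating DImode to SImode is therefore *not* a no-op
++   (SImode values are kept sign-extended, so e.g. a sext.w may be
++   required), while truncations to HImode or QImode are. */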
++
++/* Specify the machine mode that pointers have.
++ After generation of rtl, the compiler makes no further distinction
++ between pointers and any other objects of this machine mode. */
++
++#ifndef Pmode
++#define Pmode (TARGET_64BIT ? DImode : SImode)
++#endif
++
++/* Give call MEMs SImode since it is the "most permissive" mode
++ for both 32-bit and 64-bit targets. */
++
++#define FUNCTION_MODE SImode
++
++/* A C expression for the cost of a branch instruction. A value of 2
++ seems to minimize code size. */
++
++#define BRANCH_COST(speed_p, predictable_p) \
++ ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
++
++#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
++
++/* Control the assembler format that we output. */
++
++/* Output to assembler file text saying following lines
++ may contain character constants, extra white space, comments, etc. */
++
++#ifndef ASM_APP_ON
++#define ASM_APP_ON " #APP\n"
++#endif
++
++/* Output to assembler file text saying following lines
++ no longer contain unusual constructs. */
++
++#ifndef ASM_APP_OFF
++#define ASM_APP_OFF " #NO_APP\n"
++#endif
++
++#define REGISTER_NAMES \
++{ "zero","ra", "sp", "gp", "tp", "t0", "t1", "t2", \
++ "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", \
++ "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", \
++ "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", \
++ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", \
++ "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", \
++ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", \
++ "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11", \
++ "arg", "frame", }
++
++#define ADDITIONAL_REGISTER_NAMES \
++{ \
++ { "x0", 0 + GP_REG_FIRST }, \
++ { "x1", 1 + GP_REG_FIRST }, \
++ { "x2", 2 + GP_REG_FIRST }, \
++ { "x3", 3 + GP_REG_FIRST }, \
++ { "x4", 4 + GP_REG_FIRST }, \
++ { "x5", 5 + GP_REG_FIRST }, \
++ { "x6", 6 + GP_REG_FIRST }, \
++ { "x7", 7 + GP_REG_FIRST }, \
++ { "x8", 8 + GP_REG_FIRST }, \
++ { "x9", 9 + GP_REG_FIRST }, \
++ { "x10", 10 + GP_REG_FIRST }, \
++ { "x11", 11 + GP_REG_FIRST }, \
++ { "x12", 12 + GP_REG_FIRST }, \
++ { "x13", 13 + GP_REG_FIRST }, \
++ { "x14", 14 + GP_REG_FIRST }, \
++ { "x15", 15 + GP_REG_FIRST }, \
++ { "x16", 16 + GP_REG_FIRST }, \
++ { "x17", 17 + GP_REG_FIRST }, \
++ { "x18", 18 + GP_REG_FIRST }, \
++ { "x19", 19 + GP_REG_FIRST }, \
++ { "x20", 20 + GP_REG_FIRST }, \
++ { "x21", 21 + GP_REG_FIRST }, \
++ { "x22", 22 + GP_REG_FIRST }, \
++ { "x23", 23 + GP_REG_FIRST }, \
++ { "x24", 24 + GP_REG_FIRST }, \
++ { "x25", 25 + GP_REG_FIRST }, \
++ { "x26", 26 + GP_REG_FIRST }, \
++ { "x27", 27 + GP_REG_FIRST }, \
++ { "x28", 28 + GP_REG_FIRST }, \
++ { "x29", 29 + GP_REG_FIRST }, \
++ { "x30", 30 + GP_REG_FIRST }, \
++ { "x31", 31 + GP_REG_FIRST }, \
++ { "f0", 0 + FP_REG_FIRST }, \
++ { "f1", 1 + FP_REG_FIRST }, \
++ { "f2", 2 + FP_REG_FIRST }, \
++ { "f3", 3 + FP_REG_FIRST }, \
++ { "f4", 4 + FP_REG_FIRST }, \
++ { "f5", 5 + FP_REG_FIRST }, \
++ { "f6", 6 + FP_REG_FIRST }, \
++ { "f7", 7 + FP_REG_FIRST }, \
++ { "f8", 8 + FP_REG_FIRST }, \
++ { "f9", 9 + FP_REG_FIRST }, \
++ { "f10", 10 + FP_REG_FIRST }, \
++ { "f11", 11 + FP_REG_FIRST }, \
++ { "f12", 12 + FP_REG_FIRST }, \
++ { "f13", 13 + FP_REG_FIRST }, \
++ { "f14", 14 + FP_REG_FIRST }, \
++ { "f15", 15 + FP_REG_FIRST }, \
++ { "f16", 16 + FP_REG_FIRST }, \
++ { "f17", 17 + FP_REG_FIRST }, \
++ { "f18", 18 + FP_REG_FIRST }, \
++ { "f19", 19 + FP_REG_FIRST }, \
++ { "f20", 20 + FP_REG_FIRST }, \
++ { "f21", 21 + FP_REG_FIRST }, \
++ { "f22", 22 + FP_REG_FIRST }, \
++ { "f23", 23 + FP_REG_FIRST }, \
++ { "f24", 24 + FP_REG_FIRST }, \
++ { "f25", 25 + FP_REG_FIRST }, \
++ { "f26", 26 + FP_REG_FIRST }, \
++ { "f27", 27 + FP_REG_FIRST }, \
++ { "f28", 28 + FP_REG_FIRST }, \
++ { "f29", 29 + FP_REG_FIRST }, \
++ { "f30", 30 + FP_REG_FIRST }, \
++ { "f31", 31 + FP_REG_FIRST }, \
++}
++
++/* Globalizing directive for a label. */
++#define GLOBAL_ASM_OP "\t.globl\t"
++
++/* This is how to store into the string LABEL
++ the symbol_ref name of an internal numbered label where
++ PREFIX is the class of label and NUM is the number within the class.
++ This is suitable for output with `assemble_name'. */
++
++#undef ASM_GENERATE_INTERNAL_LABEL
++#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
++ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
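++
++/* With LOCAL_LABEL_PREFIX "." this produces e.g. "*.L3" for PREFIX "L"
++   and NUM 3; the leading '*' tells assemble_name to emit the rest
++   verbatim. */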
++
++/* This is how to output an element of a case-vector that is absolute. */
++
++#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
++ fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
++
++/* This is how to output an element of a PIC case-vector. */
++
++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
++ fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n", \
++ LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
++
++/* This is how to output an assembler line
++ that says to advance the location counter
++ to a multiple of 2**LOG bytes. */
++
++#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
++ fprintf (STREAM, "\t.align\t%d\n", (LOG))
++
++/* Define the strings to put out for each section in the object file. */
++#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */
++#define DATA_SECTION_ASM_OP "\t.data" /* large data */
++#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
++#define BSS_SECTION_ASM_OP "\t.bss"
++#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\",@nobits"
++#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\",@progbits"
++
++#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
++do \
++ { \
++ fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \
++ reg_names[STACK_POINTER_REGNUM], \
++ reg_names[STACK_POINTER_REGNUM], \
++ TARGET_64BIT ? "sd" : "sw", \
++ reg_names[REGNO], \
++ reg_names[STACK_POINTER_REGNUM]); \
++ } \
++while (0)
++
++#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
++do \
++ { \
++ fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n", \
++ TARGET_64BIT ? "ld" : "lw", \
++ reg_names[REGNO], \
++ reg_names[STACK_POINTER_REGNUM], \
++ reg_names[STACK_POINTER_REGNUM], \
++ reg_names[STACK_POINTER_REGNUM]); \
++ } \
++while (0)
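++
++/* For a0 on RV64, ASM_OUTPUT_REG_PUSH emits:
++
++	addi	sp,sp,-8
++	sd	a0,0(sp)
++
++   and ASM_OUTPUT_REG_POP restores it with the matching ld/addi pair. */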
++
++#define ASM_COMMENT_START "#"
++
++#undef SIZE_TYPE
++#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
++
++#undef PTRDIFF_TYPE
++#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
++
++/* The maximum number of bytes that can be copied by one iteration of
++ a movmemsi loop; see riscv_block_move_loop. */
++#define RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
++
++/* The maximum number of bytes that can be copied by a straight-line
++ implementation of movmemsi; see riscv_block_move_straight. We want
++ to make sure that any loop-based implementation will iterate at
++ least twice. */
++#define RISCV_MAX_MOVE_BYTES_STRAIGHT (RISCV_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
++
++/* The base cost of a memcpy call, for MOVE_RATIO and friends. */
++
++#define RISCV_CALL_RATIO 6
++
++/* Any loop-based implementation of movmemsi will have at least
++ RISCV_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
++ moves, so allow individual copies of fewer elements.
++
++ When movmemsi is not available, use a value approximating
++ the length of a memcpy call sequence, so that move_by_pieces
++ will generate inline code if it is shorter than a function call.
++ Since move_by_pieces_ninsns counts memory-to-memory moves, but
++ we'll have to generate a load/store pair for each, halve the
++ value of RISCV_CALL_RATIO to take that into account. */
++
++#define MOVE_RATIO(speed) \
++ (HAVE_movmemsi \
++ ? RISCV_MAX_MOVE_BYTES_STRAIGHT / MOVE_MAX \
++ : RISCV_CALL_RATIO / 2)
++
++/* movmemsi is meant to generate code that is at least as good as
++ move_by_pieces. However, movmemsi effectively uses a by-pieces
++ implementation both for moves smaller than a word and for word-aligned
++ moves of no more than RISCV_MAX_MOVE_BYTES_STRAIGHT bytes. We should
++ allow the tree-level optimisers to do such moves by pieces, as it
++ often exposes other optimization opportunities. We might as well
++ continue to use movmemsi at the rtl level though, as it produces
++ better code when scheduling is disabled (such as at -O). */
++
++#define MOVE_BY_PIECES_P(SIZE, ALIGN) \
++ (HAVE_movmemsi \
++ ? (!currently_expanding_to_rtl \
++ && ((ALIGN) < BITS_PER_WORD \
++ ? (SIZE) < UNITS_PER_WORD \
++ : (SIZE) <= RISCV_MAX_MOVE_BYTES_STRAIGHT)) \
++ : (move_by_pieces_ninsns (SIZE, ALIGN, MOVE_MAX_PIECES + 1) \
++ < (unsigned int) MOVE_RATIO (false)))
++
++/* For CLEAR_RATIO, when optimizing for size, give a better estimate
++ of the length of a memset call, but use the default otherwise. */
++
++#define CLEAR_RATIO(speed)\
++ ((speed) ? 15 : RISCV_CALL_RATIO)
++
++/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
++ optimizing for size adjust the ratio to account for the overhead of
++ loading the constant and replicating it across the word. */
++
++#define SET_RATIO(speed) \
++ ((speed) ? 15 : RISCV_CALL_RATIO - 2)
++
++/* STORE_BY_PIECES_P can be used when copying a constant string, but
++ in that case each word takes 3 insns (lui, ori, sw), or more in
++ 64-bit mode, instead of 2 (lw, sw). For now we always fail this
++ and let the move_by_pieces code copy the string from read-only
++ memory. In the future, this could be tuned further for multi-issue
++ CPUs that can issue stores down one pipe and arithmetic instructions
++ down another; in that case, the lui/ori/sw combination would be a
++ win for long enough strings. */
++
++#define STORE_BY_PIECES_P(SIZE, ALIGN) 0
++
++#ifndef HAVE_AS_TLS
++#define HAVE_AS_TLS 0
++#endif
++
++#ifndef USED_FOR_TARGET
++
++extern const enum reg_class riscv_regno_to_class[];
++extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
++extern const char* riscv_hi_relocs[];
++#endif
++
++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
++ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
+diff -urN original-gcc/gcc/config/riscv/riscv.md gcc/gcc/config/riscv/riscv.md
+--- original-gcc/gcc/config/riscv/riscv.md 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/riscv.md 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,2423 @@
++;; Machine description for RISC-V for GNU compiler.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_c_enum "unspec" [
++ ;; Floating-point moves.
++ UNSPEC_LOAD_LOW
++ UNSPEC_LOAD_HIGH
++ UNSPEC_STORE_WORD
++
++ ;; GP manipulation.
++ UNSPEC_EH_RETURN
++
++ ;; Symbolic accesses.
++ UNSPEC_ADDRESS_FIRST
++ UNSPEC_LOAD_GOT
++ UNSPEC_TLS
++ UNSPEC_TLS_LE
++ UNSPEC_TLS_IE
++ UNSPEC_TLS_GD
++
++ ;; Blockage and synchronisation.
++ UNSPEC_BLOCKAGE
++ UNSPEC_FENCE
++ UNSPEC_FENCE_I
++])
++
++(define_constants
++ [(RETURN_ADDR_REGNUM 1)
++])
++
++(include "predicates.md")
++(include "constraints.md")
++
++;; ....................
++;;
++;; Attributes
++;;
++;; ....................
++
++(define_attr "got" "unset,xgot_high,load"
++ (const_string "unset"))
++
++;; For jal instructions, this attribute is DIRECT when the target address
++;; is symbolic and INDIRECT when it is a register.
++(define_attr "jal" "unset,direct,indirect"
++ (const_string "unset"))
++
++;; Classification of moves, extensions and truncations. Most values
++;; are as for "type" (see below) but there are also the following
++;; move-specific values:
++;;
++;; andi a single ANDI instruction
++;; shift_shift a shift left followed by a shift right
++;;
++;; This attribute is used to determine the instruction's length and
++;; scheduling type. For doubleword moves, the attribute always describes
++;; the split instructions; in some cases, it is more appropriate for the
++;; scheduling type to be "multi" instead.
++(define_attr "move_type"
++ "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
++ const,logical,arith,andi,shift_shift"
++ (const_string "unknown"))
++
++(define_attr "alu_type" "unknown,add,sub,and,or,xor"
++ (const_string "unknown"))
++
++;; Main data type used by the insn
++(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FPSW"
++ (const_string "unknown"))
++
++;; True if the main data type is twice the size of a word.
++(define_attr "dword_mode" "no,yes"
++ (cond [(and (eq_attr "mode" "DI,DF")
++ (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
++ (const_string "yes")
++
++ (and (eq_attr "mode" "TI,TF")
++ (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
++ (const_string "yes")]
++ (const_string "no")))
++
++;; Classification of each insn.
++;; branch conditional branch
++;; jump unconditional jump
++;; call unconditional call
++;; load load instruction(s)
++;; fpload floating point load
++;; fpidxload floating point indexed load
++;; store store instruction(s)
++;; fpstore floating point store
++;; fpidxstore floating point indexed store
++;; mtc transfer to coprocessor
++;; mfc transfer from coprocessor
++;; const load constant
++;; arith integer arithmetic instructions
++;; logical integer logical instructions
++;; shift integer shift instructions
++;; slt set less than instructions
++;; imul integer multiply
++;; idiv integer divide
++;; move integer register move (addi rd, rs1, 0)
++;; fmove floating point register move
++;; fadd floating point add/subtract
++;; fmul floating point multiply
++;; fmadd floating point multiply-add
++;; fdiv floating point divide
++;; fcmp floating point compare
++;; fcvt floating point convert
++;; fsqrt floating point square root
++;; multi multiword sequence (or user asm statements)
++;; nop no operation
++;; ghost an instruction that produces no real code
++(define_attr "type"
++ "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
++ mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
++ fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
++ (cond [(eq_attr "jal" "!unset") (const_string "call")
++ (eq_attr "got" "load") (const_string "load")
++
++ (eq_attr "alu_type" "add,sub") (const_string "arith")
++
++ (eq_attr "alu_type" "and,or,xor") (const_string "logical")
++
++ ;; If a doubleword move uses these expensive instructions,
++ ;; it is usually better to schedule them in the same way
++ ;; as the singleword form, rather than as "multi".
++ (eq_attr "move_type" "load") (const_string "load")
++ (eq_attr "move_type" "fpload") (const_string "fpload")
++ (eq_attr "move_type" "store") (const_string "store")
++ (eq_attr "move_type" "fpstore") (const_string "fpstore")
++ (eq_attr "move_type" "mtc") (const_string "mtc")
++ (eq_attr "move_type" "mfc") (const_string "mfc")
++
++ ;; These types of move are always single insns.
++ (eq_attr "move_type" "fmove") (const_string "fmove")
++ (eq_attr "move_type" "arith") (const_string "arith")
++ (eq_attr "move_type" "logical") (const_string "logical")
++ (eq_attr "move_type" "andi") (const_string "logical")
++
++ ;; These types of move are always split.
++ (eq_attr "move_type" "shift_shift")
++ (const_string "multi")
++
++ ;; These types of move are split for doubleword modes only.
++ (and (eq_attr "move_type" "move,const")
++ (eq_attr "dword_mode" "yes"))
++ (const_string "multi")
++ (eq_attr "move_type" "move") (const_string "move")
++ (eq_attr "move_type" "const") (const_string "const")]
++ (const_string "unknown")))
++
++;; Mode for conversion types (fcvt)
++;; I2S integer to float single (SI/DI to SF)
++;; I2D integer to float double (SI/DI to DF)
++;; S2I float to integer (SF to SI/DI)
++;; D2I float to integer (DF to SI/DI)
++;; D2S double to float single
++;; S2D float single to double
++
++(define_attr "cnv_mode" "unknown,I2S,I2D,S2I,D2I,D2S,S2D"
++ (const_string "unknown"))
++
++;; Length of instruction in bytes.
++(define_attr "length" ""
++ (cond [
++	 ;; Direct branch instructions have a range of [-0x1000,0xffc],
++	 ;; relative to the address of the branch itself (RISC-V has no
++	 ;; delay slots).  If a branch is
++ ;; outside this range, convert a branch like:
++ ;;
++ ;; bne r1,r2,target
++ ;;
++ ;; to:
++ ;;
++ ;; beq r1,r2,1f
++ ;; j target
++ ;; 1:
++ ;;
++ (eq_attr "type" "branch")
++ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
++ (le (minus (pc) (match_dup 0)) (const_int 4092)))
++ (const_int 4)
++ (const_int 8))
++
++ ;; Conservatively assume calls take two instructions, as in:
++ ;; auipc t0, %pcrel_hi(target)
++ ;; jalr ra, t0, %lo(target)
++ ;; The linker will relax these into JAL when appropriate.
++ (eq_attr "type" "call")
++ (const_int 8)
++
++ ;; "Ghost" instructions occupy no space.
++ (eq_attr "type" "ghost")
++ (const_int 0)
++
++ (eq_attr "got" "load") (const_int 8)
++
++ ;; SHIFT_SHIFTs are decomposed into two separate instructions.
++ (eq_attr "move_type" "shift_shift")
++ (const_int 8)
++
++ ;; Check for doubleword moves that are decomposed into two
++ ;; instructions.
++ (and (eq_attr "move_type" "mtc,mfc,move")
++ (eq_attr "dword_mode" "yes"))
++ (const_int 8)
++
++ ;; Doubleword CONST{,N} moves are split into two word
++ ;; CONST{,N} moves.
++ (and (eq_attr "move_type" "const")
++ (eq_attr "dword_mode" "yes"))
++ (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
++
++ ;; Otherwise, constants, loads and stores are handled by external
++ ;; routines.
++ (eq_attr "move_type" "load,fpload")
++ (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
++ (eq_attr "move_type" "store,fpstore")
++ (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
++ ] (const_int 4)))
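++
++;; For example, a conditional branch to a target 64 KiB away falls
++;; outside the +/-4 KiB range above, so it is rewritten as the
++;; inverted-branch/jump pair and assigned length 8.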
++
++;; Describe a user's asm statement.
++(define_asm_attributes
++ [(set_attr "type" "multi")])
++
++;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
++;; from the same template.
++(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
++(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
++
++;; A copy of GPR that can be used when a pattern has two independent
++;; modes.
++(define_mode_iterator GPR2 [SI (DI "TARGET_64BIT")])
++
++;; This mode iterator allows :P to be used for patterns that operate on
++;; pointer-sized quantities. Exactly one of the two alternatives will match.
++(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
++
++;; 32-bit integer moves for which we provide move patterns.
++(define_mode_iterator IMOVE32 [SI])
++
++;; 64-bit modes for which we provide move patterns.
++(define_mode_iterator MOVE64 [DI DF])
++
++;; 128-bit modes for which we provide move patterns on 64-bit targets.
++(define_mode_iterator MOVE128 [TI TF])
++
++;; This mode iterator allows the QI and HI extension patterns to be
++;; defined from the same template.
++(define_mode_iterator SHORT [QI HI])
++
++;; Likewise the 64-bit truncate-and-shift patterns.
++(define_mode_iterator SUBDI [QI HI SI])
++(define_mode_iterator HISI [HI SI])
++(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
++
++;; This mode iterator allows :ANYF to be used wherever a scalar or vector
++;; floating-point mode is allowed.
++(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
++ (DF "TARGET_HARD_FLOAT")])
++(define_mode_iterator ANYIF [QI HI SI (DI "TARGET_64BIT")
++ (SF "TARGET_HARD_FLOAT")
++ (DF "TARGET_HARD_FLOAT")])
++
++;; Like ANYF, but only applies to scalar modes.
++(define_mode_iterator SCALARF [(SF "TARGET_HARD_FLOAT")
++ (DF "TARGET_HARD_FLOAT")])
++
++;; A floating-point mode for which moves involving FPRs may need to be split.
++(define_mode_iterator SPLITF
++ [(DF "!TARGET_64BIT")
++ (DI "!TARGET_64BIT")
++ (TF "TARGET_64BIT")])
++
++;; This attribute gives the length suffix for a sign- or zero-extension
++;; instruction.
++(define_mode_attr size [(QI "b") (HI "h")])
++
++;; Instruction names for loads.
++(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
++
++;; Instruction names for stores.
++(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
++
++;; This attribute gives the best constraint to use for registers of
++;; a given mode.
++(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
++
++;; This attribute gives the format suffix for floating-point operations.
++(define_mode_attr fmt [(SF "s") (DF "d")])
++
++;; This attribute gives the format suffix for atomic memory operations.
++(define_mode_attr amo [(SI "w") (DI "d")])
++
++;; This attribute gives the upper-case mode name for one unit of a
++;; floating-point mode.
++(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
++
++;; This attribute gives the integer mode that has half the size of
++;; the controlling mode.
++(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
++
++;; This code iterator allows signed and unsigned widening multiplications
++;; to use the same template.
++(define_code_iterator any_extend [sign_extend zero_extend])
++
++;; This code iterator allows the two right shift instructions to be
++;; generated from the same template.
++(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
++
++;; This code iterator allows the three shift instructions to be generated
++;; from the same template.
++(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
++
++;; This code iterator allows unsigned and signed division to be generated
++;; from the same template.
++(define_code_iterator any_div [div udiv])
++
++;; This code iterator allows unsigned and signed modulus to be generated
++;; from the same template.
++(define_code_iterator any_mod [mod umod])
++
++;; These code iterators allow the signed and unsigned scc operations to use
++;; the same template.
++(define_code_iterator any_gt [gt gtu])
++(define_code_iterator any_ge [ge geu])
++(define_code_iterator any_lt [lt ltu])
++(define_code_iterator any_le [le leu])
++
++;; <u> expands to an empty string when doing a signed operation and
++;; "u" when doing an unsigned operation.
++(define_code_attr u [(sign_extend "") (zero_extend "u")
++ (div "") (udiv "u")
++ (mod "") (umod "u")
++ (gt "") (gtu "u")
++ (ge "") (geu "u")
++ (lt "") (ltu "u")
++ (le "") (leu "u")])
++
++;; <su> is like <u>, but the signed form expands to "s" rather than "".
++(define_code_attr su [(sign_extend "s") (zero_extend "u")])
++
++;; <optab> expands to the name of the optab for a particular code.
++(define_code_attr optab [(ashift "ashl")
++ (ashiftrt "ashr")
++ (lshiftrt "lshr")
++ (ior "ior")
++ (xor "xor")
++ (and "and")
++ (plus "add")
++ (minus "sub")])
++
++;; <insn> expands to the name of the insn that implements a particular code.
++(define_code_attr insn [(ashift "sll")
++ (ashiftrt "sra")
++ (lshiftrt "srl")
++ (ior "or")
++ (xor "xor")
++ (and "and")
++ (plus "add")
++ (minus "sub")])
++
++;; Pipeline descriptions.
++;;
++;; generic.md provides a fallback for processors without a specific
++;; pipeline description. It is derived from the old define_function_unit
++;; version and uses the "alu" and "imuldiv" units declared below.
++;;
++;; Some of the processor-specific files are also derived from old
++;; define_function_unit descriptions and simply override the parts of
++;; generic.md that don't apply. The other processor-specific files
++;; are self-contained.
++(define_automaton "alu,imuldiv")
++
++(define_cpu_unit "alu" "alu")
++(define_cpu_unit "imuldiv" "imuldiv")
++
++;; Ghost instructions produce no real code and introduce no hazards.
++;; They exist purely to express an effect on dataflow.
++(define_insn_reservation "ghost" 0
++ (eq_attr "type" "ghost")
++ "nothing")
++
++(include "generic.md")
++
++;;
++;; ....................
++;;
++;; ADDITION
++;;
++;; ....................
++;;
++
++(define_insn "add<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ ""
++ "fadd.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "add<mode>3"
++ [(set (match_operand:GPR 0 "register_operand")
++ (plus:GPR (match_operand:GPR 1 "register_operand")
++ (match_operand:GPR 2 "arith_operand")))]
++ "")
++
++(define_insn "*addsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (plus:SI (match_operand:GPR 1 "register_operand" "r,r")
++ (match_operand:GPR2 2 "arith_operand" "r,Q")))]
++ ""
++ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
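++
++;; On RV64 the SImode add must be addw, not add, so that the 32-bit
++;; result stays sign-extended in the 64-bit register, matching how
++;; SImode values are represented throughout.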
++
++(define_insn "*adddi3"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
++ (match_operand:DI 2 "arith_operand" "r,Q")))]
++ "TARGET_64BIT"
++ "add\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "DI")])
++
++(define_insn "*addsi3_extended"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (sign_extend:DI
++ (plus:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "arith_operand" "r,Q"))))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*adddisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
++ (truncate:SI (match_operand:DI 2 "arith_operand" "r,Q"))))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*adddisisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (plus:SI (truncate:SI (match_operand:DI 1 "register_operand" "r,r"))
++ (match_operand:SI 2 "arith_operand" "r,Q")))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*adddi3_truncsi"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (truncate:SI
++ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
++ (match_operand:DI 2 "arith_operand" "r,Q"))))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; SUBTRACTION
++;;
++;; ....................
++;;
++
++(define_insn "sub<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ ""
++ "fsub.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "sub<mode>3"
++ [(set (match_operand:GPR 0 "register_operand")
++ (minus:GPR (match_operand:GPR 1 "reg_or_0_operand")
++ (match_operand:GPR 2 "register_operand")))]
++ "")
++
++(define_insn "*subdi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_64BIT"
++ "sub\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "DI")])
++
++(define_insn "*subsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (match_operand:GPR 1 "reg_or_0_operand" "rJ")
++ (match_operand:GPR2 2 "register_operand" "r")))]
++ ""
++ { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subsi3_extended"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
++ (match_operand:SI 2 "register_operand" "r"))))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "DI")])
++
++(define_insn "*subdisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
++ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subdisisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (truncate:SI (match_operand:DI 1 "reg_or_0_operand" "rJ"))
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subsidisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
++ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subdi3_truncsi"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (truncate:SI
++ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,r")
++ (match_operand:DI 2 "arith_operand" "r,Q"))))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; MULTIPLICATION
++;;
++;; ....................
++;;
++
++(define_insn "mul<mode>3"
++ [(set (match_operand:SCALARF 0 "register_operand" "=f")
++ (mult:SCALARF (match_operand:SCALARF 1 "register_operand" "f")
++ (match_operand:SCALARF 2 "register_operand" "f")))]
++ ""
++ "fmul.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmul")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_expand "mul<mode>3"
++ [(set (match_operand:GPR 0 "register_operand")
++ (mult:GPR (match_operand:GPR 1 "reg_or_0_operand")
++ (match_operand:GPR 2 "register_operand")))]
++ "TARGET_MULDIV")
++
++(define_insn "*mulsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (mult:SI (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR2 2 "register_operand" "r")))]
++ "TARGET_MULDIV"
++ { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++(define_insn "*muldisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (mult:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
++ (truncate:SI (match_operand:DI 2 "register_operand" "r"))))]
++ "TARGET_MULDIV && TARGET_64BIT"
++ "mulw\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++(define_insn "*muldi3_truncsi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (mult:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r"))))]
++ "TARGET_MULDIV && TARGET_64BIT"
++ "mulw\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++(define_insn "*muldi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_MULDIV && TARGET_64BIT"
++ "mul\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "DI")])
++
++;;
++;; ........................
++;;
++;; MULTIPLICATION HIGH-PART
++;;
++;; ........................
++;;
++
++
++;; Using a clobber here is inelegant, but it keeps the post-reload
++;; split simple.
++(define_insn_and_split "<u>mulditi3"
++ [(set (match_operand:TI 0 "register_operand" "=r")
++ (mult:TI (any_extend:TI
++ (match_operand:DI 1 "register_operand" "r"))
++ (any_extend:TI
++ (match_operand:DI 2 "register_operand" "r"))))
++ (clobber (match_scratch:DI 3 "=r"))]
++ "TARGET_MULDIV && TARGET_64BIT"
++ "#"
++ "reload_completed"
++ [
++ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
++ (set (match_dup 4) (truncate:DI
++ (lshiftrt:TI
++ (mult:TI (any_extend:TI (match_dup 1))
++ (any_extend:TI (match_dup 2)))
++ (const_int 64))))
++ (set (match_dup 5) (match_dup 3))
++ ]
++{
++ operands[4] = riscv_subword (operands[0], true);
++ operands[5] = riscv_subword (operands[0], false);
++}
++ )
++
++(define_insn "<u>muldi3_highpart"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (truncate:DI
++ (lshiftrt:TI
++ (mult:TI (any_extend:TI
++ (match_operand:DI 1 "register_operand" "r"))
++ (any_extend:TI
++ (match_operand:DI 2 "register_operand" "r")))
++ (const_int 64))))]
++ "TARGET_MULDIV && TARGET_64BIT"
++ "mulh<u>\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "DI")])
++
++
++(define_insn_and_split "usmulditi3"
++ [(set (match_operand:TI 0 "register_operand" "=r")
++ (mult:TI (zero_extend:TI
++ (match_operand:DI 1 "register_operand" "r"))
++ (sign_extend:TI
++ (match_operand:DI 2 "register_operand" "r"))))
++ (clobber (match_scratch:DI 3 "=r"))]
++ "TARGET_MULDIV && TARGET_64BIT"
++ "#"
++ "reload_completed"
++ [
++ (set (match_dup 3) (mult:DI (match_dup 1) (match_dup 2)))
++ (set (match_dup 4) (truncate:DI
++ (lshiftrt:TI
++ (mult:TI (zero_extend:TI (match_dup 1))
++ (sign_extend:TI (match_dup 2)))
++ (const_int 64))))
++ (set (match_dup 5) (match_dup 3))
++ ]
++{
++ operands[4] = riscv_subword (operands[0], true);
++ operands[5] = riscv_subword (operands[0], false);
++}
++ )
++
++(define_insn "usmuldi3_highpart"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (truncate:DI
++ (lshiftrt:TI
++ (mult:TI (zero_extend:TI
++ (match_operand:DI 1 "register_operand" "r"))
++ (sign_extend:TI
++ (match_operand:DI 2 "register_operand" "r")))
++ (const_int 64))))]
++ "TARGET_MULDIV && TARGET_64BIT"
++ "mulhsu\t%0,%2,%1"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "DI")])
++
++(define_expand "<u>mulsidi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI (any_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (any_extend:DI
++ (match_operand:SI 2 "register_operand" "r"))))
++ (clobber (match_scratch:SI 3 "=r"))]
++ "TARGET_MULDIV && !TARGET_64BIT"
++{
++ rtx temp = gen_reg_rtx (SImode);
++ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++ emit_insn (gen_<u>mulsi3_highpart (riscv_subword (operands[0], true),
++ operands[1], operands[2]));
++ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
++ DONE;
++}
++ )
++
++(define_insn "<u>mulsi3_highpart"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (lshiftrt:DI
++ (mult:DI (any_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (any_extend:DI
++ (match_operand:SI 2 "register_operand" "r")))
++ (const_int 32))))]
++ "TARGET_MULDIV && !TARGET_64BIT"
++ "mulh<u>\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++
++(define_expand "usmulsidi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI (zero_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI
++ (match_operand:SI 2 "register_operand" "r"))))
++ (clobber (match_scratch:SI 3 "=r"))]
++ "TARGET_MULDIV && !TARGET_64BIT"
++{
++ rtx temp = gen_reg_rtx (SImode);
++ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++ emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
++ operands[1], operands[2]));
++ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
++ DONE;
++}
++ )
++
++(define_insn "usmulsi3_highpart"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (lshiftrt:DI
++ (mult:DI (zero_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI
++ (match_operand:SI 2 "register_operand" "r")))
++ (const_int 32))))]
++ "TARGET_MULDIV && !TARGET_64BIT"
++ "mulhsu\t%0,%2,%1"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; DIVISION and REMAINDER
++;;
++;; ....................
++;;
++
++(define_insn "<u>divsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_div:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_MULDIV"
++ { return TARGET_64BIT ? "div<u>w\t%0,%1,%2" : "div<u>\t%0,%1,%2"; }
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "SI")])
++
++(define_insn "<u>divdi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (any_div:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_MULDIV && TARGET_64BIT"
++ "div<u>\t%0,%1,%2"
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "DI")])
++
++(define_insn "<u>modsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_mod:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_MULDIV"
++ { return TARGET_64BIT ? "rem<u>w\t%0,%1,%2" : "rem<u>\t%0,%1,%2"; }
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "SI")])
++
++(define_insn "<u>moddi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (any_mod:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_MULDIV && TARGET_64BIT"
++ "rem<u>\t%0,%1,%2"
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "DI")])
++
++(define_insn "div<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (div:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_FDIV"
++ "fdiv.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fdiv")
++ (set_attr "mode" "<UNITMODE>")])
++
++;;
++;; ....................
++;;
++;; SQUARE ROOT
++;;
++;; ....................
++
++(define_insn "sqrt<mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_FDIV"
++{
++ return "fsqrt.<fmt>\t%0,%1";
++}
++ [(set_attr "type" "fsqrt")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; Floating point multiply accumulate instructions.
++
++(define_insn "fma<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "fms<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT"
++ "fmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "nfma<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT"
++ "fnmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "nfms<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
++ "TARGET_HARD_FLOAT"
++ "fnmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; modulo signed zeros, -(a*b+c) == -c-a*b
++(define_insn "*nfma<mode>4_fastmath"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (minus:ANYF
++ (match_operand:ANYF 3 "register_operand" "f")
++ (mult:ANYF
++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
++ (match_operand:ANYF 2 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++ "fnmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; modulo signed zeros, -(a*b-c) == c-a*b
++(define_insn "*nfms<mode>4_fastmath"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (minus:ANYF
++ (match_operand:ANYF 3 "register_operand" "f")
++ (mult:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++ "fnmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;;
++;; ....................
++;;
++;; ABSOLUTE VALUE
++;;
++;; ....................
++
++(define_insn "abs<mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fabs.<fmt>\t%0,%1"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++
++;;
++;; ....................
++;;
++;; MIN/MAX
++;;
++;; ....................
++
++(define_insn "smin<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmin.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "smax<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmax.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++
++;;
++;; ....................
++;;
++;; NEGATION and ONE'S COMPLEMENT
++;;
++;; ....................
++
++(define_insn "neg<mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fneg.<fmt>\t%0,%1"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "one_cmpl<mode>2"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (not:GPR (match_operand:GPR 1 "register_operand" "r")))]
++ ""
++ "not\t%0,%1"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++;;
++;; ....................
++;;
++;; LOGICAL
++;;
++;; ....................
++;;
++
++(define_insn "and<mode>3"
++ [(set (match_operand:GPR 0 "register_operand" "=r,r")
++ (and:GPR (match_operand:GPR 1 "register_operand" "%r,r")
++ (match_operand:GPR 2 "arith_operand" "r,Q")))]
++ ""
++ "and\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "ior<mode>3"
++ [(set (match_operand:GPR 0 "register_operand" "=r,r")
++ (ior:GPR (match_operand:GPR 1 "register_operand" "%r,r")
++ (match_operand:GPR 2 "arith_operand" "r,Q")))]
++ ""
++ "or\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "xor<mode>3"
++ [(set (match_operand:GPR 0 "register_operand" "=r,r")
++ (xor:GPR (match_operand:GPR 1 "register_operand" "%r,r")
++ (match_operand:GPR 2 "arith_operand" "r,Q")))]
++ ""
++ "xor\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++;;
++;; ....................
++;;
++;; TRUNCATION
++;;
++;; ....................
++
++(define_insn "truncdfsf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.s.d\t%0,%1"
++ [(set_attr "type" "fcvt")
++ (set_attr "cnv_mode" "D2S")
++ (set_attr "mode" "SF")])
++
++;; Integer truncation patterns. Truncating to HImode/QImode is a no-op.
++;; Truncating from DImode to SImode is not, because we always keep SImode
++;; values sign-extended in a register so we can safely use DImode branches
++;; and comparisons on SImode values.
++
++(define_insn "truncdisi2"
++ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
++ (truncate:SI (match_operand:DI 1 "register_operand" "r,r")))]
++ "TARGET_64BIT"
++ "@
++ sext.w\t%0,%1
++ sw\t%1,%0"
++ [(set_attr "move_type" "arith,store")
++ (set_attr "mode" "SI")])
++
++;; Combiner patterns to optimize shift/truncate combinations.
++
++(define_insn "*ashr_trunc<mode>"
++ [(set (match_operand:SUBDI 0 "register_operand" "=r")
++ (truncate:SUBDI
++ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "const_arith_operand" ""))))]
++ "TARGET_64BIT && IN_RANGE (INTVAL (operands[2]), 32, 63)"
++ "sra\t%0,%1,%2"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "*lshr32_trunc<mode>"
++ [(set (match_operand:SUBDI 0 "register_operand" "=r")
++ (truncate:SUBDI
++ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
++ (const_int 32))))]
++ "TARGET_64BIT"
++ "sra\t%0,%1,32"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "<MODE>")])
++
++;;
++;; ....................
++;;
++;; ZERO EXTENSION
++;;
++;; ....................
++
++;; Extension insns.
++
++(define_insn_and_split "zero_extendsidi2"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,W")))]
++ "TARGET_64BIT"
++ "@
++ #
++ lwu\t%0,%1"
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0)
++ (ashift:DI (match_dup 1) (const_int 32)))
++ (set (match_dup 0)
++ (lshiftrt:DI (match_dup 0) (const_int 32)))]
++ { operands[1] = gen_lowpart (DImode, operands[1]); }
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "DI")])
++
++;; Combine is not allowed to convert this insn into a zero_extendsidi2
++;; because of TRULY_NOOP_TRUNCATION.
++
++(define_insn_and_split "*clear_upper32"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (and:DI (match_operand:DI 1 "nonimmediate_operand" "r,W")
++ (const_int 4294967295)))]
++ "TARGET_64BIT"
++{
++ if (which_alternative == 0)
++ return "#";
++
++ operands[1] = gen_lowpart (SImode, operands[1]);
++ return "lwu\t%0,%1";
++}
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0)
++ (ashift:DI (match_dup 1) (const_int 32)))
++ (set (match_dup 0)
++ (lshiftrt:DI (match_dup 0) (const_int 32)))]
++ ""
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "DI")])
++
++(define_insn_and_split "zero_extendhi<GPR:mode>2"
++ [(set (match_operand:GPR 0 "register_operand" "=r,r")
++ (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ #
++ lhu\t%0,%1"
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0)
++ (ashift:GPR (match_dup 1) (match_dup 2)))
++ (set (match_dup 0)
++ (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
++ {
++ operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
++ operands[2] = GEN_INT(GET_MODE_BITSIZE(<GPR:MODE>mode) - 16);
++ }
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "zero_extendqi<SUPERQI:mode>2"
++ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
++ (zero_extend:SUPERQI
++ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ and\t%0,%1,0xff
++ lbu\t%0,%1"
++ [(set_attr "move_type" "andi,load")
++ (set_attr "mode" "<SUPERQI:MODE>")])
++
++;;
++;; ....................
++;;
++;; SIGN EXTENSION
++;;
++;; ....................
++
++;; Extension insns.
++;; Those with an integer source operand are ordered widest source type first.
++
++;; When TARGET_64BIT, all SImode integer registers should already be in
++;; sign-extended form (see TRULY_NOOP_TRUNCATION and truncdisi2). We can
++;; therefore get rid of register->register instructions if we constrain
++;; the source to be in the same register as the destination.
++;;
++;; The register alternative has type "arith" so that the pre-reload
++;; scheduler will treat it as a move. This reflects what happens if
++;; the register alternative needs a reload.
++(define_insn_and_split "extendsidi2"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
++ "TARGET_64BIT"
++ "@
++ #
++ lw\t%0,%1"
++ "&& reload_completed && register_operand (operands[1], VOIDmode)"
++ [(set (match_dup 0) (match_dup 1))]
++{
++ if (REGNO (operands[0]) == REGNO (operands[1]))
++ {
++ emit_note (NOTE_INSN_DELETED);
++ DONE;
++ }
++ operands[1] = gen_rtx_REG (DImode, REGNO (operands[1]));
++}
++ [(set_attr "move_type" "move,load")
++ (set_attr "mode" "DI")])
++
++(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
++ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
++ (sign_extend:SUPERQI
++ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ #
++ l<SHORT:size>\t%0,%1"
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
++ (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
++{
++ operands[0] = gen_lowpart (SImode, operands[0]);
++ operands[1] = gen_lowpart (SImode, operands[1]);
++ operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
++ - GET_MODE_BITSIZE (<SHORT:MODE>mode));
++}
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "SI")])
++
++(define_insn "extendsfdf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.d.s\t%0,%1"
++ [(set_attr "type" "fcvt")
++ (set_attr "cnv_mode" "S2D")
++ (set_attr "mode" "DF")])
++
++;;
++;; ....................
++;;
++;; CONVERSIONS
++;;
++;; ....................
++
++(define_insn "fix_truncdfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (fix:SI (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.w.d %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "D2I")])
++
++
++(define_insn "fix_truncsfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (fix:SI (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.w.s %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "S2I")])
++
++
++(define_insn "fix_truncdfdi2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (fix:DI (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.l.d %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "D2I")])
++
++
++(define_insn "fix_truncsfdi2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (fix:DI (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.l.s %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "S2I")])
++
++
++(define_insn "floatsidf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.d.w\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "I2D")])
++
++
++(define_insn "floatdidf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.d.l\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "I2D")])
++
++
++(define_insn "floatsisf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.s.w\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "I2S")])
++
++
++(define_insn "floatdisf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.s.l\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "I2S")])
++
++
++(define_insn "floatunssidf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (unsigned_float:DF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.d.wu\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "I2D")])
++
++
++(define_insn "floatunsdidf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (unsigned_float:DF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.d.lu\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "I2D")])
++
++
++(define_insn "floatunssisf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (unsigned_float:SF (match_operand:SI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.s.wu\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "I2S")])
++
++
++(define_insn "floatunsdisf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (unsigned_float:SF (match_operand:DI 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.s.lu\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "I2S")])
++
++
++(define_insn "fixuns_truncdfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unsigned_fix:SI (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.wu.d %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "D2I")])
++
++
++(define_insn "fixuns_truncsfsi2"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unsigned_fix:SI (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.wu.s %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "S2I")])
++
++
++(define_insn "fixuns_truncdfdi2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (unsigned_fix:DI (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.lu.d %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")
++ (set_attr "cnv_mode" "D2I")])
++
++
++(define_insn "fixuns_truncsfdi2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (unsigned_fix:DI (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT"
++ "fcvt.lu.s %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")
++ (set_attr "cnv_mode" "S2I")])
++
++;;
++;; ....................
++;;
++;; DATA MOVEMENT
++;;
++;; ....................
++
++;; Lower-level instructions for loading an address from the GOT.
++;; We could use MEMs, but an unspec gives more optimization
++;; opportunities.
++
++(define_insn "got_load<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++ UNSPEC_LOAD_GOT))]
++ "flag_pic"
++ "la\t%0,%1"
++ [(set_attr "got" "load")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "tls_add_tp_le<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "register_operand" "r")
++ (match_operand:P 2 "register_operand" "r")
++ (match_operand:P 3 "symbolic_operand" "")]
++ UNSPEC_TLS_LE))]
++ "!flag_pic || flag_pie"
++ "add\t%0,%1,%2,%%tprel_add(%3)"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "got_load_tls_gd<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++ UNSPEC_TLS_GD))]
++ "flag_pic"
++ "la.tls.gd\t%0,%1"
++ [(set_attr "got" "load")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "got_load_tls_ie<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++ UNSPEC_TLS_IE))]
++ "flag_pic"
++ "la.tls.ie\t%0,%1"
++ [(set_attr "got" "load")
++ (set_attr "mode" "<MODE>")])
++
++;; Instructions for adding the low 12 bits of an address to a register.
++;; Operand 2 is the address: riscv_print_operand works out which relocation
++;; should be applied.
++
++(define_insn "*low<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (lo_sum:P (match_operand:P 1 "register_operand" "r")
++ (match_operand:P 2 "immediate_operand" "")))]
++ ""
++ "add\t%0,%1,%R2"
++ [(set_attr "alu_type" "add")
++ (set_attr "mode" "<MODE>")])
++
++;; Allow combine to split complex const_int load sequences, using operand 2
++;; to store the intermediate results. See move_operand for details.
++(define_split
++ [(set (match_operand:GPR 0 "register_operand")
++ (match_operand:GPR 1 "splittable_const_int_operand"))
++ (clobber (match_operand:GPR 2 "register_operand"))]
++ ""
++ [(const_int 0)]
++{
++ riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
++ DONE;
++})
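riscv_move_integer (not part of this hunk) synthesizes constants from lui/addi sequences; because addi's 12-bit immediate is sign-extended, a set bit 11 must be compensated in the lui part. A sketch of the 32-bit split, with hypothetical helper names:

    #include <stdint.h>

    void split_const32(int32_t val, int32_t *hi, int32_t *lo)
    {
        /* Low 12 bits as the sign-extended addi immediate. */
        *lo = (int32_t)((uint32_t)val << 20) >> 20;
        /* What lui must supply so that (*hi << 12) + *lo == val. */
        *hi = (int32_t)(((uint32_t)val - (uint32_t)*lo) >> 12);
    }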
++
++;; Likewise, for symbolic operands.
++(define_split
++ [(set (match_operand:P 0 "register_operand")
++ (match_operand:P 1))
++ (clobber (match_operand:P 2 "register_operand"))]
++ "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
++ [(set (match_dup 0) (match_dup 3))]
++{
++ riscv_split_symbol (operands[2], operands[1],
++ MAX_MACHINE_MODE, &operands[3]);
++})
++
++;; 64-bit integer moves
++
++;; Unlike most other insns, the move insns can't be split with
++;; different predicates, because register spilling and other parts of
++;; the compiler have memoized the insn number already.
++
++(define_expand "movdi"
++ [(set (match_operand:DI 0 "")
++ (match_operand:DI 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (DImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movdi_32bit"
++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
++ (match_operand:DI 1 "move_operand" "r,i,m,r,*J*r,*m,*f,*f"))]
++ "!TARGET_64BIT
++ && (register_operand (operands[0], DImode)
++ || reg_or_0_operand (operands[1], DImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
++ (set_attr "mode" "DI")])
++
++(define_insn "*movdi_64bit"
++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
++ (match_operand:DI 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
++ "TARGET_64BIT
++ && (register_operand (operands[0], DImode)
++ || reg_or_0_operand (operands[1], DImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
++ (set_attr "mode" "DI")])
++
++;; 32-bit Integer moves
++
++;; Unlike most other insns, the move insns can't be split with
++;; different predicates, because register spilling and other parts of
++;; the compiler have memoized the insn number already.
++
++(define_expand "mov<mode>"
++ [(set (match_operand:IMOVE32 0 "")
++ (match_operand:IMOVE32 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
++ DONE;
++})
++
++;; The starred *f alternatives allow ints to be held in FP registers;
++;; the '*' keeps the register allocator from preferring them.
++
++(define_insn "*mov<mode>_internal"
++ [(set (match_operand:IMOVE32 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
++ (match_operand:IMOVE32 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
++ "(register_operand (operands[0], <MODE>mode)
++ || reg_or_0_operand (operands[1], <MODE>mode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
++ (set_attr "mode" "SI")])
++
++;; 16-bit Integer moves
++
++;; Unlike most other insns, the move insns can't be split with
++;; different predicates, because register spilling and other parts of
++;; the compiler have memoized the insn number already.
++;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
++
++(define_expand "movhi"
++ [(set (match_operand:HI 0 "")
++ (match_operand:HI 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (HImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movhi_internal"
++ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
++ (match_operand:HI 1 "move_operand" "r,T,m,rJ,*r*J,*f"))]
++ "(register_operand (operands[0], HImode)
++ || reg_or_0_operand (operands[1], HImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
++ (set_attr "mode" "HI")])
++
++;; HImode constant generation; see riscv_move_integer for details.
++;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
++
++(define_insn "add<mode>hi3"
++ [(set (match_operand:HI 0 "register_operand" "=r,r")
++ (plus:HI (match_operand:HISI 1 "register_operand" "r,r")
++ (match_operand:HISI 2 "arith_operand" "r,Q")))]
++ ""
++ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
++ [(set_attr "type" "arith")
++ (set_attr "mode" "HI")])
++
++(define_insn "xor<mode>hi3"
++ [(set (match_operand:HI 0 "register_operand" "=r,r")
++ (xor:HI (match_operand:HISI 1 "register_operand" "r,r")
++ (match_operand:HISI 2 "arith_operand" "r,Q")))]
++ ""
++ "xor\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "HI")])
++
++;; 8-bit Integer moves
++
++(define_expand "movqi"
++ [(set (match_operand:QI 0 "")
++ (match_operand:QI 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (QImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movqi_internal"
++ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
++ (match_operand:QI 1 "move_operand" "r,I,m,rJ,*r*J,*f"))]
++ "(register_operand (operands[0], QImode)
++ || reg_or_0_operand (operands[1], QImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
++ (set_attr "mode" "QI")])
++
++;; 32-bit floating point moves
++
++(define_expand "movsf"
++ [(set (match_operand:SF 0 "")
++ (match_operand:SF 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movsf_hardfloat"
++ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
++ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
++ "TARGET_HARD_FLOAT
++ && (register_operand (operands[0], SFmode)
++ || reg_or_0_operand (operands[1], SFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
++ (set_attr "mode" "SF")])
++
++(define_insn "*movsf_softfloat"
++ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
++ (match_operand:SF 1 "move_operand" "Gr,m,r"))]
++ "TARGET_SOFT_FLOAT
++ && (register_operand (operands[0], SFmode)
++ || reg_or_0_operand (operands[1], SFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,load,store")
++ (set_attr "mode" "SF")])
++
++;; 64-bit floating point moves
++
++(define_expand "movdf"
++ [(set (match_operand:DF 0 "")
++ (match_operand:DF 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
++ DONE;
++})
++
++;; In RV32, we lack mtf.d/mff.d, so we go through memory instead
++;; (except for moving a constant 0 to an FPR; for that we use fcvt.d.w).
++(define_insn "*movdf_hardfloat_rv32"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
++ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
++ "!TARGET_64BIT && TARGET_HARD_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || reg_or_0_operand (operands[1], DFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
++ (set_attr "mode" "DF")])
++
++(define_insn "*movdf_hardfloat_rv64"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
++ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
++ "TARGET_64BIT && TARGET_HARD_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || reg_or_0_operand (operands[1], DFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
++ (set_attr "mode" "DF")])
++
++(define_insn "*movdf_softfloat"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
++ (match_operand:DF 1 "move_operand" "rG,m,rG"))]
++ "TARGET_SOFT_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || reg_or_0_operand (operands[1], DFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,load,store")
++ (set_attr "mode" "DF")])
++
++;; 128-bit integer moves
++
++(define_expand "movti"
++ [(set (match_operand:TI 0)
++ (match_operand:TI 1))]
++ "TARGET_64BIT"
++{
++ if (riscv_legitimize_move (TImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movti"
++ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m")
++ (match_operand:TI 1 "move_operand" "r,i,m,rJ"))]
++ "TARGET_64BIT
++ && (register_operand (operands[0], TImode)
++ || reg_or_0_operand (operands[1], TImode))"
++ "#"
++ [(set_attr "move_type" "move,const,load,store")
++ (set_attr "mode" "TI")])
++
++(define_split
++ [(set (match_operand:MOVE64 0 "nonimmediate_operand")
++ (match_operand:MOVE64 1 "move_operand"))]
++ "reload_completed && !TARGET_64BIT
++ && riscv_split_64bit_move_p (operands[0], operands[1])"
++ [(const_int 0)]
++{
++ riscv_split_doubleword_move (operands[0], operands[1]);
++ DONE;
++})
++
++(define_split
++ [(set (match_operand:MOVE128 0 "nonimmediate_operand")
++ (match_operand:MOVE128 1 "move_operand"))]
++ "TARGET_64BIT && reload_completed"
++ [(const_int 0)]
++{
++ riscv_split_doubleword_move (operands[0], operands[1]);
++ DONE;
++})
++
++;; 64-bit paired-single floating point moves
++
++;; Load the low word of operand 0 with operand 1.
++(define_insn "load_low<mode>"
++ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
++ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")]
++ UNSPEC_LOAD_LOW))]
++ "TARGET_HARD_FLOAT"
++{
++ operands[0] = riscv_subword (operands[0], 0);
++ return riscv_output_move (operands[0], operands[1]);
++}
++ [(set_attr "move_type" "mtc,fpload")
++ (set_attr "mode" "<HALFMODE>")])
++
++;; Load the high word of operand 0 from operand 1, preserving the value
++;; in the low word.
++(define_insn "load_high<mode>"
++ [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
++ (unspec:SPLITF [(match_operand:<HALFMODE> 1 "general_operand" "rJ,m")
++ (match_operand:SPLITF 2 "register_operand" "0,0")]
++ UNSPEC_LOAD_HIGH))]
++ "TARGET_HARD_FLOAT"
++{
++ operands[0] = riscv_subword (operands[0], 1);
++ return riscv_output_move (operands[0], operands[1]);
++}
++ [(set_attr "move_type" "mtc,fpload")
++ (set_attr "mode" "<HALFMODE>")])
++
++;; Store one word of operand 1 in operand 0. Operand 2 is 1 to store the
++;; high word and 0 to store the low word.
++(define_insn "store_word<mode>"
++ [(set (match_operand:<HALFMODE> 0 "nonimmediate_operand" "=r,m")
++ (unspec:<HALFMODE> [(match_operand:SPLITF 1 "register_operand" "f,f")
++ (match_operand 2 "const_int_operand")]
++ UNSPEC_STORE_WORD))]
++ "TARGET_HARD_FLOAT"
++{
++ operands[1] = riscv_subword (operands[1], INTVAL (operands[2]));
++ return riscv_output_move (operands[0], operands[1]);
++}
++ [(set_attr "move_type" "mfc,fpstore")
++ (set_attr "mode" "<HALFMODE>")])
++
++;; Expand in-line code to clear the instruction cache between operand[0] and
++;; operand[1].
++(define_expand "clear_cache"
++ [(match_operand 0 "pmode_register_operand")
++ (match_operand 1 "pmode_register_operand")]
++ ""
++ "
++{
++ emit_insn(gen_fence_i());
++ DONE;
++}")
++
++(define_insn "fence"
++ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE)]
++ ""
++ "%|fence%-")
++
++(define_insn "fence_i"
++ [(unspec_volatile [(const_int 0)] UNSPEC_FENCE_I)]
++ ""
++ "fence.i")
++
++;; Block moves, see riscv.c for more details.
++;; Argument 0 is the destination
++;; Argument 1 is the source
++;; Argument 2 is the length
++;; Argument 3 is the alignment
++
++(define_expand "movmemsi"
++ [(parallel [(set (match_operand:BLK 0 "general_operand")
++ (match_operand:BLK 1 "general_operand"))
++ (use (match_operand:SI 2 ""))
++ (use (match_operand:SI 3 "const_int_operand"))])]
++ "!TARGET_MEMCPY"
++{
++ if (riscv_expand_block_move (operands[0], operands[1], operands[2]))
++ DONE;
++ else
++ FAIL;
++})
++
++;;
++;; ....................
++;;
++;; SHIFTS
++;;
++;; ....................
++
++(define_insn "<optab>si3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "arith_operand" "rI")))]
++ ""
++{
++ if (GET_CODE (operands[2]) == CONST_INT)
++ operands[2] = GEN_INT (INTVAL (operands[2])
++ & (GET_MODE_BITSIZE (SImode) - 1));
++
++ return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
++}
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++(define_insn "*<optab>disi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_shift:SI (truncate:SI (match_operand:DI 1 "register_operand" "r"))
++ (truncate:SI (match_operand:DI 2 "arith_operand" "rI"))))]
++ "TARGET_64BIT"
++ "<insn>w\t%0,%1,%2"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++(define_insn "*ashldi3_truncsi"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (ashift:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "const_arith_operand" "I"))))]
++ "TARGET_64BIT && INTVAL (operands[2]) < 32"
++ "sllw\t%0,%1,%2"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++(define_insn "*ashldisi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (ashift:SI (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR2 2 "arith_operand" "rI")))]
++ "TARGET_64BIT && (GET_CODE (operands[2]) == CONST_INT ? INTVAL (operands[2]) < 32 : 1)"
++ "sllw\t%0,%1,%2"
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++(define_insn "<optab>di3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (any_shift:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "arith_operand" "rI")))]
++ "TARGET_64BIT"
++{
++ if (GET_CODE (operands[2]) == CONST_INT)
++ operands[2] = GEN_INT (INTVAL (operands[2])
++ & (GET_MODE_BITSIZE (DImode) - 1));
++
++ return "<insn>\t%0,%1,%2";
++}
++ [(set_attr "type" "shift")
++ (set_attr "mode" "DI")])
++
++(define_insn "<optab>si3_extend"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "arith_operand" "rI"))))]
++ "TARGET_64BIT"
++{
++ if (GET_CODE (operands[2]) == CONST_INT)
++ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
++
++ return "<insn>w\t%0,%1,%2";
++}
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; CONDITIONAL BRANCHES
++;;
++;; ....................
++
++;; Conditional branches
++
++(define_insn "*branch_order<mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 1 "order_operator"
++ [(match_operand:GPR 2 "register_operand" "r")
++ (match_operand:GPR 3 "reg_or_0_operand" "rJ")])
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++ "b%C1\t%2,%z3,%0"
++ [(set_attr "type" "branch")
++ (set_attr "mode" "none")])
++
++;; Used to implement built-in functions.
++(define_expand "condjump"
++ [(set (pc)
++ (if_then_else (match_operand 0)
++ (label_ref (match_operand 1))
++ (pc)))])
++
++(define_expand "cbranch<mode>4"
++ [(set (pc)
++ (if_then_else (match_operator 0 "comparison_operator"
++ [(match_operand:GPR 1 "register_operand")
++ (match_operand:GPR 2 "nonmemory_operand")])
++ (label_ref (match_operand 3 ""))
++ (pc)))]
++ ""
++{
++ riscv_expand_conditional_branch (operands);
++ DONE;
++})
++
++(define_expand "cbranch<mode>4"
++ [(set (pc)
++ (if_then_else (match_operator 0 "comparison_operator"
++ [(match_operand:SCALARF 1 "register_operand")
++ (match_operand:SCALARF 2 "register_operand")])
++ (label_ref (match_operand 3 ""))
++ (pc)))]
++ ""
++{
++ riscv_expand_conditional_branch (operands);
++ DONE;
++})
++
++(define_insn_and_split "*branch_on_bit<GPR:mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 0 "equality_operator"
++ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
++ (const_int 1)
++ (match_operand 3 "const_int_operand"))
++ (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))
++ (clobber (match_scratch:GPR 4 "=&r"))]
++ ""
++ "#"
++ "reload_completed"
++ [(set (match_dup 4)
++ (ashift:GPR (match_dup 2) (match_dup 3)))
++ (set (pc)
++ (if_then_else
++ (match_op_dup 0 [(match_dup 4) (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))]
++{
++ int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
++ operands[3] = GEN_INT (shift);
++
++ if (GET_CODE (operands[0]) == EQ)
++ operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
++ else
++ operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
++})
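The split tests a single bit by shifting it into the sign position and branching on the sign (EQ against zero becomes GE, NE becomes LT); the *branch_on_bit_range variant below does the same for the low N bits, with a count of BITSIZE - N. In C, for one SImode bit, assuming GCC's conversion semantics:

    #include <stdint.h>

    int bit_is_set(uint32_t x, int bit)   /* 0 <= bit <= 31 */
    {
        return (int32_t)(x << (31 - bit)) < 0;   /* slli ; bltz */
    }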
++
++(define_insn_and_split "*branch_on_bit_range<GPR:mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 0 "equality_operator"
++ [(zero_extract:GPR (match_operand:GPR 2 "register_operand" "r")
++ (match_operand 3 "const_int_operand")
++ (const_int 0))
++ (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))
++ (clobber (match_scratch:GPR 4 "=&r"))]
++ ""
++ "#"
++ "reload_completed"
++ [(set (match_dup 4)
++ (ashift:GPR (match_dup 2) (match_dup 3)))
++ (set (pc)
++ (if_then_else
++ (match_op_dup 0 [(match_dup 4) (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))]
++{
++ operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
++})
++
++;;
++;; ....................
++;;
++;; SETTING A REGISTER FROM A COMPARISON
++;;
++;; ....................
++
++;; Destination is always set in SI mode.
++
++(define_expand "cstore<mode>4"
++ [(set (match_operand:SI 0 "register_operand")
++ (match_operator:SI 1 "order_operator"
++ [(match_operand:GPR 2 "register_operand")
++ (match_operand:GPR 3 "nonmemory_operand")]))]
++ ""
++{
++ riscv_expand_scc (operands);
++ DONE;
++})
++
++(define_insn "cstore<mode>4"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (match_operator:SI 1 "fp_order_operator"
++ [(match_operand:SCALARF 2 "register_operand" "f")
++ (match_operand:SCALARF 3 "register_operand" "f")]))]
++ "TARGET_HARD_FLOAT"
++ "f%C1.<fmt>\t%0,%2,%3"
++ [(set_attr "type" "fcmp")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "*seq_zero_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (eq:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (const_int 0)))]
++ ""
++ "seqz\t%0,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sne_zero_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (ne:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (const_int 0)))]
++ ""
++ "snez\t%0,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sgt<u>_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (any_gt:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR 2 "reg_or_0_operand" "rJ")))]
++ ""
++ "slt<u>\t%0,%z2,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sge<u>_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (any_ge:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (const_int 1)))]
++ ""
++ "slt<u>\t%0,zero,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*slt<u>_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (any_lt:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR 2 "arith_operand" "rI")))]
++ ""
++ "slt<u>\t%0,%1,%2"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "*sle<u>_<GPR:mode><GPR2:mode>"
++ [(set (match_operand:GPR2 0 "register_operand" "=r")
++ (any_le:GPR2 (match_operand:GPR 1 "register_operand" "r")
++ (match_operand:GPR 2 "sle_operand" "")))]
++ ""
++{
++ operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
++ return "slt<u>\t%0,%1,%2";
++}
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<GPR:MODE>")])
++
++;;
++;; ....................
++;;
++;; UNCONDITIONAL BRANCHES
++;;
++;; ....................
++
++;; Unconditional branches.
++
++(define_insn "jump"
++ [(set (pc)
++ (label_ref (match_operand 0 "" "")))]
++ ""
++ "j\t%l0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++(define_expand "indirect_jump"
++ [(set (pc) (match_operand 0 "register_operand"))]
++ ""
++{
++ operands[0] = force_reg (Pmode, operands[0]);
++ if (Pmode == SImode)
++ emit_jump_insn (gen_indirect_jumpsi (operands[0]));
++ else
++ emit_jump_insn (gen_indirect_jumpdi (operands[0]));
++ DONE;
++})
++
++(define_insn "indirect_jump<mode>"
++ [(set (pc) (match_operand:P 0 "register_operand" "r"))]
++ ""
++ "jr\t%0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++(define_expand "tablejump"
++ [(set (pc) (match_operand 0 "register_operand" ""))
++ (use (label_ref (match_operand 1 "" "")))]
++ ""
++{
++ if (flag_pic)
++ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
++ gen_rtx_LABEL_REF (Pmode, operands[1]),
++ NULL_RTX, 0, OPTAB_DIRECT);
++
++ if (flag_pic && Pmode == DImode)
++ emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
++ else
++ emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
++ DONE;
++})
++
++(define_insn "tablejump<mode>"
++ [(set (pc) (match_operand:GPR 0 "register_operand" "r"))
++ (use (label_ref (match_operand 1 "" "")))]
++ ""
++ "jr\t%0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++;;
++;; ....................
++;;
++;; Function prologue/epilogue
++;;
++;; ....................
++;;
++
++(define_expand "prologue"
++ [(const_int 1)]
++ ""
++{
++ riscv_expand_prologue ();
++ DONE;
++})
++
++;; Block any insns from being moved before this point, since the
++;; profiling call to mcount can use various registers that aren't
++;; saved or used to pass arguments.
++
++(define_insn "blockage"
++ [(unspec_volatile [(const_int 0)] UNSPEC_BLOCKAGE)]
++ ""
++ ""
++ [(set_attr "type" "ghost")
++ (set_attr "mode" "none")])
++
++(define_expand "epilogue"
++ [(const_int 2)]
++ ""
++{
++ riscv_expand_epilogue (false);
++ DONE;
++})
++
++(define_expand "sibcall_epilogue"
++ [(const_int 2)]
++ ""
++{
++ riscv_expand_epilogue (true);
++ DONE;
++})
++
++;; Trivial return. Make it look like a normal return insn as that
++;; allows jump optimizations to work better.
++
++(define_expand "return"
++ [(simple_return)]
++ "riscv_can_use_return_insn ()"
++ "")
++
++(define_insn "simple_return"
++ [(simple_return)]
++ ""
++ "ret"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++;; Normal return.
++
++(define_insn "simple_return_internal"
++ [(simple_return)
++ (use (match_operand 0 "pmode_register_operand" ""))]
++ ""
++ "jr\t%0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++;; This is used in compiling the unwind routines.
++(define_expand "eh_return"
++ [(use (match_operand 0 "general_operand"))]
++ ""
++{
++ if (GET_MODE (operands[0]) != word_mode)
++ operands[0] = convert_to_mode (word_mode, operands[0], 0);
++ if (TARGET_64BIT)
++ emit_insn (gen_eh_set_lr_di (operands[0]));
++ else
++ emit_insn (gen_eh_set_lr_si (operands[0]));
++ DONE;
++})
++
++;; Clobber the return address on the stack. We can't expand this
++;; until we know where it will be put in the stack frame.
++
++(define_insn "eh_set_lr_si"
++ [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch:SI 1 "=&r"))]
++ "! TARGET_64BIT"
++ "#")
++
++(define_insn "eh_set_lr_di"
++ [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch:DI 1 "=&r"))]
++ "TARGET_64BIT"
++ "#")
++
++(define_split
++ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch 1))]
++ "reload_completed"
++ [(const_int 0)]
++{
++ riscv_set_return_address (operands[0], operands[1]);
++ DONE;
++})
++
++;;
++;; ....................
++;;
++;; FUNCTION CALLS
++;;
++;; ....................
++
++;; Sibling calls. All these patterns use jump instructions.
++
++;; call_insn_operand will only accept constant
++;; addresses if a direct jump is acceptable. Since the 'S' constraint
++;; is defined in terms of call_insn_operand, the same is true of the
++;; constraints.
++
++;; When we use an indirect jump, we need a register that will be
++;; preserved by the epilogue (constraint j).
++
++(define_expand "sibcall"
++ [(parallel [(call (match_operand 0 "")
++ (match_operand 1 ""))
++ (use (match_operand 2 "")) ;; next_arg_reg
++ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
++ ""
++{
++ riscv_expand_call (true, NULL_RTX, XEXP (operands[0], 0), operands[1]);
++ DONE;
++})
++
++(define_insn "sibcall_internal"
++ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S"))
++ (match_operand 1 "" ""))]
++ "SIBLING_CALL_P (insn)"
++ { return REG_P (operands[0]) ? "jr\t%0"
++ : absolute_symbolic_operand (operands[0], VOIDmode) ? "tail\t%0"
++ : "tail\t%0@"; }
++ [(set_attr "type" "call")])
++
++(define_expand "sibcall_value"
++ [(parallel [(set (match_operand 0 "")
++ (call (match_operand 1 "")
++ (match_operand 2 "")))
++ (use (match_operand 3 ""))])] ;; next_arg_reg
++ ""
++{
++ riscv_expand_call (true, operands[0], XEXP (operands[1], 0), operands[2]);
++ DONE;
++})
++
++(define_insn "sibcall_value_internal"
++ [(set (match_operand 0 "register_operand" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
++ (match_operand 2 "" "")))]
++ "SIBLING_CALL_P (insn)"
++ { return REG_P (operands[1]) ? "jr\t%1"
++ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
++ : "tail\t%1@"; }
++ [(set_attr "type" "call")])
++
++(define_insn "sibcall_value_multiple_internal"
++ [(set (match_operand 0 "register_operand" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S"))
++ (match_operand 2 "" "")))
++ (set (match_operand 3 "register_operand" "")
++ (call (mem:SI (match_dup 1))
++ (match_dup 2)))
++ (clobber (match_scratch:SI 4 "=j,j"))]
++ "SIBLING_CALL_P (insn)"
++ { return REG_P (operands[1]) ? "jr\t%1"
++ : absolute_symbolic_operand (operands[1], VOIDmode) ? "tail\t%1"
++ : "tail\t%1@"; }
++ [(set_attr "type" "call")])
++
++(define_expand "call"
++ [(parallel [(call (match_operand 0 "")
++ (match_operand 1 ""))
++ (use (match_operand 2 "")) ;; next_arg_reg
++ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
++ ""
++{
++ riscv_expand_call (false, NULL_RTX, XEXP (operands[0], 0), operands[1]);
++ DONE;
++})
++
++(define_insn "call_internal"
++ [(call (mem:SI (match_operand 0 "call_insn_operand" "r,S"))
++ (match_operand 1 "" ""))
++ (clobber (reg:SI RETURN_ADDR_REGNUM))]
++ ""
++ { return REG_P (operands[0]) ? "jalr\t%0"
++ : absolute_symbolic_operand (operands[0], VOIDmode) ? "call\t%0"
++ : "call\t%0@"; }
++ [(set_attr "jal" "indirect,direct")])
++
++(define_expand "call_value"
++ [(parallel [(set (match_operand 0 "")
++ (call (match_operand 1 "")
++ (match_operand 2 "")))
++ (use (match_operand 3 ""))])] ;; next_arg_reg
++ ""
++{
++ riscv_expand_call (false, operands[0], XEXP (operands[1], 0), operands[2]);
++ DONE;
++})
++
++;; See comment for call_internal.
++(define_insn "call_value_internal"
++ [(set (match_operand 0 "register_operand" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "r,S"))
++ (match_operand 2 "" "")))
++ (clobber (reg:SI RETURN_ADDR_REGNUM))]
++ ""
++ { return REG_P (operands[1]) ? "jalr\t%1"
++ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
++ : "call\t%1@"; }
++ [(set_attr "jal" "indirect,direct")])
++
++;; See comment for call_internal.
++(define_insn "call_value_multiple_internal"
++ [(set (match_operand 0 "register_operand" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "r,S"))
++ (match_operand 2 "" "")))
++ (set (match_operand 3 "register_operand" "")
++ (call (mem:SI (match_dup 1))
++ (match_dup 2)))
++ (clobber (reg:SI RETURN_ADDR_REGNUM))]
++ ""
++ { return REG_P (operands[1]) ? "jalr\t%1"
++ : absolute_symbolic_operand (operands[1], VOIDmode) ? "call\t%1"
++ : "call\t%1@"; }
++ [(set_attr "jal" "indirect,direct")])
++
++;; Call subroutine returning any type.
++
++(define_expand "untyped_call"
++ [(parallel [(call (match_operand 0 "")
++ (const_int 0))
++ (match_operand 1 "")
++ (match_operand 2 "")])]
++ ""
++{
++ int i;
++
++ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
++
++ for (i = 0; i < XVECLEN (operands[2], 0); i++)
++ {
++ rtx set = XVECEXP (operands[2], 0, i);
++ riscv_emit_move (SET_DEST (set), SET_SRC (set));
++ }
++
++ emit_insn (gen_blockage ());
++ DONE;
++})
++
++(define_insn "nop"
++ [(const_int 0)]
++ ""
++ "nop"
++ [(set_attr "type" "nop")
++ (set_attr "mode" "none")])
++
++(define_insn "trap"
++ [(trap_if (const_int 1) (const_int 0))]
++ ""
++ "sbreak")
++
++(include "sync.md")
++(include "peephole.md")
+diff -urN original-gcc/gcc/config/riscv/riscv-modes.def gcc/gcc/config/riscv/riscv-modes.def
+--- original-gcc/gcc/config/riscv/riscv-modes.def 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/riscv-modes.def 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,26 @@
++/* Extra machine modes for RISC-V target.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++FLOAT_MODE (TF, 16, ieee_quad_format);
++
++/* Vector modes. */
++VECTOR_MODES (INT, 4); /* V8QI V4HI V2SI */
++VECTOR_MODES (FLOAT, 4); /* V4HF V2SF */
+diff -urN original-gcc/gcc/config/riscv/riscv-opc.h gcc/gcc/config/riscv/riscv-opc.h
+--- original-gcc/gcc/config/riscv/riscv-opc.h 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/riscv-opc.h 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,1216 @@
++/* Automatically generated by parse-opcodes */
++#ifndef RISCV_ENCODING_H
++#define RISCV_ENCODING_H
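Each generated MATCH_*/MASK_* pair gives an instruction's required bit pattern and which bits of the word are fixed: a word decodes as instruction X when (insn & MASK_X) == MATCH_X. A minimal sketch of the intended use, with the FENCE.I values (MATCH 0x100f, MASK 0x707f) written out:

    #include <stdint.h>

    static int is_fence_i(uint32_t insn)
    {
        return (insn & 0x707f) == 0x100f;  /* MASK_FENCE_I / MATCH_FENCE_I */
    }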
++#define MATCH_CUSTOM3_RD_RS1_RS2 0x707b
++#define MASK_CUSTOM3_RD_RS1_RS2 0x707f
++#define MATCH_VLSEGSTWU 0xc00305b
++#define MASK_VLSEGSTWU 0x1e00707f
++#define MATCH_C_LW0 0x12
++#define MASK_C_LW0 0x801f
++#define MATCH_FMV_D_X 0xf2000053
++#define MASK_FMV_D_X 0xfff0707f
++#define MATCH_VLH 0x200205b
++#define MASK_VLH 0xfff0707f
++#define MATCH_C_LI 0x0
++#define MASK_C_LI 0x1f
++#define MATCH_FADD_D 0x2000053
++#define MASK_FADD_D 0xfe00007f
++#define MATCH_C_LD 0x9
++#define MASK_C_LD 0x1f
++#define MATCH_VLD 0x600205b
++#define MASK_VLD 0xfff0707f
++#define MATCH_FADD_S 0x53
++#define MASK_FADD_S 0xfe00007f
++#define MATCH_C_LW 0xa
++#define MASK_C_LW 0x1f
++#define MATCH_VLW 0x400205b
++#define MASK_VLW 0xfff0707f
++#define MATCH_VSSEGSTW 0x400307b
++#define MASK_VSSEGSTW 0x1e00707f
++#define MATCH_UTIDX 0x6077
++#define MASK_UTIDX 0xfffff07f
++#define MATCH_C_FLW 0x14
++#define MASK_C_FLW 0x1f
++#define MATCH_FSUB_D 0xa000053
++#define MASK_FSUB_D 0xfe00007f
++#define MATCH_VSSEGSTD 0x600307b
++#define MASK_VSSEGSTD 0x1e00707f
++#define MATCH_VSSEGSTB 0x307b
++#define MASK_VSSEGSTB 0x1e00707f
++#define MATCH_DIV 0x2004033
++#define MASK_DIV 0xfe00707f
++#define MATCH_FMV_H_X 0xf4000053
++#define MASK_FMV_H_X 0xfff0707f
++#define MATCH_C_FLD 0x15
++#define MASK_C_FLD 0x1f
++#define MATCH_FRRM 0x202073
++#define MASK_FRRM 0xfffff07f
++#define MATCH_VFMSV_S 0x1000202b
++#define MASK_VFMSV_S 0xfff0707f
++#define MATCH_C_LWSP 0x5
++#define MASK_C_LWSP 0x1f
++#define MATCH_FENCE 0xf
++#define MASK_FENCE 0x707f
++#define MATCH_FNMSUB_S 0x4b
++#define MASK_FNMSUB_S 0x600007f
++#define MATCH_FLE_S 0xa0000053
++#define MASK_FLE_S 0xfe00707f
++#define MATCH_FNMSUB_H 0x400004b
++#define MASK_FNMSUB_H 0x600007f
++#define MATCH_FLE_H 0xbc000053
++#define MASK_FLE_H 0xfe00707f
++#define MATCH_FLW 0x2007
++#define MASK_FLW 0x707f
++#define MATCH_VSETVL 0x600b
++#define MASK_VSETVL 0xfff0707f
++#define MATCH_VFMSV_D 0x1200202b
++#define MASK_VFMSV_D 0xfff0707f
++#define MATCH_FLE_D 0xa2000053
++#define MASK_FLE_D 0xfe00707f
++#define MATCH_FENCE_I 0x100f
++#define MASK_FENCE_I 0x707f
++#define MATCH_FNMSUB_D 0x200004b
++#define MASK_FNMSUB_D 0x600007f
++#define MATCH_ADDW 0x3b
++#define MASK_ADDW 0xfe00707f
++#define MATCH_XOR 0x4033
++#define MASK_XOR 0xfe00707f
++#define MATCH_SUB 0x40000033
++#define MASK_SUB 0xfe00707f
++#define MATCH_VSSTW 0x400307b
++#define MASK_VSSTW 0xfe00707f
++#define MATCH_VSSTH 0x200307b
++#define MASK_VSSTH 0xfe00707f
++#define MATCH_SC_W 0x1800202f
++#define MASK_SC_W 0xf800707f
++#define MATCH_VSSTB 0x307b
++#define MASK_VSSTB 0xfe00707f
++#define MATCH_VSSTD 0x600307b
++#define MASK_VSSTD 0xfe00707f
++#define MATCH_ADDI 0x13
++#define MASK_ADDI 0x707f
++#define MATCH_RDTIMEH 0xc8102073
++#define MASK_RDTIMEH 0xfffff07f
++#define MATCH_MULH 0x2001033
++#define MASK_MULH 0xfe00707f
++#define MATCH_CSRRSI 0x6073
++#define MASK_CSRRSI 0x707f
++#define MATCH_FCVT_D_WU 0xd2100053
++#define MASK_FCVT_D_WU 0xfff0007f
++#define MATCH_MULW 0x200003b
++#define MASK_MULW 0xfe00707f
++#define MATCH_CUSTOM1_RD_RS1_RS2 0x702b
++#define MASK_CUSTOM1_RD_RS1_RS2 0x707f
++#define MATCH_VENQIMM1 0xc00302b
++#define MASK_VENQIMM1 0xfe007fff
++#define MATCH_VENQIMM2 0xe00302b
++#define MASK_VENQIMM2 0xfe007fff
++#define MATCH_RDINSTRET 0xc0202073
++#define MASK_RDINSTRET 0xfffff07f
++#define MATCH_C_SWSP 0x8
++#define MASK_C_SWSP 0x1f
++#define MATCH_VLSTW 0x400305b
++#define MASK_VLSTW 0xfe00707f
++#define MATCH_VLSTH 0x200305b
++#define MASK_VLSTH 0xfe00707f
++#define MATCH_VLSTB 0x305b
++#define MASK_VLSTB 0xfe00707f
++#define MATCH_VLSTD 0x600305b
++#define MASK_VLSTD 0xfe00707f
++#define MATCH_ANDI 0x7013
++#define MASK_ANDI 0x707f
++#define MATCH_FMV_X_S 0xe0000053
++#define MASK_FMV_X_S 0xfff0707f
++#define MATCH_CUSTOM0_RD_RS1_RS2 0x700b
++#define MASK_CUSTOM0_RD_RS1_RS2 0x707f
++#define MATCH_FNMADD_S 0x4f
++#define MASK_FNMADD_S 0x600007f
++#define MATCH_LWU 0x6003
++#define MASK_LWU 0x707f
++#define MATCH_CUSTOM0_RS1 0x200b
++#define MASK_CUSTOM0_RS1 0x707f
++#define MATCH_VLSEGSTBU 0x800305b
++#define MASK_VLSEGSTBU 0x1e00707f
++#define MATCH_FNMADD_D 0x200004f
++#define MASK_FNMADD_D 0x600007f
++#define MATCH_FCVT_W_S 0xc0000053
++#define MASK_FCVT_W_S 0xfff0007f
++#define MATCH_C_SRAI 0x1019
++#define MASK_C_SRAI 0x1c1f
++#define MATCH_MULHSU 0x2002033
++#define MASK_MULHSU 0xfe00707f
++#define MATCH_FCVT_D_LU 0xd2300053
++#define MASK_FCVT_D_LU 0xfff0007f
++#define MATCH_FCVT_W_D 0xc2000053
++#define MASK_FCVT_W_D 0xfff0007f
++#define MATCH_FSUB_H 0xc000053
++#define MASK_FSUB_H 0xfe00007f
++#define MATCH_DIVUW 0x200503b
++#define MASK_DIVUW 0xfe00707f
++#define MATCH_SLTI 0x2013
++#define MASK_SLTI 0x707f
++#define MATCH_VLSTBU 0x800305b
++#define MASK_VLSTBU 0xfe00707f
++#define MATCH_SLTU 0x3033
++#define MASK_SLTU 0xfe00707f
++#define MATCH_FLH 0x1007
++#define MASK_FLH 0x707f
++#define MATCH_CUSTOM2_RD_RS1_RS2 0x705b
++#define MASK_CUSTOM2_RD_RS1_RS2 0x707f
++#define MATCH_FLD 0x3007
++#define MASK_FLD 0x707f
++#define MATCH_FSUB_S 0x8000053
++#define MASK_FSUB_S 0xfe00007f
++#define MATCH_FCVT_H_LU 0x6c000053
++#define MASK_FCVT_H_LU 0xfff0007f
++#define MATCH_CUSTOM0 0xb
++#define MASK_CUSTOM0 0x707f
++#define MATCH_CUSTOM1 0x2b
++#define MASK_CUSTOM1 0x707f
++#define MATCH_CUSTOM2 0x5b
++#define MASK_CUSTOM2 0x707f
++#define MATCH_CUSTOM3 0x7b
++#define MASK_CUSTOM3 0x707f
++#define MATCH_VXCPTSAVE 0x302b
++#define MASK_VXCPTSAVE 0xfff07fff
++#define MATCH_VMSV 0x200202b
++#define MASK_VMSV 0xfff0707f
++#define MATCH_FCVT_LU_S 0xc0300053
++#define MASK_FCVT_LU_S 0xfff0007f
++#define MATCH_AUIPC 0x17
++#define MASK_AUIPC 0x7f
++#define MATCH_FRFLAGS 0x102073
++#define MASK_FRFLAGS 0xfffff07f
++#define MATCH_FCVT_LU_D 0xc2300053
++#define MASK_FCVT_LU_D 0xfff0007f
++#define MATCH_CSRRWI 0x5073
++#define MASK_CSRRWI 0x707f
++#define MATCH_FADD_H 0x4000053
++#define MASK_FADD_H 0xfe00007f
++#define MATCH_FSQRT_S 0x58000053
++#define MASK_FSQRT_S 0xfff0007f
++#define MATCH_VXCPTKILL 0x400302b
++#define MASK_VXCPTKILL 0xffffffff
++#define MATCH_STOP 0x5077
++#define MASK_STOP 0xffffffff
++#define MATCH_FSGNJN_S 0x20001053
++#define MASK_FSGNJN_S 0xfe00707f
++#define MATCH_FSGNJN_H 0x34000053
++#define MASK_FSGNJN_H 0xfe00707f
++#define MATCH_FSQRT_D 0x5a000053
++#define MASK_FSQRT_D 0xfff0007f
++#define MATCH_XORI 0x4013
++#define MASK_XORI 0x707f
++#define MATCH_DIVU 0x2005033
++#define MASK_DIVU 0xfe00707f
++#define MATCH_FSGNJN_D 0x22001053
++#define MASK_FSGNJN_D 0xfe00707f
++#define MATCH_FSQRT_H 0x24000053
++#define MASK_FSQRT_H 0xfff0007f
++#define MATCH_VSSEGSTH 0x200307b
++#define MASK_VSSEGSTH 0x1e00707f
++#define MATCH_SW 0x2023
++#define MASK_SW 0x707f
++#define MATCH_VLSTWU 0xc00305b
++#define MASK_VLSTWU 0xfe00707f
++#define MATCH_VFSSEGW 0x1400207b
++#define MASK_VFSSEGW 0x1ff0707f
++#define MATCH_LHU 0x5003
++#define MASK_LHU 0x707f
++#define MATCH_SH 0x1023
++#define MASK_SH 0x707f
++#define MATCH_FMSUB_H 0x4000047
++#define MASK_FMSUB_H 0x600007f
++#define MATCH_VXCPTAUX 0x200402b
++#define MASK_VXCPTAUX 0xfffff07f
++#define MATCH_FMSUB_D 0x2000047
++#define MASK_FMSUB_D 0x600007f
++#define MATCH_VFSSEGD 0x1600207b
++#define MASK_VFSSEGD 0x1ff0707f
++#define MATCH_VLSEGHU 0xa00205b
++#define MASK_VLSEGHU 0x1ff0707f
++#define MATCH_MOVN 0x2007077
++#define MASK_MOVN 0xfe00707f
++#define MATCH_CUSTOM1_RS1 0x202b
++#define MASK_CUSTOM1_RS1 0x707f
++#define MATCH_VLSTHU 0xa00305b
++#define MASK_VLSTHU 0xfe00707f
++#define MATCH_MOVZ 0x7077
++#define MASK_MOVZ 0xfe00707f
++#define MATCH_CSRRW 0x1073
++#define MASK_CSRRW 0x707f
++#define MATCH_LD 0x3003
++#define MASK_LD 0x707f
++#define MATCH_LB 0x3
++#define MASK_LB 0x707f
++#define MATCH_VLWU 0xc00205b
++#define MASK_VLWU 0xfff0707f
++#define MATCH_LH 0x1003
++#define MASK_LH 0x707f
++#define MATCH_LW 0x2003
++#define MASK_LW 0x707f
++#define MATCH_CSRRC 0x3073
++#define MASK_CSRRC 0x707f
++#define MATCH_FCVT_LU_H 0x4c000053
++#define MASK_FCVT_LU_H 0xfff0007f
++#define MATCH_FCVT_S_D 0x40100053
++#define MASK_FCVT_S_D 0xfff0007f
++#define MATCH_BGEU 0x7063
++#define MASK_BGEU 0x707f
++#define MATCH_VFLSTD 0x1600305b
++#define MASK_VFLSTD 0xfe00707f
++#define MATCH_FCVT_S_L 0xd0200053
++#define MASK_FCVT_S_L 0xfff0007f
++#define MATCH_FCVT_S_H 0x84000053
++#define MASK_FCVT_S_H 0xfff0007f
++#define MATCH_FSCSR 0x301073
++#define MASK_FSCSR 0xfff0707f
++#define MATCH_FCVT_S_W 0xd0000053
++#define MASK_FCVT_S_W 0xfff0007f
++#define MATCH_VFLSTW 0x1400305b
++#define MASK_VFLSTW 0xfe00707f
++#define MATCH_VXCPTEVAC 0x600302b
++#define MASK_VXCPTEVAC 0xfff07fff
++#define MATCH_AMOMINU_D 0xc000302f
++#define MASK_AMOMINU_D 0xf800707f
++#define MATCH_FSFLAGS 0x101073
++#define MASK_FSFLAGS 0xfff0707f
++#define MATCH_SRLI 0x5013
++#define MASK_SRLI 0xfc00707f
++#define MATCH_C_SRLI 0x819
++#define MASK_C_SRLI 0x1c1f
++#define MATCH_AMOMINU_W 0xc000202f
++#define MASK_AMOMINU_W 0xf800707f
++#define MATCH_SRLW 0x503b
++#define MASK_SRLW 0xfe00707f
++#define MATCH_VFLSEGW 0x1400205b
++#define MASK_VFLSEGW 0x1ff0707f
++#define MATCH_C_LD0 0x8012
++#define MASK_C_LD0 0x801f
++#define MATCH_VLSEGBU 0x800205b
++#define MASK_VLSEGBU 0x1ff0707f
++#define MATCH_JALR 0x67
++#define MASK_JALR 0x707f
++#define MATCH_BLT 0x4063
++#define MASK_BLT 0x707f
++#define MATCH_CUSTOM2_RD_RS1 0x605b
++#define MASK_CUSTOM2_RD_RS1 0x707f
++#define MATCH_FCLASS_S 0xe0001053
++#define MASK_FCLASS_S 0xfff0707f
++#define MATCH_REM 0x2006033
++#define MASK_REM 0xfe00707f
++#define MATCH_FCLASS_D 0xe2001053
++#define MASK_FCLASS_D 0xfff0707f
++#define MATCH_FMUL_S 0x10000053
++#define MASK_FMUL_S 0xfe00007f
++#define MATCH_RDCYCLEH 0xc8002073
++#define MASK_RDCYCLEH 0xfffff07f
++#define MATCH_VLSEGSTHU 0xa00305b
++#define MASK_VLSEGSTHU 0x1e00707f
++#define MATCH_FMUL_D 0x12000053
++#define MASK_FMUL_D 0xfe00007f
++#define MATCH_ORI 0x6013
++#define MASK_ORI 0x707f
++#define MATCH_FMUL_H 0x14000053
++#define MASK_FMUL_H 0xfe00007f
++#define MATCH_VFLSEGD 0x1600205b
++#define MASK_VFLSEGD 0x1ff0707f
++#define MATCH_FEQ_S 0xa0002053
++#define MASK_FEQ_S 0xfe00707f
++#define MATCH_FSGNJX_D 0x22002053
++#define MASK_FSGNJX_D 0xfe00707f
++#define MATCH_SRAIW 0x4000501b
++#define MASK_SRAIW 0xfe00707f
++#define MATCH_FSGNJX_H 0x3c000053
++#define MASK_FSGNJX_H 0xfe00707f
++#define MATCH_FSGNJX_S 0x20002053
++#define MASK_FSGNJX_S 0xfe00707f
++#define MATCH_FEQ_D 0xa2002053
++#define MASK_FEQ_D 0xfe00707f
++#define MATCH_CUSTOM1_RD_RS1 0x602b
++#define MASK_CUSTOM1_RD_RS1 0x707f
++#define MATCH_FEQ_H 0xac000053
++#define MASK_FEQ_H 0xfe00707f
++#define MATCH_AMOMAXU_D 0xe000302f
++#define MASK_AMOMAXU_D 0xf800707f
++#define MATCH_DIVW 0x200403b
++#define MASK_DIVW 0xfe00707f
++#define MATCH_AMOMAXU_W 0xe000202f
++#define MASK_AMOMAXU_W 0xf800707f
++#define MATCH_SRAI_RV32 0x40005013
++#define MASK_SRAI_RV32 0xfe00707f
++#define MATCH_C_SRLI32 0xc19
++#define MASK_C_SRLI32 0x1c1f
++#define MATCH_VFSSTW 0x1400307b
++#define MASK_VFSSTW 0xfe00707f
++#define MATCH_CUSTOM0_RD 0x400b
++#define MASK_CUSTOM0_RD 0x707f
++#define MATCH_C_BEQ 0x10
++#define MASK_C_BEQ 0x1f
++#define MATCH_VFSSTD 0x1600307b
++#define MASK_VFSSTD 0xfe00707f
++#define MATCH_CUSTOM3_RD_RS1 0x607b
++#define MASK_CUSTOM3_RD_RS1 0x707f
++#define MATCH_LR_D 0x1000302f
++#define MASK_LR_D 0xf9f0707f
++#define MATCH_LR_W 0x1000202f
++#define MASK_LR_W 0xf9f0707f
++#define MATCH_FCVT_H_WU 0x7c000053
++#define MASK_FCVT_H_WU 0xfff0007f
++#define MATCH_VMVV 0x200002b
++#define MASK_VMVV 0xfff0707f
++#define MATCH_SLLW 0x103b
++#define MASK_SLLW 0xfe00707f
++#define MATCH_SLLI 0x1013
++#define MASK_SLLI 0xfc00707f
++#define MATCH_BEQ 0x63
++#define MASK_BEQ 0x707f
++#define MATCH_AND 0x7033
++#define MASK_AND 0xfe00707f
++#define MATCH_LBU 0x4003
++#define MASK_LBU 0x707f
++#define MATCH_FSGNJ_S 0x20000053
++#define MASK_FSGNJ_S 0xfe00707f
++#define MATCH_FMSUB_S 0x47
++#define MASK_FMSUB_S 0x600007f
++#define MATCH_C_SUB3 0x11c
++#define MASK_C_SUB3 0x31f
++#define MATCH_FSGNJ_H 0x2c000053
++#define MASK_FSGNJ_H 0xfe00707f
++#define MATCH_VLB 0x205b
++#define MASK_VLB 0xfff0707f
++#define MATCH_C_ADDIW 0x1d
++#define MASK_C_ADDIW 0x1f
++#define MATCH_CUSTOM3_RS1_RS2 0x307b
++#define MASK_CUSTOM3_RS1_RS2 0x707f
++#define MATCH_FSGNJ_D 0x22000053
++#define MASK_FSGNJ_D 0xfe00707f
++#define MATCH_VLSEGWU 0xc00205b
++#define MASK_VLSEGWU 0x1ff0707f
++#define MATCH_FCVT_S_WU 0xd0100053
++#define MASK_FCVT_S_WU 0xfff0007f
++#define MATCH_CUSTOM3_RS1 0x207b
++#define MASK_CUSTOM3_RS1 0x707f
++#define MATCH_SC_D 0x1800302f
++#define MASK_SC_D 0xf800707f
++#define MATCH_VFSW 0x1400207b
++#define MASK_VFSW 0xfff0707f
++#define MATCH_AMOSWAP_D 0x800302f
++#define MASK_AMOSWAP_D 0xf800707f
++#define MATCH_SB 0x23
++#define MASK_SB 0x707f
++#define MATCH_AMOSWAP_W 0x800202f
++#define MASK_AMOSWAP_W 0xf800707f
++#define MATCH_VFSD 0x1600207b
++#define MASK_VFSD 0xfff0707f
++#define MATCH_CUSTOM2_RS1 0x205b
++#define MASK_CUSTOM2_RS1 0x707f
++#define MATCH_SD 0x3023
++#define MASK_SD 0x707f
++#define MATCH_FMV_S_X 0xf0000053
++#define MASK_FMV_S_X 0xfff0707f
++#define MATCH_REMUW 0x200703b
++#define MASK_REMUW 0xfe00707f
++#define MATCH_JAL 0x6f
++#define MASK_JAL 0x7f
++#define MATCH_C_FSD 0x18
++#define MASK_C_FSD 0x1f
++#define MATCH_RDCYCLE 0xc0002073
++#define MASK_RDCYCLE 0xfffff07f
++#define MATCH_C_BNE 0x11
++#define MASK_C_BNE 0x1f
++#define MATCH_C_ADD 0x1a
++#define MASK_C_ADD 0x801f
++#define MATCH_VXCPTCAUSE 0x402b
++#define MASK_VXCPTCAUSE 0xfffff07f
++#define MATCH_VGETCFG 0x400b
++#define MASK_VGETCFG 0xfffff07f
++#define MATCH_LUI 0x37
++#define MASK_LUI 0x7f
++#define MATCH_VSETCFG 0x200b
++#define MASK_VSETCFG 0x7fff
++#define MATCH_C_SDSP 0x6
++#define MASK_C_SDSP 0x1f
++#define MATCH_C_LDSP 0x4
++#define MASK_C_LDSP 0x1f
++#define MATCH_FNMADD_H 0x400004f
++#define MASK_FNMADD_H 0x600007f
++#define MATCH_CUSTOM0_RS1_RS2 0x300b
++#define MASK_CUSTOM0_RS1_RS2 0x707f
++#define MATCH_SLLI_RV32 0x1013
++#define MASK_SLLI_RV32 0xfe00707f
++#define MATCH_MUL 0x2000033
++#define MASK_MUL 0xfe00707f
++#define MATCH_CSRRCI 0x7073
++#define MASK_CSRRCI 0x707f
++#define MATCH_C_SRAI32 0x1419
++#define MASK_C_SRAI32 0x1c1f
++#define MATCH_FLT_H 0xb4000053
++#define MASK_FLT_H 0xfe00707f
++#define MATCH_SRAI 0x40005013
++#define MASK_SRAI 0xfc00707f
++#define MATCH_AMOAND_D 0x6000302f
++#define MASK_AMOAND_D 0xf800707f
++#define MATCH_FLT_D 0xa2001053
++#define MASK_FLT_D 0xfe00707f
++#define MATCH_SRAW 0x4000503b
++#define MASK_SRAW 0xfe00707f
++#define MATCH_CSRRS 0x2073
++#define MASK_CSRRS 0x707f
++#define MATCH_FLT_S 0xa0001053
++#define MASK_FLT_S 0xfe00707f
++#define MATCH_ADDIW 0x1b
++#define MASK_ADDIW 0x707f
++#define MATCH_AMOAND_W 0x6000202f
++#define MASK_AMOAND_W 0xf800707f
++#define MATCH_CUSTOM2_RD 0x405b
++#define MASK_CUSTOM2_RD 0x707f
++#define MATCH_FCVT_WU_D 0xc2100053
++#define MASK_FCVT_WU_D 0xfff0007f
++#define MATCH_AMOXOR_W 0x2000202f
++#define MASK_AMOXOR_W 0xf800707f
++#define MATCH_FCVT_D_L 0xd2200053
++#define MASK_FCVT_D_L 0xfff0007f
++#define MATCH_FCVT_WU_H 0x5c000053
++#define MASK_FCVT_WU_H 0xfff0007f
++#define MATCH_C_SLLI 0x19
++#define MASK_C_SLLI 0x1c1f
++#define MATCH_AMOXOR_D 0x2000302f
++#define MASK_AMOXOR_D 0xf800707f
++#define MATCH_FCVT_WU_S 0xc0100053
++#define MASK_FCVT_WU_S 0xfff0007f
++#define MATCH_CUSTOM3_RD 0x407b
++#define MASK_CUSTOM3_RD 0x707f
++#define MATCH_FMAX_H 0xcc000053
++#define MASK_FMAX_H 0xfe00707f
++#define MATCH_VENQCNT 0x1000302b
++#define MASK_VENQCNT 0xfe007fff
++#define MATCH_VLBU 0x800205b
++#define MASK_VLBU 0xfff0707f
++#define MATCH_VLHU 0xa00205b
++#define MASK_VLHU 0xfff0707f
++#define MATCH_C_SW 0xd
++#define MASK_C_SW 0x1f
++#define MATCH_C_SD 0xc
++#define MASK_C_SD 0x1f
++#define MATCH_C_OR3 0x21c
++#define MASK_C_OR3 0x31f
++#define MATCH_C_AND3 0x31c
++#define MASK_C_AND3 0x31f
++#define MATCH_VFSSEGSTW 0x1400307b
++#define MASK_VFSSEGSTW 0x1e00707f
++#define MATCH_SLT 0x2033
++#define MASK_SLT 0xfe00707f
++#define MATCH_AMOOR_D 0x4000302f
++#define MASK_AMOOR_D 0xf800707f
++#define MATCH_REMU 0x2007033
++#define MASK_REMU 0xfe00707f
++#define MATCH_REMW 0x200603b
++#define MASK_REMW 0xfe00707f
++#define MATCH_SLL 0x1033
++#define MASK_SLL 0xfe00707f
++#define MATCH_VFSSEGSTD 0x1600307b
++#define MASK_VFSSEGSTD 0x1e00707f
++#define MATCH_AMOOR_W 0x4000202f
++#define MASK_AMOOR_W 0xf800707f
++#define MATCH_CUSTOM2_RS1_RS2 0x305b
++#define MASK_CUSTOM2_RS1_RS2 0x707f
++#define MATCH_VF 0x10202b
++#define MASK_VF 0x1f0707f
++#define MATCH_VFMVV 0x1000002b
++#define MASK_VFMVV 0xfff0707f
++#define MATCH_VFLSEGSTW 0x1400305b
++#define MASK_VFLSEGSTW 0x1e00707f
++#define MATCH_VXCPTRESTORE 0x200302b
++#define MASK_VXCPTRESTORE 0xfff07fff
++#define MATCH_VXCPTHOLD 0x800302b
++#define MASK_VXCPTHOLD 0xffffffff
++#define MATCH_SLTIU 0x3013
++#define MASK_SLTIU 0x707f
++#define MATCH_VFLSEGSTD 0x1600305b
++#define MASK_VFLSEGSTD 0x1e00707f
++#define MATCH_VFLD 0x1600205b
++#define MASK_VFLD 0xfff0707f
++#define MATCH_FMADD_S 0x43
++#define MASK_FMADD_S 0x600007f
++#define MATCH_VFLW 0x1400205b
++#define MASK_VFLW 0xfff0707f
++#define MATCH_FMADD_D 0x2000043
++#define MASK_FMADD_D 0x600007f
++#define MATCH_FMADD_H 0x4000043
++#define MASK_FMADD_H 0x600007f
++#define MATCH_SRET 0x80000073
++#define MASK_SRET 0xffffffff
++#define MATCH_VSSEGW 0x400207b
++#define MASK_VSSEGW 0x1ff0707f
++#define MATCH_CUSTOM0_RD_RS1 0x600b
++#define MASK_CUSTOM0_RD_RS1 0x707f
++#define MATCH_VSSEGH 0x200207b
++#define MASK_VSSEGH 0x1ff0707f
++#define MATCH_FRCSR 0x302073
++#define MASK_FRCSR 0xfffff07f
++#define MATCH_VSSEGD 0x600207b
++#define MASK_VSSEGD 0x1ff0707f
++#define MATCH_VSSEGB 0x207b
++#define MASK_VSSEGB 0x1ff0707f
++#define MATCH_FMIN_H 0xc4000053
++#define MASK_FMIN_H 0xfe00707f
++#define MATCH_FMIN_D 0x2a000053
++#define MASK_FMIN_D 0xfe00707f
++#define MATCH_BLTU 0x6063
++#define MASK_BLTU 0x707f
++#define MATCH_FMIN_S 0x28000053
++#define MASK_FMIN_S 0xfe00707f
++#define MATCH_SRLI_RV32 0x5013
++#define MASK_SRLI_RV32 0xfe00707f
++#define MATCH_SLLIW 0x101b
++#define MASK_SLLIW 0xfe00707f
++#define MATCH_FMAX_S 0x28001053
++#define MASK_FMAX_S 0xfe00707f
++#define MATCH_FCVT_D_H 0x8c000053
++#define MASK_FCVT_D_H 0xfff0007f
++#define MATCH_FCVT_D_W 0xd2000053
++#define MASK_FCVT_D_W 0xfff0007f
++#define MATCH_ADD 0x33
++#define MASK_ADD 0xfe00707f
++#define MATCH_FCVT_D_S 0x42000053
++#define MASK_FCVT_D_S 0xfff0007f
++#define MATCH_FMAX_D 0x2a001053
++#define MASK_FMAX_D 0xfe00707f
++#define MATCH_BNE 0x1063
++#define MASK_BNE 0x707f
++#define MATCH_CUSTOM1_RD 0x402b
++#define MASK_CUSTOM1_RD 0x707f
++#define MATCH_FSRM 0x201073
++#define MASK_FSRM 0xfff0707f
++#define MATCH_FDIV_D 0x1a000053
++#define MASK_FDIV_D 0xfe00007f
++#define MATCH_VSW 0x400207b
++#define MASK_VSW 0xfff0707f
++#define MATCH_FCVT_L_S 0xc0200053
++#define MASK_FCVT_L_S 0xfff0007f
++#define MATCH_FDIV_H 0x1c000053
++#define MASK_FDIV_H 0xfe00007f
++#define MATCH_VSB 0x207b
++#define MASK_VSB 0xfff0707f
++#define MATCH_FDIV_S 0x18000053
++#define MASK_FDIV_S 0xfe00007f
++#define MATCH_FSRMI 0x205073
++#define MASK_FSRMI 0xfff0707f
++#define MATCH_FCVT_L_H 0x44000053
++#define MASK_FCVT_L_H 0xfff0007f
++#define MATCH_VSH 0x200207b
++#define MASK_VSH 0xfff0707f
++#define MATCH_FCVT_L_D 0xc2200053
++#define MASK_FCVT_L_D 0xfff0007f
++#define MATCH_FCVT_H_S 0x90000053
++#define MASK_FCVT_H_S 0xfff0007f
++#define MATCH_SCALL 0x73
++#define MASK_SCALL 0xffffffff
++#define MATCH_FSFLAGSI 0x105073
++#define MASK_FSFLAGSI 0xfff0707f
++#define MATCH_FCVT_H_W 0x74000053
++#define MASK_FCVT_H_W 0xfff0007f
++#define MATCH_FCVT_H_L 0x64000053
++#define MASK_FCVT_H_L 0xfff0007f
++#define MATCH_SRLIW 0x501b
++#define MASK_SRLIW 0xfe00707f
++#define MATCH_FCVT_S_LU 0xd0300053
++#define MASK_FCVT_S_LU 0xfff0007f
++#define MATCH_FCVT_H_D 0x92000053
++#define MASK_FCVT_H_D 0xfff0007f
++#define MATCH_SBREAK 0x100073
++#define MASK_SBREAK 0xffffffff
++#define MATCH_RDINSTRETH 0xc8202073
++#define MASK_RDINSTRETH 0xfffff07f
++#define MATCH_SRA 0x40005033
++#define MASK_SRA 0xfe00707f
++#define MATCH_BGE 0x5063
++#define MASK_BGE 0x707f
++#define MATCH_SRL 0x5033
++#define MASK_SRL 0xfe00707f
++#define MATCH_VENQCMD 0xa00302b
++#define MASK_VENQCMD 0xfe007fff
++#define MATCH_OR 0x6033
++#define MASK_OR 0xfe00707f
++#define MATCH_SUBW 0x4000003b
++#define MASK_SUBW 0xfe00707f
++#define MATCH_FMV_X_D 0xe2000053
++#define MASK_FMV_X_D 0xfff0707f
++#define MATCH_RDTIME 0xc0102073
++#define MASK_RDTIME 0xfffff07f
++#define MATCH_AMOADD_D 0x302f
++#define MASK_AMOADD_D 0xf800707f
++#define MATCH_AMOMAX_W 0xa000202f
++#define MASK_AMOMAX_W 0xf800707f
++#define MATCH_C_MOVE 0x2
++#define MASK_C_MOVE 0x801f
++#define MATCH_FMOVN 0x6007077
++#define MASK_FMOVN 0xfe00707f
++#define MATCH_C_FSW 0x16
++#define MASK_C_FSW 0x1f
++#define MATCH_AMOADD_W 0x202f
++#define MASK_AMOADD_W 0xf800707f
++#define MATCH_AMOMAX_D 0xa000302f
++#define MASK_AMOMAX_D 0xf800707f
++#define MATCH_FMOVZ 0x4007077
++#define MASK_FMOVZ 0xfe00707f
++#define MATCH_CUSTOM1_RS1_RS2 0x302b
++#define MASK_CUSTOM1_RS1_RS2 0x707f
++#define MATCH_FMV_X_H 0xe4000053
++#define MASK_FMV_X_H 0xfff0707f
++#define MATCH_VSD 0x600207b
++#define MASK_VSD 0xfff0707f
++#define MATCH_VLSEGSTW 0x400305b
++#define MASK_VLSEGSTW 0x1e00707f
++#define MATCH_C_ADDI 0x1
++#define MASK_C_ADDI 0x1f
++#define MATCH_C_SLLIW 0x1819
++#define MASK_C_SLLIW 0x1c1f
++#define MATCH_VLSEGSTB 0x305b
++#define MASK_VLSEGSTB 0x1e00707f
++#define MATCH_VLSEGSTD 0x600305b
++#define MASK_VLSEGSTD 0x1e00707f
++#define MATCH_VLSEGSTH 0x200305b
++#define MASK_VLSEGSTH 0x1e00707f
++#define MATCH_MULHU 0x2003033
++#define MASK_MULHU 0xfe00707f
++#define MATCH_AMOMIN_W 0x8000202f
++#define MASK_AMOMIN_W 0xf800707f
++#define MATCH_C_SLLI32 0x419
++#define MASK_C_SLLI32 0x1c1f
++#define MATCH_C_ADD3 0x1c
++#define MASK_C_ADD3 0x31f
++#define MATCH_VGETVL 0x200400b
++#define MASK_VGETVL 0xfffff07f
++#define MATCH_AMOMIN_D 0x8000302f
++#define MASK_AMOMIN_D 0xf800707f
++#define MATCH_FCVT_W_H 0x54000053
++#define MASK_FCVT_W_H 0xfff0007f
++#define MATCH_VLSEGB 0x205b
++#define MASK_VLSEGB 0x1ff0707f
++#define MATCH_FSD 0x3027
++#define MASK_FSD 0x707f
++#define MATCH_VLSEGD 0x600205b
++#define MASK_VLSEGD 0x1ff0707f
++#define MATCH_FSH 0x1027
++#define MASK_FSH 0x707f
++#define MATCH_VLSEGH 0x200205b
++#define MASK_VLSEGH 0x1ff0707f
++#define MATCH_C_SUB 0x801a
++#define MASK_C_SUB 0x801f
++#define MATCH_VLSEGW 0x400205b
++#define MASK_VLSEGW 0x1ff0707f
++#define MATCH_FSW 0x2027
++#define MASK_FSW 0x707f
++#define MATCH_C_J 0x8002
++#define MASK_C_J 0x801f
++#define CSR_FFLAGS 0x1
++#define CSR_FRM 0x2
++#define CSR_FCSR 0x3
++#define CSR_STATS 0xc0
++#define CSR_SUP0 0x500
++#define CSR_SUP1 0x501
++#define CSR_EPC 0x502
++#define CSR_BADVADDR 0x503
++#define CSR_PTBR 0x504
++#define CSR_ASID 0x505
++#define CSR_COUNT 0x506
++#define CSR_COMPARE 0x507
++#define CSR_EVEC 0x508
++#define CSR_CAUSE 0x509
++#define CSR_STATUS 0x50a
++#define CSR_HARTID 0x50b
++#define CSR_IMPL 0x50c
++#define CSR_FATC 0x50d
++#define CSR_SEND_IPI 0x50e
++#define CSR_CLEAR_IPI 0x50f
++#define CSR_RESET 0x51d
++#define CSR_TOHOST 0x51e
++#define CSR_FROMHOST 0x51f
++#define CSR_CYCLE 0xc00
++#define CSR_TIME 0xc01
++#define CSR_INSTRET 0xc02
++#define CSR_UARCH0 0xcc0
++#define CSR_UARCH1 0xcc1
++#define CSR_UARCH2 0xcc2
++#define CSR_UARCH3 0xcc3
++#define CSR_UARCH4 0xcc4
++#define CSR_UARCH5 0xcc5
++#define CSR_UARCH6 0xcc6
++#define CSR_UARCH7 0xcc7
++#define CSR_UARCH8 0xcc8
++#define CSR_UARCH9 0xcc9
++#define CSR_UARCH10 0xcca
++#define CSR_UARCH11 0xccb
++#define CSR_UARCH12 0xccc
++#define CSR_UARCH13 0xccd
++#define CSR_UARCH14 0xcce
++#define CSR_UARCH15 0xccf
++#define CSR_COUNTH 0x586
++#define CSR_CYCLEH 0xc80
++#define CSR_TIMEH 0xc81
++#define CSR_INSTRETH 0xc82
++#define CAUSE_MISALIGNED_FETCH 0x0
++#define CAUSE_FAULT_FETCH 0x1
++#define CAUSE_ILLEGAL_INSTRUCTION 0x2
++#define CAUSE_PRIVILEGED_INSTRUCTION 0x3
++#define CAUSE_FP_DISABLED 0x4
++#define CAUSE_SYSCALL 0x6
++#define CAUSE_BREAKPOINT 0x7
++#define CAUSE_MISALIGNED_LOAD 0x8
++#define CAUSE_MISALIGNED_STORE 0x9
++#define CAUSE_FAULT_LOAD 0xa
++#define CAUSE_FAULT_STORE 0xb
++#define CAUSE_ACCELERATOR_DISABLED 0xc
++#endif
++#ifdef DECLARE_INSN
++DECLARE_INSN(custom3_rd_rs1_rs2, MATCH_CUSTOM3_RD_RS1_RS2, MASK_CUSTOM3_RD_RS1_RS2)
++DECLARE_INSN(vlsegstwu, MATCH_VLSEGSTWU, MASK_VLSEGSTWU)
++DECLARE_INSN(c_lw0, MATCH_C_LW0, MASK_C_LW0)
++DECLARE_INSN(fmv_d_x, MATCH_FMV_D_X, MASK_FMV_D_X)
++DECLARE_INSN(vlh, MATCH_VLH, MASK_VLH)
++DECLARE_INSN(c_li, MATCH_C_LI, MASK_C_LI)
++DECLARE_INSN(fadd_d, MATCH_FADD_D, MASK_FADD_D)
++DECLARE_INSN(c_ld, MATCH_C_LD, MASK_C_LD)
++DECLARE_INSN(vld, MATCH_VLD, MASK_VLD)
++DECLARE_INSN(fadd_s, MATCH_FADD_S, MASK_FADD_S)
++DECLARE_INSN(c_lw, MATCH_C_LW, MASK_C_LW)
++DECLARE_INSN(vlw, MATCH_VLW, MASK_VLW)
++DECLARE_INSN(vssegstw, MATCH_VSSEGSTW, MASK_VSSEGSTW)
++DECLARE_INSN(utidx, MATCH_UTIDX, MASK_UTIDX)
++DECLARE_INSN(c_flw, MATCH_C_FLW, MASK_C_FLW)
++DECLARE_INSN(fsub_d, MATCH_FSUB_D, MASK_FSUB_D)
++DECLARE_INSN(vssegstd, MATCH_VSSEGSTD, MASK_VSSEGSTD)
++DECLARE_INSN(vssegstb, MATCH_VSSEGSTB, MASK_VSSEGSTB)
++DECLARE_INSN(div, MATCH_DIV, MASK_DIV)
++DECLARE_INSN(fmv_h_x, MATCH_FMV_H_X, MASK_FMV_H_X)
++DECLARE_INSN(c_fld, MATCH_C_FLD, MASK_C_FLD)
++DECLARE_INSN(frrm, MATCH_FRRM, MASK_FRRM)
++DECLARE_INSN(vfmsv_s, MATCH_VFMSV_S, MASK_VFMSV_S)
++DECLARE_INSN(c_lwsp, MATCH_C_LWSP, MASK_C_LWSP)
++DECLARE_INSN(fence, MATCH_FENCE, MASK_FENCE)
++DECLARE_INSN(fnmsub_s, MATCH_FNMSUB_S, MASK_FNMSUB_S)
++DECLARE_INSN(fle_s, MATCH_FLE_S, MASK_FLE_S)
++DECLARE_INSN(fnmsub_h, MATCH_FNMSUB_H, MASK_FNMSUB_H)
++DECLARE_INSN(fle_h, MATCH_FLE_H, MASK_FLE_H)
++DECLARE_INSN(flw, MATCH_FLW, MASK_FLW)
++DECLARE_INSN(vsetvl, MATCH_VSETVL, MASK_VSETVL)
++DECLARE_INSN(vfmsv_d, MATCH_VFMSV_D, MASK_VFMSV_D)
++DECLARE_INSN(fle_d, MATCH_FLE_D, MASK_FLE_D)
++DECLARE_INSN(fence_i, MATCH_FENCE_I, MASK_FENCE_I)
++DECLARE_INSN(fnmsub_d, MATCH_FNMSUB_D, MASK_FNMSUB_D)
++DECLARE_INSN(addw, MATCH_ADDW, MASK_ADDW)
++DECLARE_INSN(xor, MATCH_XOR, MASK_XOR)
++DECLARE_INSN(sub, MATCH_SUB, MASK_SUB)
++DECLARE_INSN(vsstw, MATCH_VSSTW, MASK_VSSTW)
++DECLARE_INSN(vssth, MATCH_VSSTH, MASK_VSSTH)
++DECLARE_INSN(sc_w, MATCH_SC_W, MASK_SC_W)
++DECLARE_INSN(vsstb, MATCH_VSSTB, MASK_VSSTB)
++DECLARE_INSN(vsstd, MATCH_VSSTD, MASK_VSSTD)
++DECLARE_INSN(addi, MATCH_ADDI, MASK_ADDI)
++DECLARE_INSN(rdtimeh, MATCH_RDTIMEH, MASK_RDTIMEH)
++DECLARE_INSN(mulh, MATCH_MULH, MASK_MULH)
++DECLARE_INSN(csrrsi, MATCH_CSRRSI, MASK_CSRRSI)
++DECLARE_INSN(fcvt_d_wu, MATCH_FCVT_D_WU, MASK_FCVT_D_WU)
++DECLARE_INSN(mulw, MATCH_MULW, MASK_MULW)
++DECLARE_INSN(custom1_rd_rs1_rs2, MATCH_CUSTOM1_RD_RS1_RS2, MASK_CUSTOM1_RD_RS1_RS2)
++DECLARE_INSN(venqimm1, MATCH_VENQIMM1, MASK_VENQIMM1)
++DECLARE_INSN(venqimm2, MATCH_VENQIMM2, MASK_VENQIMM2)
++DECLARE_INSN(rdinstret, MATCH_RDINSTRET, MASK_RDINSTRET)
++DECLARE_INSN(c_swsp, MATCH_C_SWSP, MASK_C_SWSP)
++DECLARE_INSN(vlstw, MATCH_VLSTW, MASK_VLSTW)
++DECLARE_INSN(vlsth, MATCH_VLSTH, MASK_VLSTH)
++DECLARE_INSN(vlstb, MATCH_VLSTB, MASK_VLSTB)
++DECLARE_INSN(vlstd, MATCH_VLSTD, MASK_VLSTD)
++DECLARE_INSN(andi, MATCH_ANDI, MASK_ANDI)
++DECLARE_INSN(fmv_x_s, MATCH_FMV_X_S, MASK_FMV_X_S)
++DECLARE_INSN(custom0_rd_rs1_rs2, MATCH_CUSTOM0_RD_RS1_RS2, MASK_CUSTOM0_RD_RS1_RS2)
++DECLARE_INSN(fnmadd_s, MATCH_FNMADD_S, MASK_FNMADD_S)
++DECLARE_INSN(lwu, MATCH_LWU, MASK_LWU)
++DECLARE_INSN(custom0_rs1, MATCH_CUSTOM0_RS1, MASK_CUSTOM0_RS1)
++DECLARE_INSN(vlsegstbu, MATCH_VLSEGSTBU, MASK_VLSEGSTBU)
++DECLARE_INSN(fnmadd_d, MATCH_FNMADD_D, MASK_FNMADD_D)
++DECLARE_INSN(fcvt_w_s, MATCH_FCVT_W_S, MASK_FCVT_W_S)
++DECLARE_INSN(c_srai, MATCH_C_SRAI, MASK_C_SRAI)
++DECLARE_INSN(mulhsu, MATCH_MULHSU, MASK_MULHSU)
++DECLARE_INSN(fcvt_d_lu, MATCH_FCVT_D_LU, MASK_FCVT_D_LU)
++DECLARE_INSN(fcvt_w_d, MATCH_FCVT_W_D, MASK_FCVT_W_D)
++DECLARE_INSN(fsub_h, MATCH_FSUB_H, MASK_FSUB_H)
++DECLARE_INSN(divuw, MATCH_DIVUW, MASK_DIVUW)
++DECLARE_INSN(slti, MATCH_SLTI, MASK_SLTI)
++DECLARE_INSN(vlstbu, MATCH_VLSTBU, MASK_VLSTBU)
++DECLARE_INSN(sltu, MATCH_SLTU, MASK_SLTU)
++DECLARE_INSN(flh, MATCH_FLH, MASK_FLH)
++DECLARE_INSN(custom2_rd_rs1_rs2, MATCH_CUSTOM2_RD_RS1_RS2, MASK_CUSTOM2_RD_RS1_RS2)
++DECLARE_INSN(fld, MATCH_FLD, MASK_FLD)
++DECLARE_INSN(fsub_s, MATCH_FSUB_S, MASK_FSUB_S)
++DECLARE_INSN(fcvt_h_lu, MATCH_FCVT_H_LU, MASK_FCVT_H_LU)
++DECLARE_INSN(custom0, MATCH_CUSTOM0, MASK_CUSTOM0)
++DECLARE_INSN(custom1, MATCH_CUSTOM1, MASK_CUSTOM1)
++DECLARE_INSN(custom2, MATCH_CUSTOM2, MASK_CUSTOM2)
++DECLARE_INSN(custom3, MATCH_CUSTOM3, MASK_CUSTOM3)
++DECLARE_INSN(vxcptsave, MATCH_VXCPTSAVE, MASK_VXCPTSAVE)
++DECLARE_INSN(vmsv, MATCH_VMSV, MASK_VMSV)
++DECLARE_INSN(fcvt_lu_s, MATCH_FCVT_LU_S, MASK_FCVT_LU_S)
++DECLARE_INSN(auipc, MATCH_AUIPC, MASK_AUIPC)
++DECLARE_INSN(frflags, MATCH_FRFLAGS, MASK_FRFLAGS)
++DECLARE_INSN(fcvt_lu_d, MATCH_FCVT_LU_D, MASK_FCVT_LU_D)
++DECLARE_INSN(csrrwi, MATCH_CSRRWI, MASK_CSRRWI)
++DECLARE_INSN(fadd_h, MATCH_FADD_H, MASK_FADD_H)
++DECLARE_INSN(fsqrt_s, MATCH_FSQRT_S, MASK_FSQRT_S)
++DECLARE_INSN(vxcptkill, MATCH_VXCPTKILL, MASK_VXCPTKILL)
++DECLARE_INSN(stop, MATCH_STOP, MASK_STOP)
++DECLARE_INSN(fsgnjn_s, MATCH_FSGNJN_S, MASK_FSGNJN_S)
++DECLARE_INSN(fsgnjn_h, MATCH_FSGNJN_H, MASK_FSGNJN_H)
++DECLARE_INSN(fsqrt_d, MATCH_FSQRT_D, MASK_FSQRT_D)
++DECLARE_INSN(xori, MATCH_XORI, MASK_XORI)
++DECLARE_INSN(divu, MATCH_DIVU, MASK_DIVU)
++DECLARE_INSN(fsgnjn_d, MATCH_FSGNJN_D, MASK_FSGNJN_D)
++DECLARE_INSN(fsqrt_h, MATCH_FSQRT_H, MASK_FSQRT_H)
++DECLARE_INSN(vssegsth, MATCH_VSSEGSTH, MASK_VSSEGSTH)
++DECLARE_INSN(sw, MATCH_SW, MASK_SW)
++DECLARE_INSN(vlstwu, MATCH_VLSTWU, MASK_VLSTWU)
++DECLARE_INSN(vfssegw, MATCH_VFSSEGW, MASK_VFSSEGW)
++DECLARE_INSN(lhu, MATCH_LHU, MASK_LHU)
++DECLARE_INSN(sh, MATCH_SH, MASK_SH)
++DECLARE_INSN(fmsub_h, MATCH_FMSUB_H, MASK_FMSUB_H)
++DECLARE_INSN(vxcptaux, MATCH_VXCPTAUX, MASK_VXCPTAUX)
++DECLARE_INSN(fmsub_d, MATCH_FMSUB_D, MASK_FMSUB_D)
++DECLARE_INSN(vfssegd, MATCH_VFSSEGD, MASK_VFSSEGD)
++DECLARE_INSN(vlseghu, MATCH_VLSEGHU, MASK_VLSEGHU)
++DECLARE_INSN(movn, MATCH_MOVN, MASK_MOVN)
++DECLARE_INSN(custom1_rs1, MATCH_CUSTOM1_RS1, MASK_CUSTOM1_RS1)
++DECLARE_INSN(vlsthu, MATCH_VLSTHU, MASK_VLSTHU)
++DECLARE_INSN(movz, MATCH_MOVZ, MASK_MOVZ)
++DECLARE_INSN(csrrw, MATCH_CSRRW, MASK_CSRRW)
++DECLARE_INSN(ld, MATCH_LD, MASK_LD)
++DECLARE_INSN(lb, MATCH_LB, MASK_LB)
++DECLARE_INSN(vlwu, MATCH_VLWU, MASK_VLWU)
++DECLARE_INSN(lh, MATCH_LH, MASK_LH)
++DECLARE_INSN(lw, MATCH_LW, MASK_LW)
++DECLARE_INSN(csrrc, MATCH_CSRRC, MASK_CSRRC)
++DECLARE_INSN(fcvt_lu_h, MATCH_FCVT_LU_H, MASK_FCVT_LU_H)
++DECLARE_INSN(fcvt_s_d, MATCH_FCVT_S_D, MASK_FCVT_S_D)
++DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
++DECLARE_INSN(vflstd, MATCH_VFLSTD, MASK_VFLSTD)
++DECLARE_INSN(fcvt_s_l, MATCH_FCVT_S_L, MASK_FCVT_S_L)
++DECLARE_INSN(fcvt_s_h, MATCH_FCVT_S_H, MASK_FCVT_S_H)
++DECLARE_INSN(fscsr, MATCH_FSCSR, MASK_FSCSR)
++DECLARE_INSN(fcvt_s_w, MATCH_FCVT_S_W, MASK_FCVT_S_W)
++DECLARE_INSN(vflstw, MATCH_VFLSTW, MASK_VFLSTW)
++DECLARE_INSN(vxcptevac, MATCH_VXCPTEVAC, MASK_VXCPTEVAC)
++DECLARE_INSN(amominu_d, MATCH_AMOMINU_D, MASK_AMOMINU_D)
++DECLARE_INSN(fsflags, MATCH_FSFLAGS, MASK_FSFLAGS)
++DECLARE_INSN(srli, MATCH_SRLI, MASK_SRLI)
++DECLARE_INSN(c_srli, MATCH_C_SRLI, MASK_C_SRLI)
++DECLARE_INSN(amominu_w, MATCH_AMOMINU_W, MASK_AMOMINU_W)
++DECLARE_INSN(srlw, MATCH_SRLW, MASK_SRLW)
++DECLARE_INSN(vflsegw, MATCH_VFLSEGW, MASK_VFLSEGW)
++DECLARE_INSN(c_ld0, MATCH_C_LD0, MASK_C_LD0)
++DECLARE_INSN(vlsegbu, MATCH_VLSEGBU, MASK_VLSEGBU)
++DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
++DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
++DECLARE_INSN(custom2_rd_rs1, MATCH_CUSTOM2_RD_RS1, MASK_CUSTOM2_RD_RS1)
++DECLARE_INSN(fclass_s, MATCH_FCLASS_S, MASK_FCLASS_S)
++DECLARE_INSN(rem, MATCH_REM, MASK_REM)
++DECLARE_INSN(fclass_d, MATCH_FCLASS_D, MASK_FCLASS_D)
++DECLARE_INSN(fmul_s, MATCH_FMUL_S, MASK_FMUL_S)
++DECLARE_INSN(rdcycleh, MATCH_RDCYCLEH, MASK_RDCYCLEH)
++DECLARE_INSN(vlsegsthu, MATCH_VLSEGSTHU, MASK_VLSEGSTHU)
++DECLARE_INSN(fmul_d, MATCH_FMUL_D, MASK_FMUL_D)
++DECLARE_INSN(ori, MATCH_ORI, MASK_ORI)
++DECLARE_INSN(fmul_h, MATCH_FMUL_H, MASK_FMUL_H)
++DECLARE_INSN(vflsegd, MATCH_VFLSEGD, MASK_VFLSEGD)
++DECLARE_INSN(feq_s, MATCH_FEQ_S, MASK_FEQ_S)
++DECLARE_INSN(fsgnjx_d, MATCH_FSGNJX_D, MASK_FSGNJX_D)
++DECLARE_INSN(sraiw, MATCH_SRAIW, MASK_SRAIW)
++DECLARE_INSN(fsgnjx_h, MATCH_FSGNJX_H, MASK_FSGNJX_H)
++DECLARE_INSN(fsgnjx_s, MATCH_FSGNJX_S, MASK_FSGNJX_S)
++DECLARE_INSN(feq_d, MATCH_FEQ_D, MASK_FEQ_D)
++DECLARE_INSN(custom1_rd_rs1, MATCH_CUSTOM1_RD_RS1, MASK_CUSTOM1_RD_RS1)
++DECLARE_INSN(feq_h, MATCH_FEQ_H, MASK_FEQ_H)
++DECLARE_INSN(amomaxu_d, MATCH_AMOMAXU_D, MASK_AMOMAXU_D)
++DECLARE_INSN(divw, MATCH_DIVW, MASK_DIVW)
++DECLARE_INSN(amomaxu_w, MATCH_AMOMAXU_W, MASK_AMOMAXU_W)
++DECLARE_INSN(srai_rv32, MATCH_SRAI_RV32, MASK_SRAI_RV32)
++DECLARE_INSN(c_srli32, MATCH_C_SRLI32, MASK_C_SRLI32)
++DECLARE_INSN(vfsstw, MATCH_VFSSTW, MASK_VFSSTW)
++DECLARE_INSN(custom0_rd, MATCH_CUSTOM0_RD, MASK_CUSTOM0_RD)
++DECLARE_INSN(c_beq, MATCH_C_BEQ, MASK_C_BEQ)
++DECLARE_INSN(vfsstd, MATCH_VFSSTD, MASK_VFSSTD)
++DECLARE_INSN(custom3_rd_rs1, MATCH_CUSTOM3_RD_RS1, MASK_CUSTOM3_RD_RS1)
++DECLARE_INSN(lr_d, MATCH_LR_D, MASK_LR_D)
++DECLARE_INSN(lr_w, MATCH_LR_W, MASK_LR_W)
++DECLARE_INSN(fcvt_h_wu, MATCH_FCVT_H_WU, MASK_FCVT_H_WU)
++DECLARE_INSN(vmvv, MATCH_VMVV, MASK_VMVV)
++DECLARE_INSN(sllw, MATCH_SLLW, MASK_SLLW)
++DECLARE_INSN(slli, MATCH_SLLI, MASK_SLLI)
++DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
++DECLARE_INSN(and, MATCH_AND, MASK_AND)
++DECLARE_INSN(lbu, MATCH_LBU, MASK_LBU)
++DECLARE_INSN(fsgnj_s, MATCH_FSGNJ_S, MASK_FSGNJ_S)
++DECLARE_INSN(fmsub_s, MATCH_FMSUB_S, MASK_FMSUB_S)
++DECLARE_INSN(c_sub3, MATCH_C_SUB3, MASK_C_SUB3)
++DECLARE_INSN(fsgnj_h, MATCH_FSGNJ_H, MASK_FSGNJ_H)
++DECLARE_INSN(vlb, MATCH_VLB, MASK_VLB)
++DECLARE_INSN(c_addiw, MATCH_C_ADDIW, MASK_C_ADDIW)
++DECLARE_INSN(custom3_rs1_rs2, MATCH_CUSTOM3_RS1_RS2, MASK_CUSTOM3_RS1_RS2)
++DECLARE_INSN(fsgnj_d, MATCH_FSGNJ_D, MASK_FSGNJ_D)
++DECLARE_INSN(vlsegwu, MATCH_VLSEGWU, MASK_VLSEGWU)
++DECLARE_INSN(fcvt_s_wu, MATCH_FCVT_S_WU, MASK_FCVT_S_WU)
++DECLARE_INSN(custom3_rs1, MATCH_CUSTOM3_RS1, MASK_CUSTOM3_RS1)
++DECLARE_INSN(sc_d, MATCH_SC_D, MASK_SC_D)
++DECLARE_INSN(vfsw, MATCH_VFSW, MASK_VFSW)
++DECLARE_INSN(amoswap_d, MATCH_AMOSWAP_D, MASK_AMOSWAP_D)
++DECLARE_INSN(sb, MATCH_SB, MASK_SB)
++DECLARE_INSN(amoswap_w, MATCH_AMOSWAP_W, MASK_AMOSWAP_W)
++DECLARE_INSN(vfsd, MATCH_VFSD, MASK_VFSD)
++DECLARE_INSN(custom2_rs1, MATCH_CUSTOM2_RS1, MASK_CUSTOM2_RS1)
++DECLARE_INSN(sd, MATCH_SD, MASK_SD)
++DECLARE_INSN(fmv_s_x, MATCH_FMV_S_X, MASK_FMV_S_X)
++DECLARE_INSN(remuw, MATCH_REMUW, MASK_REMUW)
++DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
++DECLARE_INSN(c_fsd, MATCH_C_FSD, MASK_C_FSD)
++DECLARE_INSN(rdcycle, MATCH_RDCYCLE, MASK_RDCYCLE)
++DECLARE_INSN(c_bne, MATCH_C_BNE, MASK_C_BNE)
++DECLARE_INSN(c_add, MATCH_C_ADD, MASK_C_ADD)
++DECLARE_INSN(vxcptcause, MATCH_VXCPTCAUSE, MASK_VXCPTCAUSE)
++DECLARE_INSN(vgetcfg, MATCH_VGETCFG, MASK_VGETCFG)
++DECLARE_INSN(lui, MATCH_LUI, MASK_LUI)
++DECLARE_INSN(vsetcfg, MATCH_VSETCFG, MASK_VSETCFG)
++DECLARE_INSN(c_sdsp, MATCH_C_SDSP, MASK_C_SDSP)
++DECLARE_INSN(c_ldsp, MATCH_C_LDSP, MASK_C_LDSP)
++DECLARE_INSN(fnmadd_h, MATCH_FNMADD_H, MASK_FNMADD_H)
++DECLARE_INSN(custom0_rs1_rs2, MATCH_CUSTOM0_RS1_RS2, MASK_CUSTOM0_RS1_RS2)
++DECLARE_INSN(slli_rv32, MATCH_SLLI_RV32, MASK_SLLI_RV32)
++DECLARE_INSN(mul, MATCH_MUL, MASK_MUL)
++DECLARE_INSN(csrrci, MATCH_CSRRCI, MASK_CSRRCI)
++DECLARE_INSN(c_srai32, MATCH_C_SRAI32, MASK_C_SRAI32)
++DECLARE_INSN(flt_h, MATCH_FLT_H, MASK_FLT_H)
++DECLARE_INSN(srai, MATCH_SRAI, MASK_SRAI)
++DECLARE_INSN(amoand_d, MATCH_AMOAND_D, MASK_AMOAND_D)
++DECLARE_INSN(flt_d, MATCH_FLT_D, MASK_FLT_D)
++DECLARE_INSN(sraw, MATCH_SRAW, MASK_SRAW)
++DECLARE_INSN(csrrs, MATCH_CSRRS, MASK_CSRRS)
++DECLARE_INSN(flt_s, MATCH_FLT_S, MASK_FLT_S)
++DECLARE_INSN(addiw, MATCH_ADDIW, MASK_ADDIW)
++DECLARE_INSN(amoand_w, MATCH_AMOAND_W, MASK_AMOAND_W)
++DECLARE_INSN(custom2_rd, MATCH_CUSTOM2_RD, MASK_CUSTOM2_RD)
++DECLARE_INSN(fcvt_wu_d, MATCH_FCVT_WU_D, MASK_FCVT_WU_D)
++DECLARE_INSN(amoxor_w, MATCH_AMOXOR_W, MASK_AMOXOR_W)
++DECLARE_INSN(fcvt_d_l, MATCH_FCVT_D_L, MASK_FCVT_D_L)
++DECLARE_INSN(fcvt_wu_h, MATCH_FCVT_WU_H, MASK_FCVT_WU_H)
++DECLARE_INSN(c_slli, MATCH_C_SLLI, MASK_C_SLLI)
++DECLARE_INSN(amoxor_d, MATCH_AMOXOR_D, MASK_AMOXOR_D)
++DECLARE_INSN(fcvt_wu_s, MATCH_FCVT_WU_S, MASK_FCVT_WU_S)
++DECLARE_INSN(custom3_rd, MATCH_CUSTOM3_RD, MASK_CUSTOM3_RD)
++DECLARE_INSN(fmax_h, MATCH_FMAX_H, MASK_FMAX_H)
++DECLARE_INSN(venqcnt, MATCH_VENQCNT, MASK_VENQCNT)
++DECLARE_INSN(vlbu, MATCH_VLBU, MASK_VLBU)
++DECLARE_INSN(vlhu, MATCH_VLHU, MASK_VLHU)
++DECLARE_INSN(c_sw, MATCH_C_SW, MASK_C_SW)
++DECLARE_INSN(c_sd, MATCH_C_SD, MASK_C_SD)
++DECLARE_INSN(c_or3, MATCH_C_OR3, MASK_C_OR3)
++DECLARE_INSN(c_and3, MATCH_C_AND3, MASK_C_AND3)
++DECLARE_INSN(vfssegstw, MATCH_VFSSEGSTW, MASK_VFSSEGSTW)
++DECLARE_INSN(slt, MATCH_SLT, MASK_SLT)
++DECLARE_INSN(amoor_d, MATCH_AMOOR_D, MASK_AMOOR_D)
++DECLARE_INSN(remu, MATCH_REMU, MASK_REMU)
++DECLARE_INSN(remw, MATCH_REMW, MASK_REMW)
++DECLARE_INSN(sll, MATCH_SLL, MASK_SLL)
++DECLARE_INSN(vfssegstd, MATCH_VFSSEGSTD, MASK_VFSSEGSTD)
++DECLARE_INSN(amoor_w, MATCH_AMOOR_W, MASK_AMOOR_W)
++DECLARE_INSN(custom2_rs1_rs2, MATCH_CUSTOM2_RS1_RS2, MASK_CUSTOM2_RS1_RS2)
++DECLARE_INSN(vf, MATCH_VF, MASK_VF)
++DECLARE_INSN(vfmvv, MATCH_VFMVV, MASK_VFMVV)
++DECLARE_INSN(vflsegstw, MATCH_VFLSEGSTW, MASK_VFLSEGSTW)
++DECLARE_INSN(vxcptrestore, MATCH_VXCPTRESTORE, MASK_VXCPTRESTORE)
++DECLARE_INSN(vxcpthold, MATCH_VXCPTHOLD, MASK_VXCPTHOLD)
++DECLARE_INSN(sltiu, MATCH_SLTIU, MASK_SLTIU)
++DECLARE_INSN(vflsegstd, MATCH_VFLSEGSTD, MASK_VFLSEGSTD)
++DECLARE_INSN(vfld, MATCH_VFLD, MASK_VFLD)
++DECLARE_INSN(fmadd_s, MATCH_FMADD_S, MASK_FMADD_S)
++DECLARE_INSN(vflw, MATCH_VFLW, MASK_VFLW)
++DECLARE_INSN(fmadd_d, MATCH_FMADD_D, MASK_FMADD_D)
++DECLARE_INSN(fmadd_h, MATCH_FMADD_H, MASK_FMADD_H)
++DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
++DECLARE_INSN(vssegw, MATCH_VSSEGW, MASK_VSSEGW)
++DECLARE_INSN(custom0_rd_rs1, MATCH_CUSTOM0_RD_RS1, MASK_CUSTOM0_RD_RS1)
++DECLARE_INSN(vssegh, MATCH_VSSEGH, MASK_VSSEGH)
++DECLARE_INSN(frcsr, MATCH_FRCSR, MASK_FRCSR)
++DECLARE_INSN(vssegd, MATCH_VSSEGD, MASK_VSSEGD)
++DECLARE_INSN(vssegb, MATCH_VSSEGB, MASK_VSSEGB)
++DECLARE_INSN(fmin_h, MATCH_FMIN_H, MASK_FMIN_H)
++DECLARE_INSN(fmin_d, MATCH_FMIN_D, MASK_FMIN_D)
++DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
++DECLARE_INSN(fmin_s, MATCH_FMIN_S, MASK_FMIN_S)
++DECLARE_INSN(srli_rv32, MATCH_SRLI_RV32, MASK_SRLI_RV32)
++DECLARE_INSN(slliw, MATCH_SLLIW, MASK_SLLIW)
++DECLARE_INSN(fmax_s, MATCH_FMAX_S, MASK_FMAX_S)
++DECLARE_INSN(fcvt_d_h, MATCH_FCVT_D_H, MASK_FCVT_D_H)
++DECLARE_INSN(fcvt_d_w, MATCH_FCVT_D_W, MASK_FCVT_D_W)
++DECLARE_INSN(add, MATCH_ADD, MASK_ADD)
++DECLARE_INSN(fcvt_d_s, MATCH_FCVT_D_S, MASK_FCVT_D_S)
++DECLARE_INSN(fmax_d, MATCH_FMAX_D, MASK_FMAX_D)
++DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
++DECLARE_INSN(custom1_rd, MATCH_CUSTOM1_RD, MASK_CUSTOM1_RD)
++DECLARE_INSN(fsrm, MATCH_FSRM, MASK_FSRM)
++DECLARE_INSN(fdiv_d, MATCH_FDIV_D, MASK_FDIV_D)
++DECLARE_INSN(vsw, MATCH_VSW, MASK_VSW)
++DECLARE_INSN(fcvt_l_s, MATCH_FCVT_L_S, MASK_FCVT_L_S)
++DECLARE_INSN(fdiv_h, MATCH_FDIV_H, MASK_FDIV_H)
++DECLARE_INSN(vsb, MATCH_VSB, MASK_VSB)
++DECLARE_INSN(fdiv_s, MATCH_FDIV_S, MASK_FDIV_S)
++DECLARE_INSN(fsrmi, MATCH_FSRMI, MASK_FSRMI)
++DECLARE_INSN(fcvt_l_h, MATCH_FCVT_L_H, MASK_FCVT_L_H)
++DECLARE_INSN(vsh, MATCH_VSH, MASK_VSH)
++DECLARE_INSN(fcvt_l_d, MATCH_FCVT_L_D, MASK_FCVT_L_D)
++DECLARE_INSN(fcvt_h_s, MATCH_FCVT_H_S, MASK_FCVT_H_S)
++DECLARE_INSN(scall, MATCH_SCALL, MASK_SCALL)
++DECLARE_INSN(fsflagsi, MATCH_FSFLAGSI, MASK_FSFLAGSI)
++DECLARE_INSN(fcvt_h_w, MATCH_FCVT_H_W, MASK_FCVT_H_W)
++DECLARE_INSN(fcvt_h_l, MATCH_FCVT_H_L, MASK_FCVT_H_L)
++DECLARE_INSN(srliw, MATCH_SRLIW, MASK_SRLIW)
++DECLARE_INSN(fcvt_s_lu, MATCH_FCVT_S_LU, MASK_FCVT_S_LU)
++DECLARE_INSN(fcvt_h_d, MATCH_FCVT_H_D, MASK_FCVT_H_D)
++DECLARE_INSN(sbreak, MATCH_SBREAK, MASK_SBREAK)
++DECLARE_INSN(rdinstreth, MATCH_RDINSTRETH, MASK_RDINSTRETH)
++DECLARE_INSN(sra, MATCH_SRA, MASK_SRA)
++DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
++DECLARE_INSN(srl, MATCH_SRL, MASK_SRL)
++DECLARE_INSN(venqcmd, MATCH_VENQCMD, MASK_VENQCMD)
++DECLARE_INSN(or, MATCH_OR, MASK_OR)
++DECLARE_INSN(subw, MATCH_SUBW, MASK_SUBW)
++DECLARE_INSN(fmv_x_d, MATCH_FMV_X_D, MASK_FMV_X_D)
++DECLARE_INSN(rdtime, MATCH_RDTIME, MASK_RDTIME)
++DECLARE_INSN(amoadd_d, MATCH_AMOADD_D, MASK_AMOADD_D)
++DECLARE_INSN(amomax_w, MATCH_AMOMAX_W, MASK_AMOMAX_W)
++DECLARE_INSN(c_move, MATCH_C_MOVE, MASK_C_MOVE)
++DECLARE_INSN(fmovn, MATCH_FMOVN, MASK_FMOVN)
++DECLARE_INSN(c_fsw, MATCH_C_FSW, MASK_C_FSW)
++DECLARE_INSN(amoadd_w, MATCH_AMOADD_W, MASK_AMOADD_W)
++DECLARE_INSN(amomax_d, MATCH_AMOMAX_D, MASK_AMOMAX_D)
++DECLARE_INSN(fmovz, MATCH_FMOVZ, MASK_FMOVZ)
++DECLARE_INSN(custom1_rs1_rs2, MATCH_CUSTOM1_RS1_RS2, MASK_CUSTOM1_RS1_RS2)
++DECLARE_INSN(fmv_x_h, MATCH_FMV_X_H, MASK_FMV_X_H)
++DECLARE_INSN(vsd, MATCH_VSD, MASK_VSD)
++DECLARE_INSN(vlsegstw, MATCH_VLSEGSTW, MASK_VLSEGSTW)
++DECLARE_INSN(c_addi, MATCH_C_ADDI, MASK_C_ADDI)
++DECLARE_INSN(c_slliw, MATCH_C_SLLIW, MASK_C_SLLIW)
++DECLARE_INSN(vlsegstb, MATCH_VLSEGSTB, MASK_VLSEGSTB)
++DECLARE_INSN(vlsegstd, MATCH_VLSEGSTD, MASK_VLSEGSTD)
++DECLARE_INSN(vlsegsth, MATCH_VLSEGSTH, MASK_VLSEGSTH)
++DECLARE_INSN(mulhu, MATCH_MULHU, MASK_MULHU)
++DECLARE_INSN(amomin_w, MATCH_AMOMIN_W, MASK_AMOMIN_W)
++DECLARE_INSN(c_slli32, MATCH_C_SLLI32, MASK_C_SLLI32)
++DECLARE_INSN(c_add3, MATCH_C_ADD3, MASK_C_ADD3)
++DECLARE_INSN(vgetvl, MATCH_VGETVL, MASK_VGETVL)
++DECLARE_INSN(amomin_d, MATCH_AMOMIN_D, MASK_AMOMIN_D)
++DECLARE_INSN(fcvt_w_h, MATCH_FCVT_W_H, MASK_FCVT_W_H)
++DECLARE_INSN(vlsegb, MATCH_VLSEGB, MASK_VLSEGB)
++DECLARE_INSN(fsd, MATCH_FSD, MASK_FSD)
++DECLARE_INSN(vlsegd, MATCH_VLSEGD, MASK_VLSEGD)
++DECLARE_INSN(fsh, MATCH_FSH, MASK_FSH)
++DECLARE_INSN(vlsegh, MATCH_VLSEGH, MASK_VLSEGH)
++DECLARE_INSN(c_sub, MATCH_C_SUB, MASK_C_SUB)
++DECLARE_INSN(vlsegw, MATCH_VLSEGW, MASK_VLSEGW)
++DECLARE_INSN(fsw, MATCH_FSW, MASK_FSW)
++DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
++#endif
++#ifdef DECLARE_CSR
++DECLARE_CSR(fflags, CSR_FFLAGS)
++DECLARE_CSR(frm, CSR_FRM)
++DECLARE_CSR(fcsr, CSR_FCSR)
++DECLARE_CSR(stats, CSR_STATS)
++DECLARE_CSR(sup0, CSR_SUP0)
++DECLARE_CSR(sup1, CSR_SUP1)
++DECLARE_CSR(epc, CSR_EPC)
++DECLARE_CSR(badvaddr, CSR_BADVADDR)
++DECLARE_CSR(ptbr, CSR_PTBR)
++DECLARE_CSR(asid, CSR_ASID)
++DECLARE_CSR(count, CSR_COUNT)
++DECLARE_CSR(compare, CSR_COMPARE)
++DECLARE_CSR(evec, CSR_EVEC)
++DECLARE_CSR(cause, CSR_CAUSE)
++DECLARE_CSR(status, CSR_STATUS)
++DECLARE_CSR(hartid, CSR_HARTID)
++DECLARE_CSR(impl, CSR_IMPL)
++DECLARE_CSR(fatc, CSR_FATC)
++DECLARE_CSR(send_ipi, CSR_SEND_IPI)
++DECLARE_CSR(clear_ipi, CSR_CLEAR_IPI)
++DECLARE_CSR(reset, CSR_RESET)
++DECLARE_CSR(tohost, CSR_TOHOST)
++DECLARE_CSR(fromhost, CSR_FROMHOST)
++DECLARE_CSR(cycle, CSR_CYCLE)
++DECLARE_CSR(time, CSR_TIME)
++DECLARE_CSR(instret, CSR_INSTRET)
++DECLARE_CSR(uarch0, CSR_UARCH0)
++DECLARE_CSR(uarch1, CSR_UARCH1)
++DECLARE_CSR(uarch2, CSR_UARCH2)
++DECLARE_CSR(uarch3, CSR_UARCH3)
++DECLARE_CSR(uarch4, CSR_UARCH4)
++DECLARE_CSR(uarch5, CSR_UARCH5)
++DECLARE_CSR(uarch6, CSR_UARCH6)
++DECLARE_CSR(uarch7, CSR_UARCH7)
++DECLARE_CSR(uarch8, CSR_UARCH8)
++DECLARE_CSR(uarch9, CSR_UARCH9)
++DECLARE_CSR(uarch10, CSR_UARCH10)
++DECLARE_CSR(uarch11, CSR_UARCH11)
++DECLARE_CSR(uarch12, CSR_UARCH12)
++DECLARE_CSR(uarch13, CSR_UARCH13)
++DECLARE_CSR(uarch14, CSR_UARCH14)
++DECLARE_CSR(uarch15, CSR_UARCH15)
++DECLARE_CSR(counth, CSR_COUNTH)
++DECLARE_CSR(cycleh, CSR_CYCLEH)
++DECLARE_CSR(timeh, CSR_TIMEH)
++DECLARE_CSR(instreth, CSR_INSTRETH)
++#endif
++#ifdef DECLARE_CAUSE
++DECLARE_CAUSE("fflags", CAUSE_FFLAGS)
++DECLARE_CAUSE("frm", CAUSE_FRM)
++DECLARE_CAUSE("fcsr", CAUSE_FCSR)
++DECLARE_CAUSE("stats", CAUSE_STATS)
++DECLARE_CAUSE("sup0", CAUSE_SUP0)
++DECLARE_CAUSE("sup1", CAUSE_SUP1)
++DECLARE_CAUSE("epc", CAUSE_EPC)
++DECLARE_CAUSE("badvaddr", CAUSE_BADVADDR)
++DECLARE_CAUSE("ptbr", CAUSE_PTBR)
++DECLARE_CAUSE("asid", CAUSE_ASID)
++DECLARE_CAUSE("count", CAUSE_COUNT)
++DECLARE_CAUSE("compare", CAUSE_COMPARE)
++DECLARE_CAUSE("evec", CAUSE_EVEC)
++DECLARE_CAUSE("cause", CAUSE_CAUSE)
++DECLARE_CAUSE("status", CAUSE_STATUS)
++DECLARE_CAUSE("hartid", CAUSE_HARTID)
++DECLARE_CAUSE("impl", CAUSE_IMPL)
++DECLARE_CAUSE("fatc", CAUSE_FATC)
++DECLARE_CAUSE("send_ipi", CAUSE_SEND_IPI)
++DECLARE_CAUSE("clear_ipi", CAUSE_CLEAR_IPI)
++DECLARE_CAUSE("reset", CAUSE_RESET)
++DECLARE_CAUSE("tohost", CAUSE_TOHOST)
++DECLARE_CAUSE("fromhost", CAUSE_FROMHOST)
++DECLARE_CAUSE("cycle", CAUSE_CYCLE)
++DECLARE_CAUSE("time", CAUSE_TIME)
++DECLARE_CAUSE("instret", CAUSE_INSTRET)
++DECLARE_CAUSE("uarch0", CAUSE_UARCH0)
++DECLARE_CAUSE("uarch1", CAUSE_UARCH1)
++DECLARE_CAUSE("uarch2", CAUSE_UARCH2)
++DECLARE_CAUSE("uarch3", CAUSE_UARCH3)
++DECLARE_CAUSE("uarch4", CAUSE_UARCH4)
++DECLARE_CAUSE("uarch5", CAUSE_UARCH5)
++DECLARE_CAUSE("uarch6", CAUSE_UARCH6)
++DECLARE_CAUSE("uarch7", CAUSE_UARCH7)
++DECLARE_CAUSE("uarch8", CAUSE_UARCH8)
++DECLARE_CAUSE("uarch9", CAUSE_UARCH9)
++DECLARE_CAUSE("uarch10", CAUSE_UARCH10)
++DECLARE_CAUSE("uarch11", CAUSE_UARCH11)
++DECLARE_CAUSE("uarch12", CAUSE_UARCH12)
++DECLARE_CAUSE("uarch13", CAUSE_UARCH13)
++DECLARE_CAUSE("uarch14", CAUSE_UARCH14)
++DECLARE_CAUSE("uarch15", CAUSE_UARCH15)
++DECLARE_CAUSE("counth", CAUSE_COUNTH)
++DECLARE_CAUSE("cycleh", CAUSE_CYCLEH)
++DECLARE_CAUSE("timeh", CAUSE_TIMEH)
++DECLARE_CAUSE("instreth", CAUSE_INSTRETH)
++#endif
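
How these tables are consumed: an instruction word matches an entry when (insn & MASK_x) == MATCH_x; the bits MASK leaves zero are the operand fields. The DECLARE_INSN/DECLARE_CSR/DECLARE_CAUSE blocks are X-macro lists that a consumer re-includes with its own definitions of those macros to build lookup tables. A minimal decoder sketch in C, using only encodings defined in the hunk above (the decode function and the sample word are illustrative, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Encodings copied from the riscv-opc.h hunk above.  */
    #define MATCH_ADD 0x33
    #define MASK_ADD  0xfe00707f
    #define MATCH_LW  0x2003
    #define MASK_LW   0x707f
    #define MATCH_JAL 0x6f
    #define MASK_JAL  0x7f

    /* The fixed opcode/funct bits selected by MASK must equal MATCH.  */
    static const char *
    decode (uint32_t insn)
    {
      if ((insn & MASK_ADD) == MATCH_ADD) return "add";
      if ((insn & MASK_LW) == MATCH_LW) return "lw";
      if ((insn & MASK_JAL) == MATCH_JAL) return "jal";
      return "unknown";
    }

    int
    main (void)
    {
      printf ("%s\n", decode (0x00b50533));  /* add a0,a0,a1 */
      return 0;
    }
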
+diff -urN original-gcc/gcc/config/riscv/riscv.opt gcc/gcc/config/riscv/riscv.opt
+--- original-gcc/gcc/config/riscv/riscv.opt 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/riscv.opt 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,75 @@
++; Options for the RISC-V port of the compiler
++;
++; Copyright (C) 2005, 2007, 2008, 2010, 2011 Free Software Foundation, Inc.
++;
++; This file is part of GCC.
++;
++; GCC is free software; you can redistribute it and/or modify it under
++; the terms of the GNU General Public License as published by the Free
++; Software Foundation; either version 3, or (at your option) any later
++; version.
++;
++; GCC is distributed in the hope that it will be useful, but WITHOUT
++; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++; License for more details.
++;
++; You should have received a copy of the GNU General Public License
++; along with GCC; see the file COPYING3. If not see
++; <http://www.gnu.org/licenses/>.
++
++m32
++Target RejectNegative Mask(32BIT)
++Generate RV32 code
++
++m64
++Target RejectNegative InverseMask(32BIT, 64BIT)
++Generate RV64 code
++
++mbranch-cost=
++Target RejectNegative Joined UInteger Var(riscv_branch_cost)
++-mbranch-cost=COST Set the cost of branches to roughly COST instructions
++
++mhard-float
++Target Report RejectNegative InverseMask(SOFT_FLOAT_ABI, HARD_FLOAT_ABI)
++Allow the use of hardware floating-point ABI and instructions
++
++mmemcpy
++Target Report Mask(MEMCPY)
++Don't optimize block moves
++
++mplt
++Target Report Var(TARGET_PLT) Init(1)
++When generating -fpic code, allow the use of PLTs. Ignored for -fno-pic.
++
++msoft-float
++Target Report RejectNegative Mask(SOFT_FLOAT_ABI)
++Prevent the use of all hardware floating-point instructions
++
++mfdiv
++Target Report RejectNegative Mask(FDIV)
++Use hardware floating-point divide and square root instructions
++
++march=
++Target RejectNegative Joined Var(riscv_arch_string)
++-march= Generate code for given RISC-V ISA (e.g. RV64IM)
++
++mtune=
++Target RejectNegative Joined Var(riscv_tune_string)
++-mtune=PROCESSOR Optimize the output for PROCESSOR
++
++msmall-data-limit=
++Target Joined Separate UInteger Var(g_switch_value) Init(8)
++-msmall-data-limit=<number> Put global and static data smaller than <number> bytes into a special section (on some targets)
++
++matomic
++Target Report Mask(ATOMIC)
++Use hardware atomic memory instructions.
++
++mmuldiv
++Target Report Mask(MULDIV)
++Use hardware instructions for integer multiplication and division.
++
++mlra
++Target Report Var(riscv_lra_flag) Init(0) Save
++Use LRA instead of reload
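
For context on how these entries take effect: GCC's option machinery turns each Mask(NAME) line into a bit in target_flags plus a TARGET_NAME predicate, which is how the sync.md patterns further down can be gated on "TARGET_ATOMIC". A hypothetical C rendering of the generated form (bit positions and layout invented for illustration; the real definitions land in GCC's generated options.h):

    #include <stdio.h>

    /* Sketch of what Mask(ATOMIC) and Mask(MULDIV) expand to.  */
    static int target_flags;

    #define MASK_ATOMIC   (1 << 0)
    #define MASK_MULDIV   (1 << 1)
    #define TARGET_ATOMIC ((target_flags & MASK_ATOMIC) != 0)
    #define TARGET_MULDIV ((target_flags & MASK_MULDIV) != 0)

    int
    main (void)
    {
      target_flags |= MASK_ATOMIC;  /* the effect of passing -matomic */
      printf ("atomic=%d muldiv=%d\n", TARGET_ATOMIC, TARGET_MULDIV);
      return 0;
    }
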
+diff -urN original-gcc/gcc/config/riscv/riscv-protos.h gcc/gcc/config/riscv/riscv-protos.h
+--- original-gcc/gcc/config/riscv/riscv-protos.h 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/riscv-protos.h 2015-03-07 09:51:45.663139025 +0100
+@@ -0,0 +1,89 @@
++/* Definition of RISC-V target for GNU compiler.
++ Copyright (C) 2011-2014 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#ifndef GCC_RISCV_PROTOS_H
++#define GCC_RISCV_PROTOS_H
++
++enum riscv_symbol_type {
++ SYMBOL_ABSOLUTE,
++ SYMBOL_GOT_DISP,
++ SYMBOL_TLS,
++ SYMBOL_TLS_LE,
++ SYMBOL_TLS_IE,
++ SYMBOL_TLS_GD
++};
++#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
++
++extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
++extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
++extern int riscv_address_insns (rtx, enum machine_mode, bool);
++extern int riscv_const_insns (rtx);
++extern int riscv_split_const_insns (rtx);
++extern int riscv_load_store_insns (rtx, rtx);
++extern rtx riscv_emit_move (rtx, rtx);
++extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
++extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
++extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
++extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
++extern bool riscv_legitimize_vector_move (enum machine_mode, rtx, rtx);
++
++extern rtx riscv_subword (rtx, bool);
++extern bool riscv_split_64bit_move_p (rtx, rtx);
++extern void riscv_split_doubleword_move (rtx, rtx);
++extern const char *riscv_output_move (rtx, rtx);
++extern const char *riscv_output_vector_move (enum machine_mode, rtx, rtx);
++#ifdef RTX_CODE
++extern void riscv_expand_scc (rtx *);
++extern void riscv_expand_conditional_branch (rtx *);
++#endif
++extern rtx riscv_expand_call (bool, rtx, rtx, rtx);
++extern void riscv_expand_fcc_reload (rtx, rtx, rtx);
++extern void riscv_set_return_address (rtx, rtx);
++extern bool riscv_expand_block_move (rtx, rtx, rtx);
++extern void riscv_expand_synci_loop (rtx, rtx);
++
++extern bool riscv_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
++ HOST_WIDE_INT);
++extern bool riscv_expand_ins_as_unaligned_store (rtx, rtx, HOST_WIDE_INT,
++ HOST_WIDE_INT);
++extern void riscv_order_regs_for_local_alloc (void);
++
++extern rtx riscv_return_addr (int, rtx);
++extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
++extern void riscv_expand_prologue (void);
++extern void riscv_expand_epilogue (bool);
++extern bool riscv_can_use_return_insn (void);
++extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
++
++extern enum reg_class riscv_secondary_reload_class (enum reg_class,
++ enum machine_mode,
++ rtx, bool);
++extern int riscv_class_max_nregs (enum reg_class, enum machine_mode);
++
++extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
++
++extern void irix_asm_output_align (FILE *, unsigned);
++extern const char *current_section_name (void);
++extern unsigned int current_section_flags (void);
++
++extern void riscv_expand_vector_init (rtx, rtx);
++
++#endif /* ! GCC_RISCV_PROTOS_H */
+diff -urN original-gcc/gcc/config/riscv/sync.md gcc/gcc/config/riscv/sync.md
+--- original-gcc/gcc/config/riscv/sync.md 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/sync.md 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,198 @@
++;; Machine description for RISC-V atomic operations.
++;; Copyright (C) 2011-2014 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (waterman@cs.berkeley.edu) at UC Berkeley.
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_c_enum "unspec" [
++ UNSPEC_COMPARE_AND_SWAP
++ UNSPEC_SYNC_OLD_OP
++ UNSPEC_SYNC_EXCHANGE
++ UNSPEC_ATOMIC_STORE
++ UNSPEC_MEMORY_BARRIER
++])
++
++(define_code_iterator any_atomic [plus ior xor and])
++(define_code_attr atomic_optab
++ [(plus "add") (ior "or") (xor "xor") (and "and")])
++
++;; Memory barriers.
++
++(define_expand "mem_thread_fence"
++ [(match_operand:SI 0 "const_int_operand" "")] ;; model
++ ""
++{
++ if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
++ {
++ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
++ MEM_VOLATILE_P (mem) = 1;
++ emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
++ }
++ DONE;
++})
++
++(define_insn "mem_thread_fence_1"
++ [(set (match_operand:BLK 0 "" "")
++ (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
++ (match_operand:SI 1 "const_int_operand" "")] ;; model
++ ""
++{
++ switch (INTVAL (operands[1]))
++ {
++ case MEMMODEL_SEQ_CST:
++ case MEMMODEL_ACQ_REL:
++ return "fence rw,rw";
++ case MEMMODEL_ACQUIRE:
++ case MEMMODEL_CONSUME:
++ return "fence r,rw";
++ case MEMMODEL_RELEASE:
++ return "fence rw,w";
++ default:
++ gcc_unreachable();
++ }
++})
++
++;; Atomic memory operations.
++
++;; Implement atomic stores with amoswap. Fall back to fences for atomic loads.
++(define_insn "atomic_store<mode>"
++ [(set (match_operand:GPR 0 "memory_operand" "=A")
++ (unspec_volatile:GPR
++ [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
++ (match_operand:SI 2 "const_int_operand")] ;; model
++ UNSPEC_ATOMIC_STORE))]
++ "TARGET_ATOMIC"
++ "amoswap.<amo>%A2 zero,%z1,%0")
++
++(define_insn "atomic_<atomic_optab><mode>"
++ [(set (match_operand:GPR 0 "memory_operand" "+A")
++ (unspec_volatile:GPR
++ [(any_atomic:GPR (match_dup 0)
++ (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
++ (match_operand:SI 2 "const_int_operand")] ;; model
++ UNSPEC_SYNC_OLD_OP))]
++ "TARGET_ATOMIC"
++ "amo<insn>.<amo>%A2 zero,%z1,%0")
++
++(define_insn "atomic_fetch_<atomic_optab><mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=&r")
++ (match_operand:GPR 1 "memory_operand" "+A"))
++ (set (match_dup 1)
++ (unspec_volatile:GPR
++ [(any_atomic:GPR (match_dup 1)
++ (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
++ (match_operand:SI 3 "const_int_operand")] ;; model
++ UNSPEC_SYNC_OLD_OP))]
++ "TARGET_ATOMIC"
++ "amo<insn>.<amo>%A3 %0,%z2,%1")
++
++(define_insn "atomic_exchange<mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=&r")
++ (unspec_volatile:GPR
++ [(match_operand:GPR 1 "memory_operand" "+A")
++ (match_operand:SI 3 "const_int_operand")] ;; model
++ UNSPEC_SYNC_EXCHANGE))
++ (set (match_dup 1)
++ (match_operand:GPR 2 "register_operand" "0"))]
++ "TARGET_ATOMIC"
++ "amoswap.<amo>%A3 %0,%z2,%1")
++
++(define_insn "atomic_cas_value_strong<mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=&r")
++ (match_operand:GPR 1 "memory_operand" "+A"))
++ (set (match_dup 1)
++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
++ (match_operand:GPR 3 "reg_or_0_operand" "rJ")
++ (match_operand:SI 4 "const_int_operand") ;; mod_s
++ (match_operand:SI 5 "const_int_operand")] ;; mod_f
++ UNSPEC_COMPARE_AND_SWAP))
++ (clobber (match_scratch:GPR 6 "=&r"))]
++ "TARGET_ATOMIC"
++ "1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
++ [(set (attr "length") (const_int 16))])
++
++(define_expand "atomic_compare_and_swap<mode>"
++ [(match_operand:SI 0 "register_operand" "") ;; bool output
++ (match_operand:GPR 1 "register_operand" "") ;; val output
++ (match_operand:GPR 2 "memory_operand" "") ;; memory
++ (match_operand:GPR 3 "reg_or_0_operand" "") ;; expected value
++ (match_operand:GPR 4 "reg_or_0_operand" "") ;; desired value
++ (match_operand:SI 5 "const_int_operand" "") ;; is_weak
++ (match_operand:SI 6 "const_int_operand" "") ;; mod_s
++ (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
++ "TARGET_ATOMIC"
++{
++ emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
++ operands[3], operands[4],
++ operands[6], operands[7]));
++
++ rtx compare = operands[1];
++ if (operands[3] != const0_rtx)
++ {
++ rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
++ compare = gen_reg_rtx (<MODE>mode);
++ emit_insn (gen_rtx_SET (VOIDmode, compare, difference));
++ }
++
++ rtx eq = gen_rtx_EQ (<MODE>mode, compare, const0_rtx);
++ rtx result = gen_reg_rtx (<MODE>mode);
++ emit_insn (gen_rtx_SET (VOIDmode, result, eq));
++ emit_insn (gen_rtx_SET (VOIDmode, operands[0], gen_lowpart (SImode, result)));
++ DONE;
++})
++
++(define_expand "atomic_test_and_set"
++ [(match_operand:QI 0 "register_operand" "") ;; bool output
++ (match_operand:QI 1 "memory_operand" "+A") ;; memory
++ (match_operand:SI 2 "const_int_operand" "")] ;; model
++ "TARGET_ATOMIC"
++{
++ /* We have no QImode atomics, so use the address LSBs to form a mask,
++ then use an aligned SImode atomic. */
++ rtx result = operands[0];
++ rtx mem = operands[1];
++ rtx model = operands[2];
++ rtx addr = force_reg (Pmode, XEXP (mem, 0));
++
++ rtx aligned_addr = gen_reg_rtx (Pmode);
++ emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
++
++ rtx aligned_mem = change_address (mem, SImode, aligned_addr);
++ set_mem_alias_set (aligned_mem, 0);
++
++ rtx offset = gen_reg_rtx (SImode);
++ emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
++ GEN_INT (3)));
++
++ rtx tmp = gen_reg_rtx (SImode);
++ emit_move_insn (tmp, GEN_INT (1));
++
++ rtx shmt = gen_reg_rtx (SImode);
++ emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
++
++ rtx word = gen_reg_rtx (SImode);
++ emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
++
++ tmp = gen_reg_rtx (SImode);
++ emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
++
++ emit_move_insn (gen_lowpart (SImode, result),
++ gen_rtx_LSHIFTRT (SImode, tmp,
++ gen_lowpart (SImode, shmt)));
++ DONE;
++})
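
Two details of this machine description are worth spelling out. First, mem_thread_fence_1 maps C11 memory models onto RISC-V fences: seq_cst and acq_rel become "fence rw,rw", acquire and consume "fence r,rw", and release "fence rw,w". Second, atomic_test_and_set works around the lack of byte-sized AMOs by OR-ing a one into the byte's position within the containing aligned 32-bit word. A C rendering of that trick, assuming little-endian byte numbering as the RTL does (the function name is illustrative, and the type-punned access is a sketch of what the expander does at RTL level, where set_mem_alias_set handles the aliasing):

    #include <stdbool.h>
    #include <stdint.h>

    static bool
    test_and_set_byte (uint8_t *p)
    {
      uintptr_t addr = (uintptr_t) p;
      uint32_t *aligned = (uint32_t *) (addr & ~(uintptr_t) 3);  /* addr & -4 */
      unsigned shift = (addr & 3) * 8;        /* the ASHIFT of offset by 3 */
      uint32_t word = (uint32_t) 1 << shift;  /* the "word" temporary */
      /* The expander emits an SImode atomic_fetch_or on the aligned word.  */
      uint32_t old = __atomic_fetch_or (aligned, word, __ATOMIC_SEQ_CST);
      return (uint8_t) (old >> shift) != 0;   /* the final LSHIFTRT */
    }

    int
    main (void)
    {
      static uint8_t flag;
      return test_and_set_byte (&flag);  /* 0 on first call, 1 afterwards */
    }
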
+diff -urN original-gcc/gcc/config/riscv/t-elf gcc/gcc/config/riscv/t-elf
+--- original-gcc/gcc/config/riscv/t-elf 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/t-elf 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,4 @@
++# Build multilib variants: hard/soft float, 64-/32-bit, and with/without atomics
++
++MULTILIB_OPTIONS = msoft-float m64/m32 mno-atomic
++MULTILIB_DIRNAMES = soft-float 64 32 no-atomic
+diff -urN original-gcc/gcc/config/riscv/t-linux64 gcc/gcc/config/riscv/t-linux64
+--- original-gcc/gcc/config/riscv/t-linux64 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/gcc/config/riscv/t-linux64 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,5 @@
++# Build multilib variants: hard/soft float, 64-/32-bit, and with/without atomics
++
++MULTILIB_OPTIONS = m64/m32 msoft-float mno-atomic
++MULTILIB_DIRNAMES = 64 32 soft-float no-atomic
++MULTILIB_OSDIRNAMES = ../lib ../lib32
+diff -urN original-gcc/gcc/config.gcc gcc/gcc/config.gcc
+--- original-gcc/gcc/config.gcc 2014-09-17 16:16:02.000000000 +0200
++++ gcc-4.9.2/gcc/config.gcc 2015-03-07 09:57:54.195132741 +0100
+@@ -447,6 +447,10 @@
+ esac
+ extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
+ ;;
++riscv*)
++ cpu_type=riscv
++ need_64bit_hwint=yes
++ ;;
+ rs6000*-*-*)
+ need_64bit_hwint=yes
+ extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
+@@ -1949,6 +1953,27 @@
+ cxx_target_objs="${cxx_target_objs} microblaze-c.o"
+ tmake_file="${tmake_file} microblaze/t-microblaze"
+ ;;
++riscv32*-*-linux*) # Linux RISC-V
++ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h riscv/default-32.h ${tm_file} riscv/linux.h riscv/linux64.h"
++ tmake_file="${tmake_file} riscv/t-linux64"
++ gnu_ld=yes
++ gas=yes
++ gcc_cv_initfini_array=yes
++ ;;
++riscv*-*-linux*) # Linux RISC-V
++ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h riscv/linux64.h"
++ tmake_file="${tmake_file} riscv/t-linux64"
++ gnu_ld=yes
++ gas=yes
++ gcc_cv_initfini_array=yes
++ ;;
++riscv*-*-elf*)				# RISC-V ELF (bare metal, newlib)
++ tm_file="elfos.h newlib-stdint.h ${tm_file} riscv/elf.h"
++ tmake_file="${tmake_file} riscv/t-elf"
++ gnu_ld=yes
++ gas=yes
++ gcc_cv_initfini_array=yes
++ ;;
+ mips*-*-netbsd*) # NetBSD/mips, either endian.
+ target_cpu_default="MASK_ABICALLS"
+ tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
+@@ -3756,6 +3781,31 @@
+ done
+ ;;
+
++ riscv*-*-*)
++ supported_defaults="abi arch arch_32 arch_64 float tune tune_32 tune_64"
++
++ case ${with_float} in
++ "" | soft | hard)
++ # OK
++ ;;
++ *)
++ echo "Unknown floating point type used in --with-float=$with_float" 1>&2
++ exit 1
++ ;;
++ esac
++
++ case ${with_abi} in
++ "" | 32 | 64)
++ # OK
++ ;;
++ *)
++ echo "Unknown ABI used in --with-abi=$with_abi" 1>&2
++ exit 1
++ ;;
++ esac
++
++ ;;
++
+ mips*-*-*)
+ supported_defaults="abi arch arch_32 arch_64 float fpu nan tune tune_32 tune_64 divide llsc mips-plt synci"
+
+diff -urN original-gcc/gcc/configure gcc/gcc/configure
+--- original-gcc/gcc/configure 2014-10-10 14:51:28.000000000 +0200
++++ gcc-4.9.2/gcc/configure 2015-03-07 09:57:54.211132741 +0100
+@@ -23515,6 +23515,25 @@
+ tls_first_minor=14
+ tls_as_opt="-a32 --fatal-warnings"
+ ;;
++ riscv*-*-*)
++ conftest_s='
++ .section .tdata,"awT",@progbits
++x:
++ .word 2
++ .text
++ la.tls.gd a0,x
++ la.tls.ie a1,x
++ lui a0,%tls_ie_pcrel_hi(x)
++ lw a0,%pcrel_lo(x)(a0)
++ add a0,a0,tp
++ lw a0,0(a0)
++ lui a0,%tprel_hi(x)
++ add a0,a0,tp,%tprel_add(x)
++ lw a0,%tprel_lo(x)(a0)'
++ tls_first_major=2
++ tls_first_minor=21
++ tls_as_opt='-m32 --fatal-warnings'
++ ;;
+ s390-*-*)
+ conftest_s='
+ .section ".tdata","awT",@progbits
+diff -urN original-gcc/gcc/configure.ac gcc/gcc/configure.ac
+--- original-gcc/gcc/configure.ac 2014-10-10 14:51:28.000000000 +0200
++++ gcc-4.9.2/gcc/configure.ac 2015-03-07 09:57:54.219132741 +0100
+@@ -3178,6 +3178,25 @@
+ tls_first_minor=14
+ tls_as_opt="-a32 --fatal-warnings"
+ ;;
++ riscv*-*-*)
++ conftest_s='
++ .section .tdata,"awT",@progbits
++x:
++ .word 2
++ .text
++ la.tls.gd a0,x
++ la.tls.ie a1,x
++ lui a0,%tls_ie_pcrel_hi(x)
++ lw a0,%pcrel_lo(x)(a0)
++ add a0,a0,tp
++ lw a0,0(a0)
++ lui a0,%tprel_hi(x)
++ add a0,a0,tp,%tprel_add(x)
++ lw a0,%tprel_lo(x)(a0)'
++ tls_first_major=2
++ tls_first_minor=21
++ tls_as_opt='-m32 --fatal-warnings'
++ ;;
+ s390-*-*)
+ conftest_s='
+ .section ".tdata","awT",@progbits
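
The two conftest fragments above (gcc/configure and gcc/configure.ac carry the
same probe) check whether the installed assembler accepts the RISC-V TLS
relocation operators. At the C level, the probed instruction sequences are what
the compiler emits for a thread-local variable under the general-dynamic,
initial-exec and local-exec models; a minimal, illustrative example that
exercises them:

    /* Illustrative only: build with -fPIC, -ftls-model=initial-exec or
       -ftls-model=local-exec to get sequences like the ones probed above.  */
    static __thread int x = 2;

    int
    get_x (void)
    {
      return x;
    }
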
+diff -urN original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c
+--- original-gcc/gcc/testsuite/gcc.c-torture/execute/20101011-1.c 2013-12-31 08:05:35.000000000 +0100
++++ gcc-4.9.2/gcc/testsuite/gcc.c-torture/execute/20101011-1.c 2015-03-07 09:57:54.223132741 +0100
+@@ -6,6 +6,9 @@
+ #elif defined (__powerpc__) || defined (__PPC__) || defined (__ppc__) || defined (__POWERPC__) || defined (__ppc)
+ /* On PPC division by zero does not trap. */
+ # define DO_TEST 0
++#elif defined (__riscv__)
++ /* On RISC-V division by zero does not trap. */
++# define DO_TEST 0
+ #elif defined (__SPU__)
+ /* On SPU division by zero does not trap. */
+ # define DO_TEST 0
+diff -urN original-gcc/gcc/testsuite/gcc.dg/20020312-2.c gcc/gcc/testsuite/gcc.dg/20020312-2.c
+--- original-gcc/gcc/testsuite/gcc.dg/20020312-2.c 2013-12-31 08:05:35.000000000 +0100
++++ gcc-4.9.2/gcc/testsuite/gcc.dg/20020312-2.c 2015-03-07 09:57:54.223132741 +0100
+@@ -66,6 +66,8 @@
+ # else
+ # define PIC_REG "30"
+ # endif
++#elif defined(__riscv__)
++/* No pic register. */
+ #elif defined(__RX__)
+ /* No pic register. */
+ #elif defined(__s390__)
+diff -urN original-gcc/gcc/testsuite/gcc.dg/20040813-1.c gcc/gcc/testsuite/gcc.dg/20040813-1.c
+--- original-gcc/gcc/testsuite/gcc.dg/20040813-1.c 2013-12-31 08:05:35.000000000 +0100
++++ gcc-4.9.2/gcc/testsuite/gcc.dg/20040813-1.c 2015-03-07 09:57:54.227132741 +0100
+@@ -2,7 +2,7 @@
+ /* Contributed by Devang Patel <dpatel@apple.com> */
+
+ /* { dg-do compile } */
+-/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* tile*-*-* nios2-*-* *-*-vxworks* } { "*" } { "" } } */
++/* { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* *-*-aix* alpha*-*-* hppa*64*-*-* ia64-*-* riscv*-*-* tile*-*-* nios2-*-* *-*-vxworks* } { "*" } { "" } } */
+ /* { dg-options "-gstabs" } */
+
+ int
+diff -urN original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c gcc/gcc/testsuite/gcc.dg/stack-usage-1.c
+--- original-gcc/gcc/testsuite/gcc.dg/stack-usage-1.c 2013-12-31 08:05:35.000000000 +0100
++++ gcc-4.9.2/gcc/testsuite/gcc.dg/stack-usage-1.c 2015-03-07 09:57:54.227132741 +0100
+@@ -61,6 +61,8 @@
+ # else
+ # define SIZE 240
+ # endif
++#elif defined (__riscv__)
++# define SIZE 240
+ #elif defined (__AVR__)
+ # define SIZE 254
+ #elif defined (__s390x__)
+diff -urN original-gcc/libatomic/cas_n.c gcc/libatomic/cas_n.c
+--- original-gcc/libatomic/cas_n.c 2014-02-20 18:43:53.000000000 +0100
++++ gcc-4.9.2/libatomic/cas_n.c 2015-03-07 09:57:54.227132741 +0100
+@@ -70,7 +70,7 @@
+ mask = -1;
+ }
+
+- weval = *eptr << shift;
++ weval = (UWORD)*eptr << shift;
+ wnewval = (UWORD)newval << shift;
+ woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
+ do
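
The cast added above is a correctness fix rather than style: *eptr has a
subword type, so without the (UWORD) cast it is only promoted to int before the
shift, and shifting an int by the 32..56 bits needed for the upper bytes of a
64-bit word is undefined. A standalone model, using stand-in types rather than
libatomic's own:

    #include <stdint.h>

    typedef uint64_t UWORD;	/* stand-in for libatomic's word type */

    static UWORD
    widen_then_shift (uint8_t v, int shift)
    {
      /* Widen first, then shift: valid for any shift up to 63.  */
      return (UWORD) v << shift;
      /* "return v << shift;" would promote v only to int, so shifts of
         32 and above would be undefined behavior.  */
    }
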
+diff -urN original-gcc/libatomic/configure.tgt gcc/libatomic/configure.tgt
+--- original-gcc/libatomic/configure.tgt 2014-01-02 23:24:30.000000000 +0100
++++ gcc-4.9.2/libatomic/configure.tgt 2015-03-07 09:57:54.227132741 +0100
+@@ -29,6 +29,7 @@
+ case "${target_cpu}" in
+ alpha*) ARCH=alpha ;;
+ rs6000 | powerpc*) ARCH=powerpc ;;
++ riscv*) ARCH=riscv ;;
+ sh*) ARCH=sh ;;
+
+ arm*)
+diff -urN original-gcc/libatomic/fop_n.c gcc/libatomic/fop_n.c
+--- original-gcc/libatomic/fop_n.c 2014-01-02 23:24:30.000000000 +0100
++++ gcc-4.9.2/libatomic/fop_n.c 2015-03-07 09:57:54.231132741 +0100
+@@ -1,4 +1,4 @@
+-/* Copyright (C) 2012-2014 Free Software Foundation, Inc.
++/* Copyright (C) 2012-2015 Free Software Foundation, Inc.
+ Contributed by Richard Henderson <rth@redhat.com>.
+
+ This file is part of the GNU Atomic Library (libatomic).
+@@ -112,9 +112,9 @@
+
+ pre_barrier (smodel);
+
+- wptr = (UWORD *)mptr;
+- shift = 0;
+- mask = -1;
++ wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
++ shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
++ mask = SIZE(MASK) << shift;
+
+ wopval = (UWORD)opval << shift;
+ woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
+@@ -136,9 +136,9 @@
+
+ pre_barrier (smodel);
+
+- wptr = (UWORD *)mptr;
+- shift = 0;
+- mask = -1;
++ wptr = (UWORD *)((uintptr_t)mptr & -WORDSIZE);
++ shift = (((uintptr_t)mptr % WORDSIZE) * CHAR_BIT) ^ SIZE(INVERT_MASK);
++ mask = SIZE(MASK) << shift;
+
+ wopval = (UWORD)opval << shift;
+ woldval = __atomic_load_n (wptr, __ATOMIC_RELAXED);
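
The replaced lines were a placeholder that treated every access as already
word-sized; the fix computes the containing word, the bit shift and the mask so
that a subword fetch-and-op goes through a word-wide compare-and-swap loop. A
sketch of the address arithmetic, assuming a little-endian layout and stand-in
types (the real code additionally XORs the shift with SIZE(INVERT_MASK) to
handle big-endian targets):

    #include <limits.h>
    #include <stdint.h>

    typedef uintptr_t UWORD;
    #define WORDSIZE sizeof (UWORD)

    /* Find the word containing a 1-, 2- or 4-byte object at mptr, plus the
       shift and mask that select it within that word (little-endian).  */
    static void
    subword_position (void *mptr, UWORD **wptr, int *shift, UWORD *mask,
                      int size)
    {
      *wptr = (UWORD *) ((uintptr_t) mptr & -(uintptr_t) WORDSIZE);
      *shift = ((uintptr_t) mptr % WORDSIZE) * CHAR_BIT;
      *mask = (((UWORD) 1 << (size * CHAR_BIT)) - 1) << *shift;
    }
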
+diff -urN original-gcc/libcpp/configure gcc/libcpp/configure
+--- original-gcc/libcpp/configure 2014-10-30 09:28:58.000000000 +0100
++++ gcc-4.9.2/libcpp/configure 2015-03-07 09:57:54.231132741 +0100
+@@ -7163,6 +7163,7 @@
+ mips*-*-* | \
+ mmix-*-* | \
+ powerpc*-*-* | \
++ riscv*-*-* | \
+ rs6000*-*-* | \
+ s390*-*-* | \
+ sparc*-*-* | \
+diff -urN original-gcc/libcpp/configure.ac gcc/libcpp/configure.ac
+--- original-gcc/libcpp/configure.ac 2014-02-24 16:08:00.000000000 +0100
++++ gcc-4.9.2/libcpp/configure.ac 2015-03-07 09:57:54.235132741 +0100
+@@ -192,6 +192,7 @@
+ mips*-*-* | \
+ mmix-*-* | \
+ powerpc*-*-* | \
++ riscv*-*-* | \
+ rs6000*-*-* | \
+ s390*-*-* | \
+ sparc*-*-* | \
+diff -urN original-gcc/libgcc/config/riscv/crti.S gcc/libgcc/config/riscv/crti.S
+--- original-gcc/libgcc/config/riscv/crti.S 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/crti.S 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1 @@
++/* crti.S is empty because .init_array/.fini_array are used exclusively. */
+diff -urN original-gcc/libgcc/config/riscv/crtn.S gcc/libgcc/config/riscv/crtn.S
+--- original-gcc/libgcc/config/riscv/crtn.S 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/crtn.S 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1 @@
++/* crtn.S is empty because .init_array/.fini_array are used exclusively. */
+diff -urN original-gcc/libgcc/config/riscv/div.S gcc/libgcc/config/riscv/div.S
+--- original-gcc/libgcc/config/riscv/div.S 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/div.S 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,121 @@
++ .text
++ .align 2
++
++#ifndef __riscv64
++/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
++# define __udivdi3 __udivsi3
++# define __umoddi3 __umodsi3
++# define __divdi3 __divsi3
++# define __moddi3 __modsi3
++#else
++ .globl __udivsi3
++__udivsi3:
++ /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */
++ sll a0, a0, 32
++ sll a1, a1, 32
++ move t0, ra
++ jal __udivdi3
++ sext.w a0, a0
++ jr t0
++
++ .globl __umodsi3
++__umodsi3:
++	/* Zero-extend a0 and a1, then compute __udivdi3; the remainder (left in a1) is the result. */
++ sll a0, a0, 32
++ sll a1, a1, 32
++ srl a0, a0, 32
++ srl a1, a1, 32
++ move t0, ra
++ jal __udivdi3
++ sext.w a0, a1
++ jr t0
++
++ .globl __modsi3
++ __modsi3 = __moddi3
++
++ .globl __divsi3
++__divsi3:
++ /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */
++ li t0, -1
++ beq a1, t0, .L20
++#endif
++
++ .globl __divdi3
++__divdi3:
++ bltz a0, .L10
++ bltz a1, .L11
++ /* Since the quotient is positive, fall into __udivdi3. */
++
++ .globl __udivdi3
++__udivdi3:
++ mv a2, a1
++ mv a1, a0
++ li a0, -1
++ beqz a2, .L5
++ li a3, 1
++ bgeu a2, a1, .L2
++.L1:
++ blez a2, .L2
++ slli a2, a2, 1
++ slli a3, a3, 1
++ bgtu a1, a2, .L1
++.L2:
++ li a0, 0
++.L3:
++ bltu a1, a2, .L4
++ sub a1, a1, a2
++ or a0, a0, a3
++.L4:
++ srli a3, a3, 1
++ srli a2, a2, 1
++ bnez a3, .L3
++.L5:
++ ret
++
++ .globl __umoddi3
++__umoddi3:
++ /* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */
++ move t0, ra
++ jal __udivdi3
++ move a0, a1
++ jr t0
++
++ /* Handle negative arguments to __divdi3. */
++.L10:
++ neg a0, a0
++ bgez a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */
++ neg a1, a1
++ j __divdi3 /* Compute __udivdi3(-a0, -a1). */
++.L11: /* Compute __udivdi3(a0, -a1), then negate the result. */
++ neg a1, a1
++.L12:
++ move t0, ra
++ jal __divdi3
++ neg a0, a0
++ jr t0
++
++ .globl __moddi3
++__moddi3:
++ move t0, ra
++ bltz a1, .L31
++ bltz a0, .L32
++.L30:
++ jal __udivdi3 /* The dividend is not negative. */
++ move a0, a1
++ jr t0
++.L31:
++ neg a1, a1
++ bgez a0, .L30
++.L32:
++ neg a0, a0
++	jal	__udivdi3	/* The dividend is negative. */
++ neg a0, a1
++ jr t0
++
++#ifdef __riscv64
++ /* continuation of __divsi3 */
++.L20:
++ sll t0, t0, 31
++ bne a0, t0, __divdi3
++ ret
++#endif
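
The new div.S implements division without the M extension by shift-and-
subtract: the divisor is left-aligned against the dividend, then repeatedly
compared, subtracted and shifted back down, building the quotient bit by bit.
A minimal C model of the __udivdi3 loop above (the assembly returns the
quotient in a0 and leaves the remainder in a1):

    typedef unsigned long uword;	/* register-width, like the asm */

    static uword
    udiv_model (uword num, uword den)
    {
      uword quo = 0, bit = 1;

      if (den == 0)
        return (uword) -1;		/* matches .L5: x/0 yields all ones */

      /* Align the divisor with the dividend, stopping before the top bit
         would be shifted out (what "blez" tests in the assembly).  */
      while (den < num && (long) den > 0)
        {
          den <<= 1;
          bit <<= 1;
        }

      /* Subtract-and-shift: set a quotient bit whenever the shifted
         divisor still fits.  The remainder is what is left in num.  */
      while (bit != 0)
        {
          if (num >= den)
            {
              num -= den;
              quo |= bit;
            }
          den >>= 1;
          bit >>= 1;
        }
      return quo;
    }
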
+diff -urN original-gcc/libgcc/config/riscv/mul.S gcc/libgcc/config/riscv/mul.S
+--- original-gcc/libgcc/config/riscv/mul.S 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/mul.S 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,21 @@
++ .text
++ .align 2
++
++#ifndef __riscv64
++/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine. */
++# define __muldi3 __mulsi3
++#endif
++
++ .globl __muldi3
++__muldi3:
++ mv a2, a0
++ li a0, 0
++.L1:
++ slli a3, a1, _RISCV_SZPTR-1
++ bgez a3, .L2
++ add a0, a0, a2
++.L2:
++ srli a1, a1, 1
++ slli a2, a2, 1
++ bnez a1, .L1
++ ret
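
mul.S provides multiplication for targets without the M extension via the
classic shift-and-add loop; the "slli a3, a1, _RISCV_SZPTR-1" / "bgez" pair
tests the low bit of the multiplier by moving it into the sign position. The
same loop as a C model:

    typedef unsigned long uword;	/* register-width, like the asm */

    static uword
    mul_model (uword a, uword b)
    {
      uword prod = 0;

      while (b != 0)
        {
          if (b & 1)		/* the asm tests this bit via slli/bgez */
            prod += a;
          b >>= 1;		/* srli a1, a1, 1 */
          a <<= 1;		/* slli a2, a2, 1 */
        }
      return prod;
    }
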
+diff -urN original-gcc/libgcc/config/riscv/riscv-fp.c gcc/libgcc/config/riscv/riscv-fp.c
+--- original-gcc/libgcc/config/riscv/riscv-fp.c 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/riscv-fp.c 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,178 @@
++/* Functions needed for soft-float on riscv-linux. Based on
++ rs6000/ppc64-fp.c with TF types removed.
++
++ Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
++ 2000, 2001, 2002, 2003, 2004, 2006, 2009 Free Software Foundation,
++ Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
++<http://www.gnu.org/licenses/>. */
++
++#if defined(__riscv64)
++#include "fp-bit.h"
++
++extern DItype __fixdfdi (DFtype);
++extern DItype __fixsfdi (SFtype);
++extern USItype __fixunsdfsi (DFtype);
++extern USItype __fixunssfsi (SFtype);
++extern DFtype __floatdidf (DItype);
++extern DFtype __floatundidf (UDItype);
++extern SFtype __floatdisf (DItype);
++extern SFtype __floatundisf (UDItype);
++
++static DItype local_fixunssfdi (SFtype);
++static DItype local_fixunsdfdi (DFtype);
++
++DItype
++__fixdfdi (DFtype a)
++{
++ if (a < 0)
++ return - local_fixunsdfdi (-a);
++ return local_fixunsdfdi (a);
++}
++
++DItype
++__fixsfdi (SFtype a)
++{
++ if (a < 0)
++ return - local_fixunssfdi (-a);
++ return local_fixunssfdi (a);
++}
++
++USItype
++__fixunsdfsi (DFtype a)
++{
++ if (a >= - (DFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
++ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
++ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
++ return (SItype) a;
++}
++
++USItype
++__fixunssfsi (SFtype a)
++{
++ if (a >= - (SFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
++ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
++ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
++ return (SItype) a;
++}
++
++DFtype
++__floatdidf (DItype u)
++{
++ DFtype d;
++
++ d = (SItype) (u >> (sizeof (SItype) * 8));
++ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
++ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
++
++ return d;
++}
++
++DFtype
++__floatundidf (UDItype u)
++{
++ DFtype d;
++
++ d = (USItype) (u >> (sizeof (SItype) * 8));
++ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
++ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
++
++ return d;
++}
++
++SFtype
++__floatdisf (DItype u)
++{
++ DFtype f;
++
++ if (53 < (sizeof (DItype) * 8)
++ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
++ {
++ if (! (- ((DItype) 1 << 53) < u
++ && u < ((DItype) 1 << 53)))
++ {
++ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
++ {
++ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
++ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
++ }
++ }
++ }
++ f = (SItype) (u >> (sizeof (SItype) * 8));
++ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
++ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
++
++ return (SFtype) f;
++}
++
++SFtype
++__floatundisf (UDItype u)
++{
++ DFtype f;
++
++ if (53 < (sizeof (DItype) * 8)
++ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
++ {
++ if (u >= ((UDItype) 1 << 53))
++ {
++ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
++ {
++ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
++ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
++ }
++ }
++ }
++ f = (USItype) (u >> (sizeof (SItype) * 8));
++ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
++ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
++
++ return (SFtype) f;
++}
++
++/* This version is needed to prevent recursion; fixunsdfdi in libgcc
++   calls fixdfdi, which in turn calls fixunsdfdi. */
++
++static DItype
++local_fixunsdfdi (DFtype a)
++{
++ USItype hi, lo;
++
++ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
++ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
++ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
++}
++
++/* This version is needed to prevent recursion; fixunssfdi in libgcc
++   calls fixsfdi, which in turn calls fixunssfdi. */
++
++static DItype
++local_fixunssfdi (SFtype original_a)
++{
++ DFtype a = original_a;
++ USItype hi, lo;
++
++ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
++ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
++ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
++}
++
++#endif
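
All of the DImode conversions above follow the same pattern inherited from
rs6000/ppc64-fp.c: split the 64-bit integer into a signed high half and an
unsigned low half, scale the high half by 2^32 and recombine in floating
point. A standalone check of the __floatdidf arithmetic, with fixed-width
types replacing the fp-bit.h typedefs:

    #include <stdio.h>
    #include <stdint.h>

    static double
    floatdidf_model (int64_t u)
    {
      double d = (int32_t) (u >> 32);	/* signed high half */
      d *= 4294967296.0;		/* 2.0 * (1 << 31), i.e. 2^32 */
      d += (uint32_t) (u & 0xffffffffu);	/* unsigned low half */
      return d;
    }

    int
    main (void)
    {
      int64_t x = -123456789012345LL;	/* exactly representable in a double */
      printf ("%d\n", floatdidf_model (x) == (double) x);	/* prints 1 */
      return 0;
    }
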
+diff -urN original-gcc/libgcc/config/riscv/t-dpbit gcc/libgcc/config/riscv/t-dpbit
+--- original-gcc/libgcc/config/riscv/t-dpbit 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/t-dpbit 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,4 @@
++LIB2ADD += dp-bit.c
++
++dp-bit.c: $(srcdir)/fp-bit.c
++ cat $(srcdir)/fp-bit.c > dp-bit.c
+diff -urN original-gcc/libgcc/config/riscv/t-elf gcc/libgcc/config/riscv/t-elf
+--- original-gcc/libgcc/config/riscv/t-elf 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/t-elf 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,3 @@
++LIB2ADD += $(srcdir)/config/riscv/riscv-fp.c \
++ $(srcdir)/config/riscv/mul.S \
++ $(srcdir)/config/riscv/div.S
+diff -urN original-gcc/libgcc/config/riscv/t-fpbit gcc/libgcc/config/riscv/t-fpbit
+--- original-gcc/libgcc/config/riscv/t-fpbit 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/t-fpbit 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,5 @@
++LIB2ADD += fp-bit.c
++
++fp-bit.c: $(srcdir)/fp-bit.c
++ echo '#define FLOAT' > fp-bit.c
++ cat $(srcdir)/fp-bit.c >> fp-bit.c
+diff -urN original-gcc/libgcc/config/riscv/t-linux gcc/libgcc/config/riscv/t-linux
+--- original-gcc/libgcc/config/riscv/t-linux 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/t-linux 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,3 @@
++LIB2ADD += $(srcdir)/config/riscv/riscv-fp.c \
++ $(srcdir)/config/riscv/mul.S \
++ $(srcdir)/config/riscv/div.S
+diff -urN original-gcc/libgcc/config/riscv/t-linux32 gcc/libgcc/config/riscv/t-linux32
+--- original-gcc/libgcc/config/riscv/t-linux32 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/t-linux32 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,2 @@
++HOST_LIBGCC2_CFLAGS += -m32
++CRTSTUFF_CFLAGS += -m32
+diff -urN original-gcc/libgcc/config/riscv/t-tpbit gcc/libgcc/config/riscv/t-tpbit
+--- original-gcc/libgcc/config/riscv/t-tpbit 1970-01-01 01:00:00.000000000 +0100
++++ gcc-4.9.2/libgcc/config/riscv/t-tpbit 2015-03-07 09:51:45.667139025 +0100
+@@ -0,0 +1,10 @@
++LIB2ADD += tp-bit.c
++
++tp-bit.c: $(srcdir)/fp-bit.c
++ echo '#ifdef _RISCVEL' > tp-bit.c
++ echo '# define FLOAT_BIT_ORDER_MISMATCH' >> tp-bit.c
++ echo '#endif' >> tp-bit.c
++ echo '#if __LDBL_MANT_DIG__ == 113' >> tp-bit.c
++ echo '# define TFLOAT' >> tp-bit.c
++ cat $(srcdir)/fp-bit.c >> tp-bit.c
++ echo '#endif' >> tp-bit.c
+diff -urN original-gcc/libgcc/config.host gcc/libgcc/config.host
+--- original-gcc/libgcc/config.host 2014-03-27 16:40:31.000000000 +0100
++++ gcc-4.9.2/libgcc/config.host 2015-03-07 09:57:54.235132741 +0100
+@@ -167,6 +167,9 @@
+ ;;
+ rs6000*-*-*)
+ ;;
++riscv*)
++ cpu_type=riscv
++ ;;
+ score*-*-*)
+ cpu_type=score
+ ;;
+@@ -1002,6 +1005,18 @@
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
+ ;;
++riscv32*-*-linux*)
++ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-tpbit riscv/t-linux riscv/t-linux32"
++ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
++ ;;
++riscv*-*-linux*)
++ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-tpbit riscv/t-linux"
++ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
++ ;;
++riscv*-*-*)
++ tmake_file="${tmake_file} riscv/t-fpbit riscv/t-dpbit riscv/t-elf"
++ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
++ ;;
+ rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
+ md_unwind_header=rs6000/aix-unwind.h
+ tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble"
+diff -urN original-gcc/libsanitizer/asan/asan_linux.cc gcc/libsanitizer/asan/asan_linux.cc
+--- original-gcc/libsanitizer/asan/asan_linux.cc 2013-12-05 10:18:38.000000000 +0100
++++ gcc-4.9.2/libsanitizer/asan/asan_linux.cc 2015-03-07 09:57:54.235132741 +0100
+@@ -98,6 +98,11 @@
+ *pc = ucontext->uc_mcontext.gregs[31];
+ *bp = ucontext->uc_mcontext.gregs[30];
+ *sp = ucontext->uc_mcontext.gregs[29];
++# elif defined(__riscv__)
++ ucontext_t *ucontext = (ucontext_t*)context;
++ *pc = ucontext->uc_mcontext.gregs[REG_PC];
++ *bp = ucontext->uc_mcontext.gregs[REG_S0];
++ *sp = ucontext->uc_mcontext.gregs[REG_SP];
+ #else
+ # error "Unsupported arch"
+ #endif
+diff -urN original-gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_linux.cc 2013-12-05 10:18:38.000000000 +0100
++++ gcc-4.9.2/libsanitizer/sanitizer_common/sanitizer_linux.cc 2015-03-07 09:57:54.239132741 +0100
+@@ -93,11 +93,11 @@
+ }
+
+ uptr internal_open(const char *filename, int flags) {
+- return internal_syscall(__NR_open, (uptr)filename, flags);
++ return internal_syscall(__NR_openat, AT_FDCWD, (uptr)filename, flags);
+ }
+
+ uptr internal_open(const char *filename, int flags, u32 mode) {
+- return internal_syscall(__NR_open, (uptr)filename, flags, mode);
++ return internal_syscall(__NR_openat, AT_FDCWD, (uptr)filename, flags, mode);
+ }
+
+ uptr OpenFile(const char *filename, bool write) {
+@@ -139,7 +139,7 @@
+
+ uptr internal_stat(const char *path, void *buf) {
+ #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+- return internal_syscall(__NR_stat, (uptr)path, (uptr)buf);
++ return internal_syscall(__NR_newfstatat, AT_FDCWD, (uptr)path, (uptr)buf, 0);
+ #else
+ struct stat64 buf64;
+ int res = internal_syscall(__NR_stat64, path, &buf64);
+@@ -150,7 +150,7 @@
+
+ uptr internal_lstat(const char *path, void *buf) {
+ #if SANITIZER_LINUX_USES_64BIT_SYSCALLS
+- return internal_syscall(__NR_lstat, (uptr)path, (uptr)buf);
++ return internal_syscall(__NR_newfstatat, AT_FDCWD, (uptr)path, (uptr)buf, AT_SYMLINK_NOFOLLOW);
+ #else
+ struct stat64 buf64;
+ int res = internal_syscall(__NR_lstat64, path, &buf64);
+@@ -178,15 +178,15 @@
+ }
+
+ uptr internal_dup2(int oldfd, int newfd) {
+- return internal_syscall(__NR_dup2, oldfd, newfd);
++ return internal_syscall(__NR_dup3, oldfd, newfd, 0);
+ }
+
+ uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+- return internal_syscall(__NR_readlink, (uptr)path, (uptr)buf, bufsize);
++ return internal_syscall(__NR_readlinkat, AT_FDCWD, (uptr)path, (uptr)buf, bufsize);
+ }
+
+ uptr internal_unlink(const char *path) {
+- return internal_syscall(__NR_unlink, (uptr)path);
++ return internal_syscall(__NR_unlinkat, AT_FDCWD, (uptr)path);
+ }
+
+ uptr internal_sched_yield() {
+@@ -588,7 +588,7 @@
+ }
+
+ uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
+- return internal_syscall(__NR_getdents, fd, (uptr)dirp, count);
++ return internal_syscall(__NR_getdents64, fd, (uptr)dirp, count);
+ }
+
+ uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
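
The sanitizer changes above are not RISC-V-specific polish but a hard
requirement: the RISC-V Linux port uses the generic syscall table, which omits
the legacy numbers, so __NR_open, __NR_dup2, __NR_stat and friends simply do
not exist and only the *at/dup3/getdents64 variants are available. Passing
AT_FDCWD makes each *at call behave like its legacy counterpart (one
behavioural difference worth noting: dup3 fails with EINVAL when oldfd ==
newfd, where dup2 would succeed). An illustrative sketch of the open
replacement outside the sanitizer's own wrappers:

    #include <fcntl.h>		/* AT_FDCWD, O_* flags */
    #include <unistd.h>		/* syscall */
    #include <sys/syscall.h>

    /* Illustrative only: open a file on a kernel that lacks __NR_open.  */
    static long
    raw_open (const char *path, int flags)
    {
      /* With AT_FDCWD, openat resolves relative paths against the current
         working directory, exactly as open would.  */
      return syscall (__NR_openat, AT_FDCWD, path, flags);
    }
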
+diff -urN original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc 2014-10-14 21:26:42.000000000 +0200
++++ gcc-4.9.2/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc 2015-03-07 09:57:54.235132741 +0100
+@@ -63,7 +63,7 @@
+ unsigned struct_statfs64_sz = sizeof(struct statfs64);
+ } // namespace __sanitizer
+
+-#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__sparc__)
++#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__sparc__) && !defined(__riscv__)
+ COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
+ #endif
+
+diff -urN original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+--- original-gcc/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h 2014-10-14 21:26:42.000000000 +0200
++++ gcc-4.9.2/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h 2015-03-07 09:57:54.239132741 +0100
+@@ -67,6 +67,10 @@
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 144;
+ const unsigned struct_kernel_stat64_sz = 104;
++#elif defined(__riscv__)
++ const unsigned struct___old_kernel_stat_sz = 0;
++ const unsigned struct_kernel_stat_sz = 128;
++ const unsigned struct_kernel_stat64_sz = 128;
+ #elif defined(__sparc__) && defined(__arch64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 104;
+@@ -367,7 +371,7 @@
+ typedef long __sanitizer___kernel_off_t;
+ #endif
+
+-#if defined(__powerpc__)
++#if defined(__powerpc__) || defined(__riscv__)
+ typedef unsigned int __sanitizer___kernel_old_uid_t;
+ typedef unsigned int __sanitizer___kernel_old_gid_t;
+ #else