Diffstat (limited to 'util/crossgcc/patches/gcc-6.3.0_riscv.patch')
 -rw-r--r--  util/crossgcc/patches/gcc-6.3.0_riscv.patch  10521
 1 file changed, 10521 insertions(+), 0 deletions(-)
diff --git a/util/crossgcc/patches/gcc-6.3.0_riscv.patch b/util/crossgcc/patches/gcc-6.3.0_riscv.patch
new file mode 100644
index 0000000000..ca9555de0b
--- /dev/null
+++ b/util/crossgcc/patches/gcc-6.3.0_riscv.patch
@@ -0,0 +1,10521 @@
+diff --git original-gcc/gcc/common/config/riscv/riscv-common.c gcc-6.3.0/gcc/common/config/riscv/riscv-common.c
+new file mode 100644
+index 00000000000..50f1485f87a
+--- /dev/null
++++ gcc-6.3.0/gcc/common/config/riscv/riscv-common.c
+@@ -0,0 +1,131 @@
++/* Common hooks for RISC-V.
++ Copyright (C) 2016 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "common/common-target.h"
++#include "common/common-target-def.h"
++#include "opts.h"
++#include "flags.h"
++#include "diagnostic-core.h"
++
++/* Parse a RISC-V ISA string into an option mask. */
++
++static void
++riscv_parse_arch_string (const char *isa, int *flags, location_t loc)
++{
++ const char *p = isa;
++
++ if (strncmp (p, "rv32", 4) == 0)
++ *flags &= ~MASK_64BIT, p += 4;
++ else if (strncmp (p, "rv64", 4) == 0)
++ *flags |= MASK_64BIT, p += 4;
++ else
++ {
++ error_at (loc, "-march=%s: ISA string must begin with rv32 or rv64", isa);
++ return;
++ }
++
++ if (*p == 'g')
++ {
++ p++;
++
++ *flags |= MASK_MUL;
++ *flags |= MASK_ATOMIC;
++ *flags |= MASK_HARD_FLOAT;
++ *flags |= MASK_DOUBLE_FLOAT;
++ }
++ else if (*p == 'i')
++ {
++ p++;
++
++ *flags &= ~MASK_MUL;
++ if (*p == 'm')
++ *flags |= MASK_MUL, p++;
++
++ *flags &= ~MASK_ATOMIC;
++ if (*p == 'a')
++ *flags |= MASK_ATOMIC, p++;
++
++ *flags &= ~(MASK_HARD_FLOAT | MASK_DOUBLE_FLOAT);
++ if (*p == 'f')
++ {
++ *flags |= MASK_HARD_FLOAT, p++;
++
++ if (*p == 'd')
++ {
++ *flags |= MASK_DOUBLE_FLOAT;
++ p++;
++ }
++ }
++ }
++ else
++ {
++ error_at (loc, "-march=%s: invalid ISA string", isa);
++ return;
++ }
++
++ *flags &= ~MASK_RVC;
++ if (*p == 'c')
++ *flags |= MASK_RVC, p++;
++
++ if (*p)
++ {
++ error_at (loc, "-march=%s: unsupported ISA substring %qs", isa, p);
++ return;
++ }
++}
++
++/* Implement TARGET_HANDLE_OPTION. */
++
++static bool
++riscv_handle_option (struct gcc_options *opts,
++ struct gcc_options *opts_set ATTRIBUTE_UNUSED,
++ const struct cl_decoded_option *decoded,
++ location_t loc)
++{
++ switch (decoded->opt_index)
++ {
++ case OPT_march_:
++ riscv_parse_arch_string (decoded->arg, &opts->x_target_flags, loc);
++ return true;
++
++ default:
++ return true;
++ }
++}
++
++/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
++static const struct default_options riscv_option_optimization_table[] =
++ {
++ { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
++ { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
++ { OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 },
++ { OPT_LEVELS_NONE, 0, NULL, 0 }
++ };
++
++#undef TARGET_OPTION_OPTIMIZATION_TABLE
++#define TARGET_OPTION_OPTIMIZATION_TABLE riscv_option_optimization_table
++
++#undef TARGET_HANDLE_OPTION
++#define TARGET_HANDLE_OPTION riscv_handle_option
++
++struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER;
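The parser above accepts the grammar rv(32|64)(g | i[m][a][f[d]])[c]. As a worked illustration, here is a minimal standalone sketch of the same flag logic; the MASK_* bit values are hypothetical stand-ins for GCC's target flags, and the sketch assumes *flags starts out cleared, so the explicit flag-clearing done above is omitted:

  /* Standalone sketch of riscv_parse_arch_string's flag results.
     MASK_* values are illustrative stand-ins, not GCC's.  */
  #include <stdio.h>
  #include <string.h>

  #define MASK_64BIT        0x01
  #define MASK_MUL          0x02
  #define MASK_ATOMIC       0x04
  #define MASK_HARD_FLOAT   0x08
  #define MASK_DOUBLE_FLOAT 0x10
  #define MASK_RVC          0x20

  static int
  parse (const char *isa, int *flags)
  {
    const char *p = isa;

    if (strncmp (p, "rv32", 4) == 0)
      p += 4;
    else if (strncmp (p, "rv64", 4) == 0)
      *flags |= MASK_64BIT, p += 4;
    else
      return -1;

    if (*p == 'g')
      *flags |= MASK_MUL | MASK_ATOMIC | MASK_HARD_FLOAT | MASK_DOUBLE_FLOAT, p++;
    else if (*p == 'i')
      {
        p++;
        if (*p == 'm') *flags |= MASK_MUL, p++;
        if (*p == 'a') *flags |= MASK_ATOMIC, p++;
        if (*p == 'f')
          {
            *flags |= MASK_HARD_FLOAT, p++;
            if (*p == 'd') *flags |= MASK_DOUBLE_FLOAT, p++;
          }
      }
    else
      return -1;

    if (*p == 'c') *flags |= MASK_RVC, p++;
    return *p == '\0' ? 0 : -1;   /* trailing junk is an error */
  }

  int
  main (void)
  {
    int f = 0;
    parse ("rv64gc", &f);   /* 64BIT|MUL|ATOMIC|HARD_FLOAT|DOUBLE_FLOAT|RVC */
    printf ("rv64gc   -> %#x\n", f);
    f = 0;
    parse ("rv32imac", &f); /* MUL|ATOMIC|RVC; 32-bit, soft-float */
    printf ("rv32imac -> %#x\n", f);
    return 0;
  }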
+diff --git original-gcc/gcc/config.gcc gcc-6.3.0/gcc/config.gcc
+index bc389eb45e7..ddfa4dccb52 100644
+--- original-gcc/gcc/config.gcc
++++ gcc-6.3.0/gcc/config.gcc
+@@ -451,6 +451,10 @@ powerpc*-*-*)
+ esac
+ extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
+ ;;
++riscv*)
++ cpu_type=riscv
++ extra_objs="riscv-builtins.o riscv-c.o"
++ ;;
+ rs6000*-*-*)
+ extra_options="${extra_options} g.opt fused-madd.opt rs6000/rs6000-tables.opt"
+ ;;
+@@ -2016,6 +2020,34 @@ microblaze*-*-elf)
+ cxx_target_objs="${cxx_target_objs} microblaze-c.o"
+ tmake_file="${tmake_file} microblaze/t-microblaze"
+ ;;
++riscv*-*-linux*)
++ tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} riscv/linux.h"
++ case "x${enable_multilib}" in
++ xno) ;;
++ xyes) tmake_file="${tmake_file} riscv/t-linux-multilib" ;;
++ *) echo "Unknown value for enable_multilib"; exit 1
++ esac
++ tmake_file="${tmake_file} riscv/t-riscv riscv/t-linux"
++ gnu_ld=yes
++ gas=yes
++ # Force .init_array support. The configure script cannot always
++ # automatically detect that GAS supports it, yet we require it.
++ gcc_cv_initfini_array=yes
++ ;;
++riscv*-*-elf*)
++ tm_file="elfos.h newlib-stdint.h ${tm_file} riscv/elf.h"
++ case "x${enable_multilib}" in
++ xno) ;;
++ xyes) tmake_file="${tmake_file} riscv/t-elf-multilib" ;;
++ *) echo "Unknown value for enable_multilib"; exit 1
++ esac
++ tmake_file="${tmake_file} riscv/t-riscv"
++ gnu_ld=yes
++ gas=yes
++ # Force .init_array support. The configure script cannot always
++ # automatically detect that GAS supports it, yet we require it.
++ gcc_cv_initfini_array=yes
++ ;;
+ mips*-*-netbsd*) # NetBSD/mips, either endian.
+ target_cpu_default="MASK_ABICALLS"
+ tm_file="elfos.h ${tm_file} mips/elf.h netbsd.h netbsd-elf.h mips/netbsd.h"
+@@ -3939,6 +3971,70 @@ case "${target}" in
+ done
+ ;;
+
++ riscv*-*-*)
++ supported_defaults="abi arch tune"
++
++ case "${target}" in
++ riscv32*) xlen=32 ;;
++ riscv64*) xlen=64 ;;
++ *) echo "Unsupported RISC-V target ${target}" 1>&2; exit 1 ;;
++ esac
++
++ # Infer arch from --with-arch, --target, and --with-abi.
++ case "${with_arch}" in
++ rv32i* | rv32g* | rv64i* | rv64g*)
++ # OK.
++ ;;
++ "")
++ # Infer XLEN, but otherwise assume GC.
++ case "${with_abi}" in
++ ilp32 | ilp32f | ilp32d) with_arch="rv32gc" ;;
++ lp64 | lp64f | lp64d) with_arch="rv64gc" ;;
++ *) with_arch="rv${xlen}gc" ;;
++ esac
++ ;;
++ *)
++ echo "--with-arch=${with_arch} is not supported. The argument must begin with rv32i, rv32g, rv64i, or rv64g." 1>&2
++ exit 1
++ ;;
++ esac
++
++ # Make sure --with-abi is valid. If it was not specified,
++ # pick a default based on the ISA, preferring soft-float
++ # unless the D extension is present.
++ case "${with_abi}" in
++ ilp32 | ilp32f | ilp32d | lp64 | lp64f | lp64d)
++ ;;
++ "")
++ case "${with_arch}" in
++ rv32*d* | rv32g*) with_abi=ilp32d ;;
++ rv32*) with_abi=ilp32 ;;
++ rv64*d* | rv64g*) with_abi=lp64d ;;
++ rv64*) with_abi=lp64 ;;
++ esac
++ ;;
++ *)
++ echo "--with-abi=${with_abi} is not supported" 1>&2
++ exit 1
++ ;;
++ esac
++
++ # Make sure ABI and ISA are compatible.
++ case "${with_abi},${with_arch}" in
++ ilp32,rv32* \
++ | ilp32f,rv32*f* | ilp32f,rv32g* \
++ | ilp32d,rv32*d* | ilp32d,rv32g* \
++ | lp64,rv64* \
++ | lp64f,rv64*f* | lp64f,rv64g* \
++ | lp64d,rv64*d* | lp64d,rv64g*)
++ ;;
++ *)
++ echo "--with-abi=${with_abi} is not supported for ISA ${with_arch}" 1>&2
++ exit 1
++ ;;
++ esac
++ ;;
++
+ mips*-*-*)
+ supported_defaults="abi arch arch_32 arch_64 float fpu nan fp_32 odd_spreg_32 tune tune_32 tune_64 divide llsc mips-plt synci lxc1-sxc1 madd4"
+
+diff --git original-gcc/gcc/config/riscv/constraints.md gcc-6.3.0/gcc/config/riscv/constraints.md
+new file mode 100644
+index 00000000000..ae93788e44a
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/constraints.md
+@@ -0,0 +1,78 @@
++;; Constraint definitions for RISC-V target.
++;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (andrew@sifive.com).
++;; Based on MIPS target for GNU compiler.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++;; Register constraints
++
++(define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
++ "A floating-point register (if available).")
++
++(define_register_constraint "j" "SIBCALL_REGS"
++ "@internal")
++
++;; Avoid using register t0 for JALR's argument, because for some
++;; microarchitectures that is a return-address stack hint.
++(define_register_constraint "l" "JALR_REGS"
++ "@internal")
++
++;; General constraints
++
++(define_constraint "I"
++ "An I-type 12-bit signed immediate."
++ (and (match_code "const_int")
++ (match_test "SMALL_OPERAND (ival)")))
++
++(define_constraint "J"
++ "Integer zero."
++ (and (match_code "const_int")
++ (match_test "ival == 0")))
++
++(define_constraint "K"
++ "A 5-bit unsigned immediate for CSR access instructions."
++ (and (match_code "const_int")
++ (match_test "IN_RANGE (ival, 0, 31)")))
++
++;; Floating-point constant +0.0, used for FCVT-based moves when FMV is
++;; not available in RV32.
++(define_constraint "G"
++ "@internal"
++ (and (match_code "const_double")
++ (match_test "op == CONST0_RTX (mode)")))
++
++(define_memory_constraint "A"
++ "An address that is held in a general-purpose register."
++ (and (match_code "mem")
++ (match_test "GET_CODE(XEXP(op,0)) == REG")))
++
++(define_constraint "S"
++ "@internal
++ A constant call address."
++ (match_operand 0 "absolute_symbolic_operand"))
++
++(define_constraint "U"
++ "@internal
++ A PLT-indirect call address."
++ (match_operand 0 "plt_symbolic_operand"))
++
++(define_constraint "T"
++ "@internal
++ A constant @code{move_operand}."
++ (and (match_operand 0 "move_operand")
++ (match_test "CONSTANT_P (op)")))
+diff --git original-gcc/gcc/config/riscv/elf.h gcc-6.3.0/gcc/config/riscv/elf.h
+new file mode 100644
+index 00000000000..391e59f49b9
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/elf.h
+@@ -0,0 +1,35 @@
++/* Target macros for riscv*-elf targets.
++ Copyright (C) 1994-2017 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#define LINK_SPEC "\
++-melf" XLEN_SPEC "lriscv \
++%{shared}"
++
++/* Link against Newlib libraries, because the ELF backend assumes Newlib.
++ Handle the circular dependence between libc and libgloss. */
++#undef LIB_SPEC
++#define LIB_SPEC "--start-group -lc -lgloss --end-group"
++
++#undef STARTFILE_SPEC
++#define STARTFILE_SPEC "crt0%O%s crtbegin%O%s"
++
++#undef ENDFILE_SPEC
++#define ENDFILE_SPEC "crtend%O%s"
++
++#define NO_IMPLICIT_EXTERN_C 1
+diff --git original-gcc/gcc/config/riscv/generic.md gcc-6.3.0/gcc/config/riscv/generic.md
+new file mode 100644
+index 00000000000..294c7ef729d
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/generic.md
+@@ -0,0 +1,78 @@
++;; Generic DFA-based pipeline description for RISC-V targets.
++;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (andrew@sifive.com).
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published
++;; by the Free Software Foundation; either version 3, or (at your
++;; option) any later version.
++
++;; GCC is distributed in the hope that it will be useful, but WITHOUT
++;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++;; License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++
++(define_automaton "pipe0")
++(define_cpu_unit "alu" "pipe0")
++(define_cpu_unit "imuldiv" "pipe0")
++(define_cpu_unit "fdivsqrt" "pipe0")
++
++(define_insn_reservation "generic_alu" 1
++ (eq_attr "type" "unknown,const,arith,shift,slt,multi,nop,logical,move")
++ "alu")
++
++(define_insn_reservation "generic_load" 3
++ (eq_attr "type" "load,fpload")
++ "alu")
++
++(define_insn_reservation "generic_store" 1
++ (eq_attr "type" "store,fpstore")
++ "alu")
++
++(define_insn_reservation "generic_xfer" 3
++ (eq_attr "type" "mfc,mtc,fcvt,fmove,fcmp")
++ "alu")
++
++(define_insn_reservation "generic_branch" 1
++ (eq_attr "type" "branch,jump,call")
++ "alu")
++
++(define_insn_reservation "generic_imul" 10
++ (eq_attr "type" "imul")
++ "imuldiv*10")
++
++(define_insn_reservation "generic_idivsi" 34
++ (and (eq_attr "type" "idiv")
++ (eq_attr "mode" "SI"))
++ "imuldiv*34")
++
++(define_insn_reservation "generic_idivdi" 66
++ (and (eq_attr "type" "idiv")
++ (eq_attr "mode" "DI"))
++ "imuldiv*66")
++
++(define_insn_reservation "generic_fmul_single" 5
++ (and (eq_attr "type" "fadd,fmul,fmadd")
++ (eq_attr "mode" "SF"))
++ "alu")
++
++(define_insn_reservation "generic_fmul_double" 7
++ (and (eq_attr "type" "fadd,fmul,fmadd")
++ (eq_attr "mode" "DF"))
++ "alu")
++
++(define_insn_reservation "generic_fdiv" 20
++ (eq_attr "type" "fdiv")
++ "fdivsqrt*20")
++
++(define_insn_reservation "generic_fsqrt" 25
++ (eq_attr "type" "fsqrt")
++ "fdivsqrt*25")
+diff --git original-gcc/gcc/config/riscv/linux.h gcc-6.3.0/gcc/config/riscv/linux.h
+new file mode 100644
+index 00000000000..0c622118056
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/linux.h
+@@ -0,0 +1,40 @@
++/* Definitions for RISC-V GNU/Linux systems with ELF format.
++ Copyright (C) 1998-2017 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#define TARGET_OS_CPP_BUILTINS() \
++ do { \
++ GNU_USER_TARGET_OS_CPP_BUILTINS(); \
++ } while (0)
++
++#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux-riscv" XLEN_SPEC "-" ABI_SPEC ".so.1"
++
++/* Because RISC-V only has word-sized atomics, it requires libatomic where
++ others do not. So link libatomic by default, as needed. */
++#undef LIB_SPEC
++#define LIB_SPEC GNU_USER_TARGET_LIB_SPEC \
++ " %{pthread:" LD_AS_NEEDED_OPTION " -latomic " LD_NO_AS_NEEDED_OPTION "}" \
++
++#define LINK_SPEC "\
++-melf" XLEN_SPEC "lriscv \
++%{shared} \
++ %{!shared: \
++ %{!static: \
++ %{rdynamic:-export-dynamic} \
++ -dynamic-linker " GNU_USER_DYNAMIC_LINKER "} \
++ %{static:-static}}"
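The dynamic linker name above expands to, for example, /lib/ld-linux-riscv64-lp64d.so.1 on an rv64gc/lp64d toolchain. The libatomic spec matters for code like the sketch below: sub-word atomics have no direct LR/SC form, so this GCC is expected to emit a libatomic call, which the %{pthread:...} clause links as needed (an illustration, not part of the patch):

  /* A sub-word atomic RMW.  RISC-V LR/SC operate on word-sized data,
     so the fetch-add below is expected to become a libatomic call
     (__atomic_fetch_add_1) rather than an inline AMO sequence.  */
  #include <stdint.h>

  uint8_t counter;

  uint8_t
  bump (void)
  {
    return __atomic_fetch_add (&counter, 1, __ATOMIC_SEQ_CST);
  }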
+diff --git original-gcc/gcc/config/riscv/multilib-generator gcc-6.3.0/gcc/config/riscv/multilib-generator
+new file mode 100755
+index 00000000000..b7ebf7bed41
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/multilib-generator
+@@ -0,0 +1,65 @@
++#!/usr/bin/env python
++
++# RISC-V multilib list generator.
++# Copyright (C) 2011-2017 Free Software Foundation, Inc.
++# Contributed by Andrew Waterman (andrew@sifive.com).
++#
++# This file is part of GCC.
++#
++# GCC is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3, or (at your option)
++# any later version.
++#
++# GCC is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3. If not see
++# <http://www.gnu.org/licenses/>.
++
++# Each argument to this script is of the form
++# <primary arch>-<abi>-<additional arches>-<extensions>
++# For example,
++# rv32imafd-ilp32d-rv32g-c,v
++# means that, in addition to rv32imafd, these configurations can also use the
++# rv32imafd-ilp32d libraries: rv32imafdc, rv32imafdv, rv32g, rv32gc, rv32gv
++
++from __future__ import print_function
++import sys
++import collections
++
++arches = collections.OrderedDict()
++abis = collections.OrderedDict()
++required = []
++reuse = []
++
++for cfg in sys.argv[1:]:
++ (arch, abi, extra, ext) = cfg.split('-')
++ arches[arch] = 1
++ abis[abi] = 1
++ extra = list(filter(None, extra.split(',')))
++ ext = list(filter(None, ext.split(',')))
++ alts = sum([[x] + [x + y for y in ext] for x in [arch] + extra], [])
++ alts = alts + [x.replace('imafd', 'g') for x in alts if 'imafd' in x]
++ for alt in alts[1:]:
++ arches[alt] = 1
++ reuse.append('march.%s/mabi.%s=march.%s/mabi.%s' % (arch, abi, alt, abi))
++ required.append('march=%s/mabi=%s' % (arch, abi))
++
++arch_options = '/'.join(['march=%s' % x for x in arches.keys()])
++arch_dirnames = ' '.join(arches.keys())
++
++abi_options = '/'.join(['mabi=%s' % x for x in abis.keys()])
++abi_dirnames = ' '.join(abis.keys())
++
++prog = sys.argv[0].split('/')[-1]
++print('# This file was generated by %s with the command:' % prog)
++print('# %s' % ' '.join(sys.argv))
++
++print('MULTILIB_OPTIONS = %s %s' % (arch_options, abi_options))
++print('MULTILIB_DIRNAMES = %s %s' % (arch_dirnames, abi_dirnames))
++print('MULTILIB_REQUIRED = %s' % ' '.join(required))
++print('MULTILIB_REUSE = %s' % ' '.join(reuse))
+diff --git original-gcc/gcc/config/riscv/peephole.md gcc-6.3.0/gcc/config/riscv/peephole.md
+new file mode 100644
+index 00000000000..7e644e01759
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/peephole.md
+@@ -0,0 +1,40 @@
++;; Peephole optimizations for RISC-V for GNU compiler.
++;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (andrew@sifive.com).
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++;; Simplify (unsigned long)(unsigned int)a << const
++(define_peephole2
++ [(set (match_operand:DI 0 "register_operand")
++ (ashift:DI (match_operand:DI 1 "register_operand")
++ (match_operand 2 "const_int_operand")))
++ (set (match_operand:DI 3 "register_operand")
++ (lshiftrt:DI (match_dup 0) (match_dup 2)))
++ (set (match_operand:DI 4 "register_operand")
++ (ashift:DI (match_dup 3) (match_operand 5 "const_int_operand")))]
++ "TARGET_64BIT
++ && INTVAL (operands[5]) < INTVAL (operands[2])
++ && (REGNO (operands[3]) == REGNO (operands[4])
++ || peep2_reg_dead_p (3, operands[3]))"
++ [(set (match_dup 0)
++ (ashift:DI (match_dup 1) (match_dup 2)))
++ (set (match_dup 4)
++ (lshiftrt:DI (match_dup 0) (match_operand 5)))]
++{
++ operands[5] = GEN_INT (INTVAL (operands[2]) - INTVAL (operands[5]));
++})
+diff --git original-gcc/gcc/config/riscv/pic.md gcc-6.3.0/gcc/config/riscv/pic.md
+new file mode 100644
+index 00000000000..6a29ead32d3
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/pic.md
+@@ -0,0 +1,85 @@
++;; PIC codegen for RISC-V for GNU compiler.
++;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (andrew@sifive.com).
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++
++;; Simplify PIC loads to static variables.
++;; These should go away once we figure out how to emit auipc discretely.
++
++(define_insn "*local_pic_load<mode>"
++ [(set (match_operand:ANYI 0 "register_operand" "=r")
++ (mem:ANYI (match_operand 1 "absolute_symbolic_operand" "")))]
++ "USE_LOAD_ADDRESS_MACRO (operands[1])"
++ "<load>\t%0,%1"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "*local_pic_load<mode>"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
++ (clobber (match_scratch:DI 2 "=r"))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
++ "<load>\t%0,%1,%2"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "*local_pic_load<mode>"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (mem:ANYF (match_operand 1 "absolute_symbolic_operand" "")))
++ (clobber (match_scratch:SI 2 "=r"))]
++ "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[1])"
++ "<load>\t%0,%1,%2"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "*local_pic_loadu<mode>"
++ [(set (match_operand:SUPERQI 0 "register_operand" "=r")
++ (zero_extend:SUPERQI (mem:SUBX (match_operand 1 "absolute_symbolic_operand" ""))))]
++ "USE_LOAD_ADDRESS_MACRO (operands[1])"
++ "<load>u\t%0,%1"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "*local_pic_storedi<mode>"
++ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
++ (clobber (match_scratch:DI 2 "=&r"))]
++ "TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++ "<store>\t%z1,%0,%2"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "*local_pic_storesi<mode>"
++ [(set (mem:ANYI (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYI 1 "reg_or_0_operand" "rJ"))
++ (clobber (match_scratch:SI 2 "=&r"))]
++ "!TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++ "<store>\t%z1,%0,%2"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "*local_pic_storedi<mode>"
++ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYF 1 "register_operand" "f"))
++ (clobber (match_scratch:DI 2 "=r"))]
++ "TARGET_HARD_FLOAT && TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++ "<store>\t%1,%0,%2"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "*local_pic_storesi<mode>"
++ [(set (mem:ANYF (match_operand 0 "absolute_symbolic_operand" ""))
++ (match_operand:ANYF 1 "register_operand" "f"))
++ (clobber (match_scratch:SI 2 "=r"))]
++ "TARGET_HARD_FLOAT && !TARGET_64BIT && USE_LOAD_ADDRESS_MACRO (operands[0])"
++ "<store>\t%1,%0,%2"
++ [(set (attr "length") (const_int 8))])
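The kind of access these patterns keep as a single 8-byte macro insn is an ordinary load or store of a file-local object, e.g. (illustrative):

  /* Under a code model where USE_LOAD_ADDRESS_MACRO holds for x,
     this load stays one "lw %0,x" macro insn (an auipc-based pair,
     length 8) until assembly, matching *local_pic_load above.  */
  static int x;

  int
  get_x (void)
  {
    return x;
  }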
+diff --git original-gcc/gcc/config/riscv/predicates.md gcc-6.3.0/gcc/config/riscv/predicates.md
+new file mode 100644
+index 00000000000..854af1481f7
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/predicates.md
+@@ -0,0 +1,180 @@
++;; Predicate description for RISC-V target.
++;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (andrew@sifive.com).
++;; Based on MIPS target for GNU compiler.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_predicate "const_arith_operand"
++ (and (match_code "const_int")
++ (match_test "SMALL_OPERAND (INTVAL (op))")))
++
++(define_predicate "arith_operand"
++ (ior (match_operand 0 "const_arith_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "const_csr_operand"
++ (and (match_code "const_int")
++ (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
++
++(define_predicate "csr_operand"
++ (ior (match_operand 0 "const_csr_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "sle_operand"
++ (and (match_code "const_int")
++ (match_test "SMALL_OPERAND (INTVAL (op) + 1)")))
++
++(define_predicate "sleu_operand"
++ (and (match_operand 0 "sle_operand")
++ (match_test "INTVAL (op) + 1 != 0")))
++
++(define_predicate "const_0_operand"
++ (and (match_code "const_int,const_wide_int,const_double,const_vector")
++ (match_test "op == CONST0_RTX (GET_MODE (op))")))
++
++(define_predicate "reg_or_0_operand"
++ (ior (match_operand 0 "const_0_operand")
++ (match_operand 0 "register_operand")))
++
++;; Only use branch-on-bit sequences when the mask is not an ANDI immediate.
++(define_predicate "branch_on_bit_operand"
++ (and (match_code "const_int")
++ (match_test "INTVAL (op) >= IMM_BITS - 1")))
++
++;; A legitimate CONST_INT operand that takes more than one instruction
++;; to load.
++(define_predicate "splittable_const_int_operand"
++ (match_code "const_int")
++{
++ /* Don't handle multi-word moves this way; we don't want to introduce
++ the individual word-mode moves until after reload. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
++ return false;
++
++ /* Otherwise check whether the constant can be loaded in a single
++ instruction. */
++ return !LUI_OPERAND (INTVAL (op)) && !SMALL_OPERAND (INTVAL (op));
++})
++
++(define_predicate "move_operand"
++ (match_operand 0 "general_operand")
++{
++ enum riscv_symbol_type symbol_type;
++
++ /* The thinking here is as follows:
++
++ (1) The move expanders should split complex load sequences into
++ individual instructions. Those individual instructions can
++ then be optimized by all rtl passes.
++
++ (2) The target of pre-reload load sequences should not be used
++ to store temporary results. If the target register is only
++ assigned one value, reload can rematerialize that value
++ on demand, rather than spill it to the stack.
++
++ (3) If we allowed pre-reload passes like combine and cse to recreate
++ complex load sequences, we would want to be able to split the
++ sequences before reload as well, so that the pre-reload scheduler
++ can see the individual instructions. This falls foul of (2);
++ the splitter would be forced to reuse the target register for
++ intermediate results.
++
++ (4) We want to define complex load splitters for combine. These
++ splitters can request a temporary scratch register, which avoids
++ the problem in (2). They allow things like:
++
++ (set (reg T1) (high SYM))
++ (set (reg T2) (low (reg T1) SYM))
++ (set (reg X) (plus (reg T2) (const_int OFFSET)))
++
++ to be combined into:
++
++ (set (reg T3) (high SYM+OFFSET))
++ (set (reg X) (lo_sum (reg T3) SYM+OFFSET))
++
++ if T2 is only used this once. */
++ switch (GET_CODE (op))
++ {
++ case CONST_INT:
++ return !splittable_const_int_operand (op, mode);
++
++ case CONST:
++ case SYMBOL_REF:
++ case LABEL_REF:
++ return riscv_symbolic_constant_p (op, &symbol_type)
++ && !riscv_split_symbol_type (symbol_type);
++
++ case HIGH:
++ op = XEXP (op, 0);
++ return riscv_symbolic_constant_p (op, &symbol_type)
++ && riscv_split_symbol_type (symbol_type)
++ && symbol_type != SYMBOL_PCREL;
++
++ default:
++ return true;
++ }
++})
++
++(define_predicate "symbolic_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++ enum riscv_symbol_type type;
++ return riscv_symbolic_constant_p (op, &type);
++})
++
++(define_predicate "absolute_symbolic_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++ enum riscv_symbol_type type;
++ return (riscv_symbolic_constant_p (op, &type)
++ && (type == SYMBOL_ABSOLUTE || type == SYMBOL_PCREL));
++})
++
++(define_predicate "plt_symbolic_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++ enum riscv_symbol_type type;
++ return (riscv_symbolic_constant_p (op, &type)
++ && type == SYMBOL_GOT_DISP && !SYMBOL_REF_WEAK (op) && TARGET_PLT);
++})
++
++(define_predicate "call_insn_operand"
++ (ior (match_operand 0 "absolute_symbolic_operand")
++ (match_operand 0 "plt_symbolic_operand")
++ (match_operand 0 "register_operand")))
++
++(define_predicate "modular_operator"
++ (match_code "plus,minus,mult,ashift"))
++
++(define_predicate "equality_operator"
++ (match_code "eq,ne"))
++
++(define_predicate "order_operator"
++ (match_code "eq,ne,lt,ltu,le,leu,ge,geu,gt,gtu"))
++
++(define_predicate "signed_order_operator"
++ (match_code "eq,ne,lt,le,ge,gt"))
++
++(define_predicate "fp_native_comparison"
++ (match_code "eq,lt,le,gt,ge"))
++
++(define_predicate "fp_scc_comparison"
++ (match_code "unordered,ordered,unlt,unge,unle,ungt,ltgt,ne,eq,lt,le,gt,ge"))
++
++(define_predicate "fp_branch_comparison"
++ (match_code "unordered,ordered,unlt,unge,unle,ungt,uneq,ltgt,ne,eq,lt,le,gt,ge"))
+diff --git original-gcc/gcc/config/riscv/riscv-builtins.c gcc-6.3.0/gcc/config/riscv/riscv-builtins.c
+new file mode 100644
+index 00000000000..626a6a33f99
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv-builtins.c
+@@ -0,0 +1,287 @@
++/* Subroutines used for expanding RISC-V builtins.
++ Copyright (C) 2011-2017 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (andrew@sifive.com).
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "rtl.h"
++#include "tree.h"
++#include "gimple-expr.h"
++#include "memmodel.h"
++#include "expmed.h"
++#include "optabs.h"
++#include "recog.h"
++#include "diagnostic-core.h"
++#include "stor-layout.h"
++#include "expr.h"
++#include "langhooks.h"
++
++/* Macros to create an enumeration identifier for a function prototype. */
++#define RISCV_FTYPE_NAME1(A, B) RISCV_##A##_FTYPE_##B
++
++/* Classifies the prototype of a built-in function. */
++enum riscv_function_type {
++#define DEF_RISCV_FTYPE(NARGS, LIST) RISCV_FTYPE_NAME##NARGS LIST,
++#include "config/riscv/riscv-ftypes.def"
++#undef DEF_RISCV_FTYPE
++ RISCV_MAX_FTYPE_MAX
++};
++
++/* Specifies how a built-in function should be converted into rtl. */
++enum riscv_builtin_type {
++ /* The function corresponds directly to an .md pattern. */
++ RISCV_BUILTIN_DIRECT,
++
++ /* Likewise, but with return type VOID. */
++ RISCV_BUILTIN_DIRECT_NO_TARGET
++};
++
++/* Declare an availability predicate for built-in functions. */
++#define AVAIL(NAME, COND) \
++ static unsigned int \
++ riscv_builtin_avail_##NAME (void) \
++ { \
++ return (COND); \
++ }
++
++/* This structure describes a single built-in function. */
++struct riscv_builtin_description {
++ /* The code of the main .md file instruction. See riscv_builtin_type
++ for more information. */
++ enum insn_code icode;
++
++ /* The name of the built-in function. */
++ const char *name;
++
++ /* Specifies how the function should be expanded. */
++ enum riscv_builtin_type builtin_type;
++
++ /* The function's prototype. */
++ enum riscv_function_type prototype;
++
++ /* Whether the function is available. */
++ unsigned int (*avail) (void);
++};
++
++AVAIL (hard_float, TARGET_HARD_FLOAT)
++
++/* Construct a riscv_builtin_description from the given arguments.
++
++ INSN is the name of the associated instruction pattern, without the
++ leading CODE_FOR_riscv_.
++
++ NAME is the name of the function itself, without the leading
++ "__builtin_riscv_".
++
++ BUILTIN_TYPE and FUNCTION_TYPE are riscv_builtin_description fields.
++
++ AVAIL is the name of the availability predicate, without the leading
++ riscv_builtin_avail_. */
++#define RISCV_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \
++ { CODE_FOR_riscv_ ## INSN, "__builtin_riscv_" NAME, \
++ BUILTIN_TYPE, FUNCTION_TYPE, riscv_builtin_avail_ ## AVAIL }
++
++/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT function
++ mapped to instruction CODE_FOR_riscv_<INSN>, FUNCTION_TYPE and AVAIL
++ are as for RISCV_BUILTIN. */
++#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
++ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT, FUNCTION_TYPE, AVAIL)
++
++/* Define __builtin_riscv_<INSN>, which is a RISCV_BUILTIN_DIRECT_NO_TARGET
++ function mapped to instruction CODE_FOR_riscv_<INSN>, FUNCTION_TYPE
++ and AVAIL are as for RISCV_BUILTIN. */
++#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, AVAIL) \
++ RISCV_BUILTIN (INSN, #INSN, RISCV_BUILTIN_DIRECT_NO_TARGET, \
++ FUNCTION_TYPE, AVAIL)
++
++/* Argument types. */
++#define RISCV_ATYPE_VOID void_type_node
++#define RISCV_ATYPE_USI unsigned_intSI_type_node
++
++/* RISCV_FTYPE_ATYPESN takes N RISCV_FTYPES-like type codes and lists
++ their associated RISCV_ATYPEs. */
++#define RISCV_FTYPE_ATYPES1(A, B) \
++ RISCV_ATYPE_##A, RISCV_ATYPE_##B
++
++static const struct riscv_builtin_description riscv_builtins[] = {
++ DIRECT_BUILTIN (frflags, RISCV_USI_FTYPE_VOID, hard_float),
++ DIRECT_NO_TARGET_BUILTIN (fsflags, RISCV_VOID_FTYPE_USI, hard_float)
++};
++
++/* Index I is the function declaration for riscv_builtins[I], or null if the
++ function isn't defined on this target. */
++static GTY(()) tree riscv_builtin_decls[ARRAY_SIZE (riscv_builtins)];
++
++/* Get the index I of the function declaration for riscv_builtin_decls[I]
++ using the instruction code or return null if not defined for the target. */
++static GTY(()) int riscv_builtin_decl_index[NUM_INSN_CODES];
++
++#define GET_BUILTIN_DECL(CODE) \
++ riscv_builtin_decls[riscv_builtin_decl_index[(CODE)]]
++
++/* Return the function type associated with function prototype TYPE. */
++
++static tree
++riscv_build_function_type (enum riscv_function_type type)
++{
++ static tree types[(int) RISCV_MAX_FTYPE_MAX];
++
++ if (types[(int) type] == NULL_TREE)
++ switch (type)
++ {
++#define DEF_RISCV_FTYPE(NUM, ARGS) \
++ case RISCV_FTYPE_NAME##NUM ARGS: \
++ types[(int) type] \
++ = build_function_type_list (RISCV_FTYPE_ATYPES##NUM ARGS, \
++ NULL_TREE); \
++ break;
++#include "config/riscv/riscv-ftypes.def"
++#undef DEF_RISCV_FTYPE
++ default:
++ gcc_unreachable ();
++ }
++
++ return types[(int) type];
++}
++
++/* Implement TARGET_INIT_BUILTINS. */
++
++void
++riscv_init_builtins (void)
++{
++ for (size_t i = 0; i < ARRAY_SIZE (riscv_builtins); i++)
++ {
++ const struct riscv_builtin_description *d = &riscv_builtins[i];
++ if (d->avail ())
++ {
++ tree type = riscv_build_function_type (d->prototype);
++ riscv_builtin_decls[i]
++ = add_builtin_function (d->name, type, i, BUILT_IN_MD, NULL, NULL);
++ riscv_builtin_decl_index[d->icode] = i;
++ }
++ }
++}
++
++/* Implement TARGET_BUILTIN_DECL. */
++
++tree
++riscv_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
++{
++ if (code >= ARRAY_SIZE (riscv_builtins))
++ return error_mark_node;
++ return riscv_builtin_decls[code];
++}
++
++/* Take argument ARGNO from EXP's argument list and convert it into
++ an expand operand. Store the operand in *OP. */
++
++static void
++riscv_prepare_builtin_arg (struct expand_operand *op, tree exp, unsigned argno)
++{
++ tree arg = CALL_EXPR_ARG (exp, argno);
++ create_input_operand (op, expand_normal (arg), TYPE_MODE (TREE_TYPE (arg)));
++}
++
++/* Expand instruction ICODE as part of a built-in function sequence.
++ Use the first NOPS elements of OPS as the instruction's operands.
++ HAS_TARGET_P is true if operand 0 is a target; it is false if the
++ instruction has no target.
++
++ Return the target rtx if HAS_TARGET_P, otherwise return const0_rtx. */
++
++static rtx
++riscv_expand_builtin_insn (enum insn_code icode, unsigned int n_ops,
++ struct expand_operand *ops, bool has_target_p)
++{
++ if (!maybe_expand_insn (icode, n_ops, ops))
++ {
++ error ("invalid argument to built-in function");
++ return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
++ }
++
++ return has_target_p ? ops[0].value : const0_rtx;
++}
++
++/* Expand a RISCV_BUILTIN_DIRECT or RISCV_BUILTIN_DIRECT_NO_TARGET function;
++ HAS_TARGET_P says which. EXP is the CALL_EXPR that calls the function
++ and ICODE is the code of the associated .md pattern. TARGET, if nonnull,
++ suggests a good place to put the result. */
++
++static rtx
++riscv_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
++ bool has_target_p)
++{
++ struct expand_operand ops[MAX_RECOG_OPERANDS];
++
++ /* Map any target to operand 0. */
++ int opno = 0;
++ if (has_target_p)
++ create_output_operand (&ops[opno++], target, TYPE_MODE (TREE_TYPE (exp)));
++
++ /* Map the arguments to the other operands. */
++ gcc_assert (opno + call_expr_nargs (exp)
++ == insn_data[icode].n_generator_args);
++ for (int argno = 0; argno < call_expr_nargs (exp); argno++)
++ riscv_prepare_builtin_arg (&ops[opno++], exp, argno);
++
++ return riscv_expand_builtin_insn (icode, opno, ops, has_target_p);
++}
++
++/* Implement TARGET_EXPAND_BUILTIN. */
++
++rtx
++riscv_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
++ machine_mode mode ATTRIBUTE_UNUSED,
++ int ignore ATTRIBUTE_UNUSED)
++{
++ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
++ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
++ const struct riscv_builtin_description *d = &riscv_builtins[fcode];
++
++ switch (d->builtin_type)
++ {
++ case RISCV_BUILTIN_DIRECT:
++ return riscv_expand_builtin_direct (d->icode, target, exp, true);
++
++ case RISCV_BUILTIN_DIRECT_NO_TARGET:
++ return riscv_expand_builtin_direct (d->icode, target, exp, false);
++ }
++
++ gcc_unreachable ();
++}
++
++/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. */
++
++void
++riscv_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
++{
++ if (!TARGET_HARD_FLOAT)
++ return;
++
++ tree frflags = GET_BUILTIN_DECL (CODE_FOR_riscv_frflags);
++ tree fsflags = GET_BUILTIN_DECL (CODE_FOR_riscv_fsflags);
++ tree old_flags = create_tmp_var_raw (RISCV_ATYPE_USI);
++
++ *hold = build2 (MODIFY_EXPR, RISCV_ATYPE_USI, old_flags,
++ build_call_expr (frflags, 0));
++ *clear = build_call_expr (fsflags, 1, old_flags);
++ *update = NULL_TREE;
++}
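The table above registers exactly two builtins, __builtin_riscv_frflags and __builtin_riscv_fsflags, which read and write the floating-point accrued-exception flags (the fflags CSR). Typical usage:

  /* Save, clear, and restore the FP accrued-exception flags via the
     builtins registered above (hard-float targets only).  */
  void
  with_clean_fflags (void)
  {
    unsigned int old = __builtin_riscv_frflags ();  /* frflags */
    __builtin_riscv_fsflags (0);                    /* clear fflags */
    /* ... FP work whose exceptions should be observed in isolation ... */
    __builtin_riscv_fsflags (old);                  /* restore */
  }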
+diff --git original-gcc/gcc/config/riscv/riscv-c.c gcc-6.3.0/gcc/config/riscv/riscv-c.c
+new file mode 100644
+index 00000000000..64e7cf877af
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv-c.c
+@@ -0,0 +1,92 @@
++/* RISC-V-specific code for C family languages.
++ Copyright (C) 2011-2017 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (andrew@sifive.com).
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "c-family/c-common.h"
++#include "cpplib.h"
++
++#define builtin_define(TXT) cpp_define (pfile, TXT)
++
++/* Implement TARGET_CPU_CPP_BUILTINS. */
++
++void
++riscv_cpu_cpp_builtins (cpp_reader *pfile)
++{
++ builtin_define ("__riscv");
++
++ if (TARGET_RVC)
++ builtin_define ("__riscv_compressed");
++
++ if (TARGET_ATOMIC)
++ builtin_define ("__riscv_atomic");
++
++ if (TARGET_MUL)
++ builtin_define ("__riscv_mul");
++ if (TARGET_DIV)
++ builtin_define ("__riscv_div");
++ if (TARGET_DIV && TARGET_MUL)
++ builtin_define ("__riscv_muldiv");
++
++ builtin_define_with_int_value ("__riscv_xlen", UNITS_PER_WORD * 8);
++ if (TARGET_HARD_FLOAT)
++ builtin_define_with_int_value ("__riscv_flen", UNITS_PER_FP_REG * 8);
++
++ if (TARGET_HARD_FLOAT && TARGET_FDIV)
++ {
++ builtin_define ("__riscv_fdiv");
++ builtin_define ("__riscv_fsqrt");
++ }
++
++ switch (riscv_abi)
++ {
++ case ABI_ILP32:
++ case ABI_LP64:
++ builtin_define ("__riscv_float_abi_soft");
++ break;
++
++ case ABI_ILP32F:
++ case ABI_LP64F:
++ builtin_define ("__riscv_float_abi_single");
++ break;
++
++ case ABI_ILP32D:
++ case ABI_LP64D:
++ builtin_define ("__riscv_float_abi_double");
++ break;
++ }
++
++ switch (riscv_cmodel)
++ {
++ case CM_MEDLOW:
++ builtin_define ("__riscv_cmodel_medlow");
++ break;
++
++ case CM_MEDANY:
++ builtin_define ("__riscv_cmodel_medany");
++ break;
++
++ case CM_PIC:
++ builtin_define ("__riscv_cmodel_pic");
++ break;
++ }
++}
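Target code can dispatch on these predefines at compile time, e.g.:

  /* Compile-time dispatch on the macros riscv_cpu_cpp_builtins defines.  */
  #if defined (__riscv)
  # if __riscv_xlen == 64
  typedef unsigned long reg_t;   /* rv64: 64-bit GPRs */
  # else
  typedef unsigned int reg_t;    /* rv32 */
  # endif
  # if defined (__riscv_atomic)
  /* The A extension's AMO instructions are available.  */
  # endif
  # if defined (__riscv_float_abi_double)
  /* FP arguments use the hard-float double ABI.  */
  # endif
  #endif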
+diff --git original-gcc/gcc/config/riscv/riscv-ftypes.def gcc-6.3.0/gcc/config/riscv/riscv-ftypes.def
+new file mode 100644
+index 00000000000..eb69148368f
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv-ftypes.def
+@@ -0,0 +1,30 @@
++/* Definitions of prototypes for RISC-V built-in functions. -*- C -*-
++ Copyright (C) 2011-2017 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (andrew@sifive.com).
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++/* Invoke DEF_RISCV_FTYPE (NARGS, LIST) for each prototype used by
++ RISCV built-in functions, where:
++
++ NARGS is the number of arguments.
++ LIST contains the return-type code followed by the codes for each
++ argument type. */
++
++DEF_RISCV_FTYPE (1, (USI, VOID))
++DEF_RISCV_FTYPE (1, (VOID, USI))
+diff --git original-gcc/gcc/config/riscv/riscv-modes.def gcc-6.3.0/gcc/config/riscv/riscv-modes.def
+new file mode 100644
+index 00000000000..5c65667da68
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv-modes.def
+@@ -0,0 +1,22 @@
++/* Extra machine modes for RISC-V target.
++ Copyright (C) 2011-2017 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (andrew@sifive.com).
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++FLOAT_MODE (TF, 16, ieee_quad_format);
+diff --git original-gcc/gcc/config/riscv/riscv-opts.h gcc-6.3.0/gcc/config/riscv/riscv-opts.h
+new file mode 100644
+index 00000000000..2b19233379c
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv-opts.h
+@@ -0,0 +1,41 @@
++/* Definition of RISC-V target for GNU compiler.
++ Copyright (C) 2016-2017 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (andrew@sifive.com).
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#ifndef GCC_RISCV_OPTS_H
++#define GCC_RISCV_OPTS_H
++
++enum riscv_abi_type {
++ ABI_ILP32,
++ ABI_ILP32F,
++ ABI_ILP32D,
++ ABI_LP64,
++ ABI_LP64F,
++ ABI_LP64D
++};
++extern enum riscv_abi_type riscv_abi;
++
++enum riscv_code_model {
++ CM_MEDLOW,
++ CM_MEDANY,
++ CM_PIC
++};
++extern enum riscv_code_model riscv_cmodel;
++
++#endif /* ! GCC_RISCV_OPTS_H */
+diff --git original-gcc/gcc/config/riscv/riscv-protos.h gcc-6.3.0/gcc/config/riscv/riscv-protos.h
+new file mode 100644
+index 00000000000..de7023f88c5
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv-protos.h
+@@ -0,0 +1,83 @@
++/* Definition of RISC-V target for GNU compiler.
++ Copyright (C) 2011-2017 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (andrew@sifive.com).
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#ifndef GCC_RISCV_PROTOS_H
++#define GCC_RISCV_PROTOS_H
++
++/* Symbol types we understand. The order of this list must match that of
++ the unspec enum in riscv.md, subsequent to UNSPEC_ADDRESS_FIRST. */
++enum riscv_symbol_type {
++ SYMBOL_ABSOLUTE,
++ SYMBOL_PCREL,
++ SYMBOL_GOT_DISP,
++ SYMBOL_TLS,
++ SYMBOL_TLS_LE,
++ SYMBOL_TLS_IE,
++ SYMBOL_TLS_GD
++};
++#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)
++
++/* Routines implemented in riscv.c. */
++extern enum riscv_symbol_type riscv_classify_symbolic_expression (rtx);
++extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
++extern int riscv_regno_mode_ok_for_base_p (int, enum machine_mode, bool);
++extern bool riscv_hard_regno_mode_ok_p (unsigned int, enum machine_mode);
++extern int riscv_address_insns (rtx, enum machine_mode, bool);
++extern int riscv_const_insns (rtx);
++extern int riscv_split_const_insns (rtx);
++extern int riscv_load_store_insns (rtx, rtx_insn *);
++extern rtx riscv_emit_move (rtx, rtx);
++extern bool riscv_split_symbol (rtx, rtx, enum machine_mode, rtx *);
++extern bool riscv_split_symbol_type (enum riscv_symbol_type);
++extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
++extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT);
++extern bool riscv_legitimize_move (enum machine_mode, rtx, rtx);
++extern rtx riscv_subword (rtx, bool);
++extern bool riscv_split_64bit_move_p (rtx, rtx);
++extern void riscv_split_doubleword_move (rtx, rtx);
++extern const char *riscv_output_move (rtx, rtx);
++extern const char *riscv_output_gpr_save (unsigned);
++#ifdef RTX_CODE
++extern void riscv_expand_int_scc (rtx, enum rtx_code, rtx, rtx);
++extern void riscv_expand_float_scc (rtx, enum rtx_code, rtx, rtx);
++extern void riscv_expand_conditional_branch (rtx, enum rtx_code, rtx, rtx);
++#endif
++extern rtx riscv_legitimize_call_address (rtx);
++extern void riscv_set_return_address (rtx, rtx);
++extern bool riscv_expand_block_move (rtx, rtx, rtx);
++extern rtx riscv_return_addr (int, rtx);
++extern HOST_WIDE_INT riscv_initial_elimination_offset (int, int);
++extern void riscv_expand_prologue (void);
++extern void riscv_expand_epilogue (bool);
++extern bool riscv_can_use_return_insn (void);
++extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
++extern unsigned int riscv_hard_regno_nregs (int, enum machine_mode);
++
++/* Routines implemented in riscv-c.c. */
++void riscv_cpu_cpp_builtins (cpp_reader *);
++
++/* Routines implemented in riscv-builtins.c. */
++extern void riscv_atomic_assign_expand_fenv (tree *, tree *, tree *);
++extern rtx riscv_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
++extern tree riscv_builtin_decl (unsigned int, bool);
++extern void riscv_init_builtins (void);
++
++#endif /* ! GCC_RISCV_PROTOS_H */
+diff --git original-gcc/gcc/config/riscv/riscv.c gcc-6.3.0/gcc/config/riscv/riscv.c
+new file mode 100644
+index 00000000000..834651f4214
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv.c
+@@ -0,0 +1,4138 @@
++/* Subroutines used for code generation for RISC-V.
++ Copyright (C) 2011-2017 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (andrew@sifive.com).
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "rtl.h"
++#include "regs.h"
++#include "hard-reg-set.h"
++#include "insn-config.h"
++#include "conditions.h"
++#include "insn-attr.h"
++#include "recog.h"
++#include "output.h"
++#include "hash-set.h"
++#include "machmode.h"
++#include "vec.h"
++#include "double-int.h"
++#include "input.h"
++#include "alias.h"
++#include "symtab.h"
++#include "wide-int.h"
++#include "inchash.h"
++#include "tree.h"
++#include "fold-const.h"
++#include "varasm.h"
++#include "stringpool.h"
++#include "stor-layout.h"
++#include "calls.h"
++#include "function.h"
++#include "hashtab.h"
++#include "flags.h"
++#include "statistics.h"
++#include "real.h"
++#include "fixed-value.h"
++#include "expmed.h"
++#include "dojump.h"
++#include "explow.h"
++#include "memmodel.h"
++#include "emit-rtl.h"
++#include "stmt.h"
++#include "expr.h"
++#include "insn-codes.h"
++#include "optabs.h"
++#include "libfuncs.h"
++#include "reload.h"
++#include "tm_p.h"
++#include "ggc.h"
++#include "gstab.h"
++#include "hash-table.h"
++#include "debug.h"
++#include "target.h"
++#include "target-def.h"
++#include "common/common-target.h"
++#include "langhooks.h"
++#include "dominance.h"
++#include "cfg.h"
++#include "cfgrtl.h"
++#include "cfganal.h"
++#include "lcm.h"
++#include "cfgbuild.h"
++#include "cfgcleanup.h"
++#include "predict.h"
++#include "basic-block.h"
++#include "bitmap.h"
++#include "regset.h"
++#include "df.h"
++#include "sched-int.h"
++#include "tree-ssa-alias.h"
++#include "internal-fn.h"
++#include "gimple-fold.h"
++#include "tree-eh.h"
++#include "gimple-expr.h"
++#include "is-a.h"
++#include "gimple.h"
++#include "gimplify.h"
++#include "diagnostic.h"
++#include "target-globals.h"
++#include "opts.h"
++#include "tree-pass.h"
++#include "context.h"
++#include "hash-map.h"
++#include "plugin-api.h"
++#include "ipa-ref.h"
++#include "cgraph.h"
++#include "builtins.h"
++#include "rtl-iter.h"
++
++/* True if X is an UNSPEC wrapper around a SYMBOL_REF or LABEL_REF. */
++#define UNSPEC_ADDRESS_P(X) \
++ (GET_CODE (X) == UNSPEC \
++ && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
++ && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
++
++/* Extract the symbol or label from UNSPEC wrapper X. */
++#define UNSPEC_ADDRESS(X) \
++ XVECEXP (X, 0, 0)
++
++/* Extract the symbol type from UNSPEC wrapper X. */
++#define UNSPEC_ADDRESS_TYPE(X) \
++ ((enum riscv_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
++
++/* True if bit BIT is set in VALUE. */
++#define BITSET_P(VALUE, BIT) (((VALUE) & (1ULL << (BIT))) != 0)
++
++/* Classifies an address.
++
++ ADDRESS_REG
++ A natural register + offset address. The register satisfies
++ riscv_valid_base_register_p and the offset is a const_arith_operand.
++
++ ADDRESS_LO_SUM
++ A LO_SUM rtx. The first operand is a valid base register and
++ the second operand is a symbolic address.
++
++ ADDRESS_CONST_INT
++ A signed 16-bit constant address.
++
++ ADDRESS_SYMBOLIC:
++ A constant symbolic address. */
++enum riscv_address_type {
++ ADDRESS_REG,
++ ADDRESS_LO_SUM,
++ ADDRESS_CONST_INT,
++ ADDRESS_SYMBOLIC
++};
++
++/* Information about a function's frame layout. */
++struct GTY(()) riscv_frame_info {
++ /* The size of the frame in bytes. */
++ HOST_WIDE_INT total_size;
++
++ /* Bit X is set if the function saves or restores GPR X. */
++ unsigned int mask;
++
++ /* Likewise FPR X. */
++ unsigned int fmask;
++
++ /* How much the GPR save/restore routines adjust sp (or 0 if unused). */
++ unsigned save_libcall_adjustment;
++
++ /* Offsets of fixed-point and floating-point save areas from frame bottom */
++ HOST_WIDE_INT gp_sp_offset;
++ HOST_WIDE_INT fp_sp_offset;
++
++ /* Offset of virtual frame pointer from stack pointer/frame bottom */
++ HOST_WIDE_INT frame_pointer_offset;
++
++ /* Offset of hard frame pointer from stack pointer/frame bottom */
++ HOST_WIDE_INT hard_frame_pointer_offset;
++
++ /* The offset of arg_pointer_rtx from the bottom of the frame. */
++ HOST_WIDE_INT arg_pointer_offset;
++};
++
++struct GTY(()) machine_function {
++ /* The number of extra stack bytes taken up by register varargs.
++ This area is allocated by the callee at the very top of the frame. */
++ int varargs_size;
++
++ /* Memoized return value of leaf_function_p. <0 if false, >0 if true. */
++ int is_leaf;
++
++ /* The current frame information, calculated by riscv_compute_frame_info. */
++ struct riscv_frame_info frame;
++};
++
++/* Information about a single argument. */
++struct riscv_arg_info {
++ /* True if the argument is at least partially passed on the stack. */
++ bool stack_p;
++
++ /* The number of integer registers allocated to this argument. */
++ unsigned int num_gprs;
++
++ /* The offset of the first register used, provided num_gprs is nonzero.
++ If passed entirely on the stack, the value is MAX_ARGS_IN_REGISTERS. */
++ unsigned int gpr_offset;
++
++ /* The number of floating-point registers allocated to this argument. */
++ unsigned int num_fprs;
++
++ /* The offset of the first register used, provided num_fprs is nonzero. */
++ unsigned int fpr_offset;
++};
++
++/* Information about an address described by riscv_address_type.
++
++ ADDRESS_CONST_INT
++ No fields are used.
++
++ ADDRESS_REG
++ REG is the base register and OFFSET is the constant offset.
++
++ ADDRESS_LO_SUM
++ REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
++ is the type of symbol it references.
++
++ ADDRESS_SYMBOLIC
++ SYMBOL_TYPE is the type of symbol that the address references. */
++struct riscv_address_info {
++ enum riscv_address_type type;
++ rtx reg;
++ rtx offset;
++ enum riscv_symbol_type symbol_type;
++};
++
++/* One stage in a constant building sequence. These sequences have
++ the form:
++
++ A = VALUE[0]
++ A = A CODE[1] VALUE[1]
++ A = A CODE[2] VALUE[2]
++ ...
++
++ where A is an accumulator, each CODE[i] is a binary rtl operation
++ and each VALUE[i] is a constant integer. CODE[0] is undefined. */
++struct riscv_integer_op {
++ enum rtx_code code;
++ unsigned HOST_WIDE_INT value;
++};
++
++/* The largest number of operations needed to load an integer constant.
++ The worst case is LUI, ADDI, SLLI, ADDI, SLLI, ADDI, SLLI, ADDI. */
++#define RISCV_MAX_INTEGER_OPS 8
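++
++/* For example (an illustrative sketch), the constant 0x12345678 is
++ built by a two-operation sequence of this form:
++
++ A = 0x12345000 (LUI)
++ A = A + 0x678 (ADDI) */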
++
++/* Costs of various operations on the different architectures. */
++
++struct riscv_tune_info
++{
++ unsigned short fp_add[2];
++ unsigned short fp_mul[2];
++ unsigned short fp_div[2];
++ unsigned short int_mul[2];
++ unsigned short int_div[2];
++ unsigned short issue_rate;
++ unsigned short branch_cost;
++ unsigned short memory_cost;
++};
++
++/* Information about one CPU we know about. */
++struct riscv_cpu_info {
++ /* This CPU's canonical name. */
++ const char *name;
++
++ /* Tuning parameters for this CPU. */
++ const struct riscv_tune_info *tune_info;
++};
++
++/* Global variables for machine-dependent things. */
++
++/* Which tuning parameters to use. */
++static const struct riscv_tune_info *tune_info;
++
++/* Index R is the smallest register class that contains register R. */
++const enum reg_class riscv_regno_to_class[FIRST_PSEUDO_REGISTER] = {
++ GR_REGS, GR_REGS, GR_REGS, GR_REGS,
++ GR_REGS, GR_REGS, SIBCALL_REGS, SIBCALL_REGS,
++ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
++ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
++ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
++ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
++ JALR_REGS, JALR_REGS, JALR_REGS, JALR_REGS,
++ SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS, SIBCALL_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FP_REGS, FP_REGS, FP_REGS, FP_REGS,
++ FRAME_REGS, FRAME_REGS,
++};
++
++/* Costs to use when optimizing for rocket. */
++static const struct riscv_tune_info rocket_tune_info = {
++ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_add */
++ {COSTS_N_INSNS (4), COSTS_N_INSNS (5)}, /* fp_mul */
++ {COSTS_N_INSNS (20), COSTS_N_INSNS (20)}, /* fp_div */
++ {COSTS_N_INSNS (4), COSTS_N_INSNS (4)}, /* int_mul */
++ {COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
++ 1, /* issue_rate */
++ 3, /* branch_cost */
++ 5 /* memory_cost */
++};
++
++/* Costs to use when optimizing for size. */
++static const struct riscv_tune_info optimize_size_tune_info = {
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_add */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_mul */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* fp_div */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_mul */
++ {COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */
++ 1, /* issue_rate */
++ 1, /* branch_cost */
++ 2 /* memory_cost */
++};
++
++/* A table describing all the processors GCC knows about. */
++static const struct riscv_cpu_info riscv_cpu_info_table[] = {
++ { "rocket", &rocket_tune_info },
++};
++
++/* Return the riscv_cpu_info entry for the given name string. */
++
++static const struct riscv_cpu_info *
++riscv_parse_cpu (const char *cpu_string)
++{
++ for (unsigned i = 0; i < ARRAY_SIZE (riscv_cpu_info_table); i++)
++ if (strcmp (riscv_cpu_info_table[i].name, cpu_string) == 0)
++ return riscv_cpu_info_table + i;
++
++ error ("unknown cpu %qs for -mtune", cpu_string);
++ return riscv_cpu_info_table;
++}
++
++/* Helper function for riscv_build_integer; arguments are as for
++ riscv_build_integer. */
++
++static int
++riscv_build_integer_1 (struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS],
++ HOST_WIDE_INT value, enum machine_mode mode)
++{
++ HOST_WIDE_INT low_part = CONST_LOW_PART (value);
++ int cost = RISCV_MAX_INTEGER_OPS + 1, alt_cost;
++ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
++
++ if (SMALL_OPERAND (value) || LUI_OPERAND (value))
++ {
++ /* Simply ADDI or LUI. */
++ codes[0].code = UNKNOWN;
++ codes[0].value = value;
++ return 1;
++ }
++
++ /* End with ADDI. When constructing HImode constants, do not generate any
++ intermediate value that is not itself a valid HImode constant. The
++ XORI case below will handle those remaining HImode constants. */
++ if (low_part != 0 && (mode != HImode || value - low_part <= INT16_MAX))
++ {
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value - low_part, mode);
++ if (alt_cost < cost)
++ {
++ alt_codes[alt_cost-1].code = PLUS;
++ alt_codes[alt_cost-1].value = low_part;
++ memcpy (codes, alt_codes, sizeof (alt_codes));
++ cost = alt_cost;
++ }
++ }
++
++ /* End with XORI. */
++ if (cost > 2 && (low_part < 0 || mode == HImode))
++ {
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, value ^ low_part, mode);
++ if (alt_cost < cost)
++ {
++ alt_codes[alt_cost-1].code = XOR;
++ alt_codes[alt_cost-1].value = low_part;
++ memcpy (codes, alt_codes, sizeof (alt_codes));
++ cost = alt_cost;
++ }
++ }
++
++ /* Eliminate trailing zeros and end with SLLI. */
++ if (cost > 2 && (value & 1) == 0)
++ {
++ int shift = ctz_hwi (value);
++ unsigned HOST_WIDE_INT x = value;
++ x = sext_hwi (x >> shift, HOST_BITS_PER_WIDE_INT - shift);
++
++ /* Don't eliminate the lower 12 bits if LUI might apply. */
++ if (shift > IMM_BITS && !SMALL_OPERAND (x) && LUI_OPERAND (x << IMM_BITS))
++ shift -= IMM_BITS, x <<= IMM_BITS;
++
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, x, mode);
++ if (alt_cost < cost)
++ {
++ alt_codes[alt_cost-1].code = ASHIFT;
++ alt_codes[alt_cost-1].value = shift;
++ memcpy (codes, alt_codes, sizeof (alt_codes));
++ cost = alt_cost;
++ }
++ }
++
++ gcc_assert (cost <= RISCV_MAX_INTEGER_OPS);
++ return cost;
++}
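++
++/* To sketch one path through the search above: on RV64 the constant
++ 0x100000000 (1 << 32) has no valid low part or LUI form, so the
++ SLLI case applies and yields A = 1, A = A << 32 (LI + SLLI). */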
++
++/* Fill CODES with a sequence of rtl operations to load VALUE.
++ Return the number of operations needed. */
++
++static int
++riscv_build_integer (struct riscv_integer_op *codes, HOST_WIDE_INT value,
++ enum machine_mode mode)
++{
++ int cost = riscv_build_integer_1 (codes, value, mode);
++
++ /* Eliminate leading zeros and end with SRLI. */
++ if (value > 0 && cost > 2)
++ {
++ struct riscv_integer_op alt_codes[RISCV_MAX_INTEGER_OPS];
++ int alt_cost, shift = clz_hwi (value);
++ HOST_WIDE_INT shifted_val;
++
++ /* Try filling trailing bits with 1s. */
++ shifted_val = (value << shift) | ((((HOST_WIDE_INT) 1) << shift) - 1);
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
++ if (alt_cost < cost)
++ {
++ alt_codes[alt_cost-1].code = LSHIFTRT;
++ alt_codes[alt_cost-1].value = shift;
++ memcpy (codes, alt_codes, sizeof (alt_codes));
++ cost = alt_cost;
++ }
++
++ /* Try filling trailing bits with 0s. */
++ shifted_val = value << shift;
++ alt_cost = 1 + riscv_build_integer_1 (alt_codes, shifted_val, mode);
++ if (alt_cost < cost)
++ {
++ alt_codes[alt_cost-1].code = LSHIFTRT;
++ alt_codes[alt_cost-1].value = shift;
++ memcpy (codes, alt_codes, sizeof (alt_codes));
++ cost = alt_cost;
++ }
++ }
++
++ return cost;
++}
++
++/* Return the cost of constructing VAL in the event that a scratch
++ register is available. */
++
++static int
++riscv_split_integer_cost (HOST_WIDE_INT val)
++{
++ int cost;
++ unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
++ unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
++ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++
++ cost = 2 + riscv_build_integer (codes, loval, VOIDmode);
++ if (loval != hival)
++ cost += riscv_build_integer (codes, hival, VOIDmode);
++
++ return cost;
++}
++
++/* Return the cost of constructing the integer constant VAL. */
++
++static int
++riscv_integer_cost (HOST_WIDE_INT val)
++{
++ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++ return MIN (riscv_build_integer (codes, val, VOIDmode),
++ riscv_split_integer_cost (val));
++}
++
++/* Try to split a 64b integer into 32b parts, then reassemble. */
++
++static rtx
++riscv_split_integer (HOST_WIDE_INT val, enum machine_mode mode)
++{
++ unsigned HOST_WIDE_INT loval = sext_hwi (val, 32);
++ unsigned HOST_WIDE_INT hival = sext_hwi ((val - loval) >> 32, 32);
++ rtx hi = gen_reg_rtx (mode), lo = gen_reg_rtx (mode);
++
++ riscv_move_integer (hi, hi, hival);
++ riscv_move_integer (lo, lo, loval);
++
++ hi = gen_rtx_fmt_ee (ASHIFT, mode, hi, GEN_INT (32));
++ hi = force_reg (mode, hi);
++
++ return gen_rtx_fmt_ee (PLUS, mode, hi, lo);
++}
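++
++/* An illustrative case: 0x1234567800000000 splits into loval = 0 and
++ hival = 0x12345678; hival is built in 32 bits, shifted left by 32,
++ and added to loval. */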
++
++/* Return true if X is a thread-local symbol. */
++
++static bool
++riscv_tls_symbol_p (const_rtx x)
++{
++ return SYMBOL_REF_P (x) && SYMBOL_REF_TLS_MODEL (x) != 0;
++}
++
++/* Return true if symbol X binds locally. */
++
++static bool
++riscv_symbol_binds_local_p (const_rtx x)
++{
++ if (SYMBOL_REF_P (x))
++ return (SYMBOL_REF_DECL (x)
++ ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
++ : SYMBOL_REF_LOCAL_P (x));
++ else
++ return false;
++}
++
++/* Return the method that should be used to access SYMBOL_REF or
++ LABEL_REF X. */
++
++static enum riscv_symbol_type
++riscv_classify_symbol (const_rtx x)
++{
++ if (riscv_tls_symbol_p (x))
++ return SYMBOL_TLS;
++
++ if (GET_CODE (x) == SYMBOL_REF && flag_pic && !riscv_symbol_binds_local_p (x))
++ return SYMBOL_GOT_DISP;
++
++ return riscv_cmodel == CM_MEDLOW ? SYMBOL_ABSOLUTE : SYMBOL_PCREL;
++}
++
++/* Classify the base of symbolic expression X. */
++
++enum riscv_symbol_type
++riscv_classify_symbolic_expression (rtx x)
++{
++ rtx offset;
++
++ split_const (x, &x, &offset);
++ if (UNSPEC_ADDRESS_P (x))
++ return UNSPEC_ADDRESS_TYPE (x);
++
++ return riscv_classify_symbol (x);
++}
++
++/* Return true if X is a symbolic constant. If it is, store the type of
++ the symbol in *SYMBOL_TYPE. */
++
++bool
++riscv_symbolic_constant_p (rtx x, enum riscv_symbol_type *symbol_type)
++{
++ rtx offset;
++
++ split_const (x, &x, &offset);
++ if (UNSPEC_ADDRESS_P (x))
++ {
++ *symbol_type = UNSPEC_ADDRESS_TYPE (x);
++ x = UNSPEC_ADDRESS (x);
++ }
++ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
++ *symbol_type = riscv_classify_symbol (x);
++ else
++ return false;
++
++ if (offset == const0_rtx)
++ return true;
++
++ /* Nonzero offsets are only valid for references that don't use the GOT. */
++ switch (*symbol_type)
++ {
++ case SYMBOL_ABSOLUTE:
++ case SYMBOL_PCREL:
++ case SYMBOL_TLS_LE:
++ /* GAS rejects offsets outside the range [-2^31, 2^31-1]. */
++ return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);
++
++ default:
++ return false;
++ }
++}
++
++/* Return the number of instructions necessary to reference a symbol
++ of type TYPE. */
++
++static int riscv_symbol_insns (enum riscv_symbol_type type)
++{
++ switch (type)
++ {
++ case SYMBOL_TLS: return 0; /* Depends on the TLS model. */
++ case SYMBOL_ABSOLUTE: return 2; /* LUI + the reference. */
++ case SYMBOL_PCREL: return 2; /* AUIPC + the reference. */
++ case SYMBOL_TLS_LE: return 3; /* LUI + ADD TP + the reference. */
++ case SYMBOL_GOT_DISP: return 3; /* AUIPC + LD GOT + the reference. */
++ default: gcc_unreachable ();
++ }
++}
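++
++/* As a sketch in assembly (register names are illustrative only),
++ SYMBOL_ABSOLUTE expands to something like
++ lui a0,%hi(sym); addi a0,a0,%lo(sym)
++ while SYMBOL_PCREL uses
++ auipc a0,%pcrel_hi(sym); addi a0,a0,%pcrel_lo(.LA0)
++ where .LA0 labels the AUIPC, as generated by riscv_split_symbol. */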
++
++/* Implement TARGET_LEGITIMATE_CONSTANT_P. */
++
++static bool
++riscv_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++ return riscv_const_insns (x) > 0;
++}
++
++/* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
++
++static bool
++riscv_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++ enum riscv_symbol_type type;
++ rtx base, offset;
++
++ /* There is no assembler syntax for expressing an address-sized
++ high part. */
++ if (GET_CODE (x) == HIGH)
++ return true;
++
++ split_const (x, &base, &offset);
++ if (riscv_symbolic_constant_p (base, &type))
++ {
++ /* As an optimization, don't spill symbolic constants that are as
++ cheap to rematerialize as to access in the constant pool. */
++ if (SMALL_OPERAND (INTVAL (offset)) && riscv_symbol_insns (type) > 0)
++ return true;
++
++ /* As an optimization, avoid needlessly generating dynamic relocations. */
++ if (flag_pic)
++ return true;
++ }
++
++ /* TLS symbols must be computed by riscv_legitimize_move. */
++ if (tls_referenced_p (x))
++ return true;
++
++ return false;
++}
++
++/* Return true if register REGNO is a valid base register for mode MODE.
++ STRICT_P is true if REG_OK_STRICT is in effect. */
++
++int
++riscv_regno_mode_ok_for_base_p (int regno,
++ enum machine_mode mode ATTRIBUTE_UNUSED,
++ bool strict_p)
++{
++ if (!HARD_REGISTER_NUM_P (regno))
++ {
++ if (!strict_p)
++ return true;
++ regno = reg_renumber[regno];
++ }
++
++ /* These fake registers will be eliminated to either the stack or
++ hard frame pointer, both of which are usually valid base registers.
++ Reload deals with the cases where the eliminated form isn't valid. */
++ if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
++ return true;
++
++ return GP_REG_P (regno);
++}
++
++/* Return true if X is a valid base register for mode MODE.
++ STRICT_P is true if REG_OK_STRICT is in effect. */
++
++static bool
++riscv_valid_base_register_p (rtx x, enum machine_mode mode, bool strict_p)
++{
++ if (!strict_p && GET_CODE (x) == SUBREG)
++ x = SUBREG_REG (x);
++
++ return (REG_P (x)
++ && riscv_regno_mode_ok_for_base_p (REGNO (x), mode, strict_p));
++}
++
++/* Return true if, for every base register BASE_REG, (plus BASE_REG X)
++ can address a value of mode MODE. */
++
++static bool
++riscv_valid_offset_p (rtx x, enum machine_mode mode)
++{
++ /* Check that X is a signed 12-bit number. */
++ if (!const_arith_operand (x, Pmode))
++ return false;
++
++ /* We may need to split multiword moves, so make sure that every word
++ is accessible. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++ && !SMALL_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
++ return false;
++
++ return true;
++}
++
++/* Return true if a symbol of type SYMBOL_TYPE should be split in two. */
++
++bool
++riscv_split_symbol_type (enum riscv_symbol_type symbol_type)
++{
++ if (symbol_type == SYMBOL_TLS_LE)
++ return true;
++
++ if (!TARGET_EXPLICIT_RELOCS)
++ return false;
++
++ return symbol_type == SYMBOL_ABSOLUTE || symbol_type == SYMBOL_PCREL;
++}
++
++/* Return true if a LO_SUM can address a value of mode MODE when the
++ LO_SUM symbol has type SYM_TYPE. */
++
++static bool
++riscv_valid_lo_sum_p (enum riscv_symbol_type sym_type, enum machine_mode mode)
++{
++ /* Check that symbols of type SYMBOL_TYPE can be used to access values
++ of mode MODE. */
++ if (riscv_symbol_insns (sym_type) == 0)
++ return false;
++
++ /* Check that there is a known low-part relocation. */
++ if (!riscv_split_symbol_type (sym_type))
++ return false;
++
++ /* We may need to split multiword moves, so make sure that each word
++ can be accessed without inducing a carry. */
++ if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++ && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
++ return false;
++
++ return true;
++}
++
++/* Return true if X is a valid address for machine mode MODE. If it is,
++ fill in INFO appropriately. STRICT_P is true if REG_OK_STRICT is in
++ effect. */
++
++static bool
++riscv_classify_address (struct riscv_address_info *info, rtx x,
++ enum machine_mode mode, bool strict_p)
++{
++ switch (GET_CODE (x))
++ {
++ case REG:
++ case SUBREG:
++ info->type = ADDRESS_REG;
++ info->reg = x;
++ info->offset = const0_rtx;
++ return riscv_valid_base_register_p (info->reg, mode, strict_p);
++
++ case PLUS:
++ info->type = ADDRESS_REG;
++ info->reg = XEXP (x, 0);
++ info->offset = XEXP (x, 1);
++ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
++ && riscv_valid_offset_p (info->offset, mode));
++
++ case LO_SUM:
++ info->type = ADDRESS_LO_SUM;
++ info->reg = XEXP (x, 0);
++ info->offset = XEXP (x, 1);
++ /* We have to trust the creator of the LO_SUM to do something vaguely
++ sane. Target-independent code that creates a LO_SUM should also
++ create and verify the matching HIGH. Target-independent code that
++ adds an offset to a LO_SUM must prove that the offset will not
++ induce a carry. Failure to do either of these things would be
++ a bug, and we are not required to check for it here. The RISC-V
++ backend itself should only create LO_SUMs for valid symbolic
++ constants, with the high part being either a HIGH or a copy
++ of _gp. */
++ info->symbol_type
++ = riscv_classify_symbolic_expression (info->offset);
++ return (riscv_valid_base_register_p (info->reg, mode, strict_p)
++ && riscv_valid_lo_sum_p (info->symbol_type, mode));
++
++ case CONST_INT:
++ /* Small-integer addresses don't occur very often, but they
++ are legitimate if x0 is a valid base register. */
++ info->type = ADDRESS_CONST_INT;
++ return SMALL_OPERAND (INTVAL (x));
++
++ default:
++ return false;
++ }
++}
++
++/* Implement TARGET_LEGITIMATE_ADDRESS_P. */
++
++static bool
++riscv_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
++{
++ struct riscv_address_info addr;
++
++ return riscv_classify_address (&addr, x, mode, strict_p);
++}
++
++/* Return the number of instructions needed to load or store a value
++ of mode MODE at address X. Return 0 if X isn't valid for MODE.
++ Assume that multiword moves may need to be split into word moves
++ if MIGHT_SPLIT_P, otherwise assume that a single load or store is
++ enough. */
++
++int
++riscv_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
++{
++ struct riscv_address_info addr;
++ int n = 1;
++
++ if (!riscv_classify_address (&addr, x, mode, false))
++ return 0;
++
++ /* BLKmode is used for single unaligned loads and stores and should
++ not count as a multiword mode. */
++ if (mode != BLKmode && might_split_p)
++ n += (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++
++ if (addr.type == ADDRESS_LO_SUM)
++ n += riscv_symbol_insns (addr.symbol_type) - 1;
++
++ return n;
++}
++
++/* Return the number of instructions needed to load constant X.
++ Return 0 if X isn't a valid constant. */
++
++int
++riscv_const_insns (rtx x)
++{
++ enum riscv_symbol_type symbol_type;
++ rtx offset;
++
++ switch (GET_CODE (x))
++ {
++ case HIGH:
++ if (!riscv_symbolic_constant_p (XEXP (x, 0), &symbol_type)
++ || !riscv_split_symbol_type (symbol_type))
++ return 0;
++
++ /* This is simply an LUI. */
++ return 1;
++
++ case CONST_INT:
++ {
++ int cost = riscv_integer_cost (INTVAL (x));
++ /* Force complicated constants to memory. */
++ return cost < 4 ? cost : 0;
++ }
++
++ case CONST_DOUBLE:
++ case CONST_VECTOR:
++ /* We can use x0 to load floating-point zero. */
++ return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
++
++ case CONST:
++ /* See if we can refer to X directly. */
++ if (riscv_symbolic_constant_p (x, &symbol_type))
++ return riscv_symbol_insns (symbol_type);
++
++ /* Otherwise try splitting the constant into a base and offset. */
++ split_const (x, &x, &offset);
++ if (offset != 0)
++ {
++ int n = riscv_const_insns (x);
++ if (n != 0)
++ return n + riscv_integer_cost (INTVAL (offset));
++ }
++ return 0;
++
++ case SYMBOL_REF:
++ case LABEL_REF:
++ return riscv_symbol_insns (riscv_classify_symbol (x));
++
++ default:
++ return 0;
++ }
++}
++
++/* X is a doubleword constant that can be handled by splitting it into
++ two words and loading each word separately. Return the number of
++ instructions required to do this. */
++
++int
++riscv_split_const_insns (rtx x)
++{
++ unsigned int low, high;
++
++ low = riscv_const_insns (riscv_subword (x, false));
++ high = riscv_const_insns (riscv_subword (x, true));
++ gcc_assert (low > 0 && high > 0);
++ return low + high;
++}
++
++/* Return the number of instructions needed to implement INSN,
++ given that it loads from or stores to MEM. */
++
++int
++riscv_load_store_insns (rtx mem, rtx_insn *insn)
++{
++ enum machine_mode mode;
++ bool might_split_p;
++ rtx set;
++
++ gcc_assert (MEM_P (mem));
++ mode = GET_MODE (mem);
++
++ /* Try to prove that INSN does not need to be split. */
++ might_split_p = true;
++ if (GET_MODE_BITSIZE (mode) <= 32)
++ might_split_p = false;
++ else if (GET_MODE_BITSIZE (mode) == 64)
++ {
++ set = single_set (insn);
++ if (set && !riscv_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
++ might_split_p = false;
++ }
++
++ return riscv_address_insns (XEXP (mem, 0), mode, might_split_p);
++}
++
++/* Emit a move from SRC to DEST. Assume that the move expanders can
++ handle all moves if !can_create_pseudo_p (). The distinction is
++ important because, unlike emit_move_insn, the move expanders know
++ how to force Pmode objects into the constant pool even when the
++ constant pool address is not itself legitimate. */
++
++rtx
++riscv_emit_move (rtx dest, rtx src)
++{
++ return (can_create_pseudo_p ()
++ ? emit_move_insn (dest, src)
++ : emit_move_insn_1 (dest, src));
++}
++
++/* Emit an instruction of the form (set TARGET SRC). */
++
++static rtx
++riscv_emit_set (rtx target, rtx src)
++{
++ emit_insn (gen_rtx_SET (target, src));
++ return target;
++}
++
++/* Emit an instruction of the form (set DEST (CODE X Y)). */
++
++static rtx
++riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y)
++{
++ return riscv_emit_set (dest, gen_rtx_fmt_ee (code, GET_MODE (dest), x, y));
++}
++
++/* Compute (CODE X Y) and store the result in a new register
++ of mode MODE. Return that new register. */
++
++static rtx
++riscv_force_binary (enum machine_mode mode, enum rtx_code code, rtx x, rtx y)
++{
++ return riscv_emit_binary (code, gen_reg_rtx (mode), x, y);
++}
++
++/* Copy VALUE to a register and return that register. If new pseudos
++ are allowed, copy it into a new register, otherwise use DEST. */
++
++static rtx
++riscv_force_temporary (rtx dest, rtx value)
++{
++ if (can_create_pseudo_p ())
++ return force_reg (Pmode, value);
++ else
++ {
++ riscv_emit_move (dest, value);
++ return dest;
++ }
++}
++
++/* Wrap symbol or label BASE in an UNSPEC address of type SYMBOL_TYPE,
++ then add CONST_INT OFFSET to the result. */
++
++static rtx
++riscv_unspec_address_offset (rtx base, rtx offset,
++ enum riscv_symbol_type symbol_type)
++{
++ base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
++ UNSPEC_ADDRESS_FIRST + symbol_type);
++ if (offset != const0_rtx)
++ base = gen_rtx_PLUS (Pmode, base, offset);
++ return gen_rtx_CONST (Pmode, base);
++}
++
++/* Return an UNSPEC address with underlying address ADDRESS and symbol
++ type SYMBOL_TYPE. */
++
++rtx
++riscv_unspec_address (rtx address, enum riscv_symbol_type symbol_type)
++{
++ rtx base, offset;
++
++ split_const (address, &base, &offset);
++ return riscv_unspec_address_offset (base, offset, symbol_type);
++}
++
++/* If OP is an UNSPEC address, return the address to which it refers,
++ otherwise return OP itself. */
++
++static rtx
++riscv_strip_unspec_address (rtx op)
++{
++ rtx base, offset;
++
++ split_const (op, &base, &offset);
++ if (UNSPEC_ADDRESS_P (base))
++ op = plus_constant (Pmode, UNSPEC_ADDRESS (base), INTVAL (offset));
++ return op;
++}
++
++/* If riscv_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
++ high part to BASE and return the result. Just return BASE otherwise.
++ TEMP is as for riscv_force_temporary.
++
++ The returned expression can be used as the first operand to a LO_SUM. */
++
++static rtx
++riscv_unspec_offset_high (rtx temp, rtx addr, enum riscv_symbol_type symbol_type)
++{
++ addr = gen_rtx_HIGH (Pmode, riscv_unspec_address (addr, symbol_type));
++ return riscv_force_temporary (temp, addr);
++}
++
++/* Load an entry from the GOT for a TLS GD access. */
++
++static rtx riscv_got_load_tls_gd (rtx dest, rtx sym)
++{
++ if (Pmode == DImode)
++ return gen_got_load_tls_gddi (dest, sym);
++ else
++ return gen_got_load_tls_gdsi (dest, sym);
++}
++
++/* Load an entry from the GOT for a TLS IE access. */
++
++static rtx riscv_got_load_tls_ie (rtx dest, rtx sym)
++{
++ if (Pmode == DImode)
++ return gen_got_load_tls_iedi (dest, sym);
++ else
++ return gen_got_load_tls_iesi (dest, sym);
++}
++
++/* Add in the thread pointer for a TLS LE access. */
++
++static rtx riscv_tls_add_tp_le (rtx dest, rtx base, rtx sym)
++{
++ rtx tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++ if (Pmode == DImode)
++ return gen_tls_add_tp_ledi (dest, base, tp, sym);
++ else
++ return gen_tls_add_tp_lesi (dest, base, tp, sym);
++}
++
++/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
++ it appears in a MEM of that mode. Return true if ADDR is a legitimate
++ constant in that context and can be split into high and low parts.
++ If so, and if LOW_OUT is nonnull, emit the high part and store the
++ low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise.
++
++ TEMP is as for riscv_force_temporary and is used to load the high
++ part into a register.
++
++ When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
++ a legitimate SET_SRC for an .md pattern, otherwise the low part
++ is guaranteed to be a legitimate address for mode MODE. */
++
++bool
++riscv_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *low_out)
++{
++ enum riscv_symbol_type symbol_type;
++
++ if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
++ || !riscv_symbolic_constant_p (addr, &symbol_type)
++ || riscv_symbol_insns (symbol_type) == 0
++ || !riscv_split_symbol_type (symbol_type))
++ return false;
++
++ if (low_out)
++ switch (symbol_type)
++ {
++ case SYMBOL_ABSOLUTE:
++ {
++ rtx high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
++ high = riscv_force_temporary (temp, high);
++ *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
++ }
++ break;
++
++ case SYMBOL_PCREL:
++ {
++ static unsigned seqno;
++ char buf[32];
++ rtx label;
++
++ ssize_t bytes = snprintf (buf, sizeof (buf), ".LA%u", seqno);
++ gcc_assert ((size_t) bytes < sizeof (buf));
++
++ label = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
++ SYMBOL_REF_FLAGS (label) |= SYMBOL_FLAG_LOCAL;
++
++ if (temp == NULL)
++ temp = gen_reg_rtx (Pmode);
++
++ if (Pmode == DImode)
++ emit_insn (gen_auipcdi (temp, copy_rtx (addr), GEN_INT (seqno)));
++ else
++ emit_insn (gen_auipcsi (temp, copy_rtx (addr), GEN_INT (seqno)));
++
++ *low_out = gen_rtx_LO_SUM (Pmode, temp, label);
++
++ seqno++;
++ }
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ return true;
++}
++
++/* Return a legitimate address for REG + OFFSET. TEMP is as for
++ riscv_force_temporary; it is only needed when OFFSET is not a
++ SMALL_OPERAND. */
++
++static rtx
++riscv_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
++{
++ if (!SMALL_OPERAND (offset))
++ {
++ rtx high;
++
++ /* Leave OFFSET as a 12-bit offset and put the excess in HIGH.
++ The addition inside the macro CONST_HIGH_PART may cause an
++ overflow, so we need to force a sign-extension check. */
++ high = gen_int_mode (CONST_HIGH_PART (offset), Pmode);
++ offset = CONST_LOW_PART (offset);
++ high = riscv_force_temporary (temp, high);
++ reg = riscv_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
++ }
++ return plus_constant (Pmode, reg, offset);
++}
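++
++/* For example, REG + 0x12345 does not fit a 12-bit offset, so the code
++ above emits roughly lui t,0x12; add t,t,reg (the temporary name is
++ illustrative) and returns the residual address 0x345(t). */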
++
++/* The __tls_get_addr symbol. */
++static GTY(()) rtx riscv_tls_symbol;
++
++/* Return an instruction sequence that calls __tls_get_addr. SYM is
++ the TLS symbol we are referencing, accessed through the GD model.
++ RESULT is an RTX for the return value location. */
++
++static rtx_insn *
++riscv_call_tls_get_addr (rtx sym, rtx result)
++{
++ rtx a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST), func;
++ rtx_insn *insn;
++
++ if (!riscv_tls_symbol)
++ riscv_tls_symbol = init_one_libfunc ("__tls_get_addr");
++ func = gen_rtx_MEM (FUNCTION_MODE, riscv_tls_symbol);
++
++ start_sequence ();
++
++ emit_insn (riscv_got_load_tls_gd (a0, sym));
++ insn = emit_call_insn (gen_call_value (result, func, const0_rtx, NULL));
++ RTL_CONST_CALL_P (insn) = 1;
++ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
++ insn = get_insns ();
++
++ end_sequence ();
++
++ return insn;
++}
++
++/* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
++ its address. The return value will be both a valid address and a valid
++ SET_SRC (either a REG or a LO_SUM). */
++
++static rtx
++riscv_legitimize_tls_address (rtx loc)
++{
++ rtx dest, tp, tmp;
++ enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
++
++ /* Since we support TLS copy relocs, non-PIC TLS accesses may all use LE. */
++ if (!flag_pic)
++ model = TLS_MODEL_LOCAL_EXEC;
++
++ switch (model)
++ {
++ case TLS_MODEL_LOCAL_DYNAMIC:
++ /* Rely on section anchors for the optimization that LDM TLS
++ provides. The anchor's address is loaded with GD TLS. */
++ case TLS_MODEL_GLOBAL_DYNAMIC:
++ tmp = gen_rtx_REG (Pmode, GP_RETURN);
++ dest = gen_reg_rtx (Pmode);
++ emit_libcall_block (riscv_call_tls_get_addr (loc, tmp), dest, tmp, loc);
++ break;
++
++ case TLS_MODEL_INITIAL_EXEC:
++ /* la.tls.ie; tp-relative add */
++ tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++ tmp = gen_reg_rtx (Pmode);
++ emit_insn (riscv_got_load_tls_ie (tmp, loc));
++ dest = gen_reg_rtx (Pmode);
++ emit_insn (gen_add3_insn (dest, tmp, tp));
++ break;
++
++ case TLS_MODEL_LOCAL_EXEC:
++ tmp = riscv_unspec_offset_high (NULL, loc, SYMBOL_TLS_LE);
++ dest = gen_reg_rtx (Pmode);
++ emit_insn (riscv_tls_add_tp_le (dest, tmp, loc));
++ dest = gen_rtx_LO_SUM (Pmode, dest,
++ riscv_unspec_address (loc, SYMBOL_TLS_LE));
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++ return dest;
++}
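++
++/* For the local-exec case, the emitted sequence is along the lines of
++ lui t,%tprel_hi(x); add t,t,tp,%tprel_add(x)
++ with the access itself then using %tprel_lo(x)(t); this is only a
++ sketch, with illustrative register names. */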
++
++/* If X is not a valid address for mode MODE, force it into a register. */
++
++static rtx
++riscv_force_address (rtx x, enum machine_mode mode)
++{
++ if (!riscv_legitimate_address_p (mode, x, false))
++ x = force_reg (Pmode, x);
++ return x;
++}
++
++/* This function is used to implement LEGITIMIZE_ADDRESS. If X can
++ be legitimized in a way that the generic machinery might not expect,
++ return a new address, otherwise return NULL. MODE is the mode of
++ the memory being accessed. */
++
++static rtx
++riscv_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
++ enum machine_mode mode)
++{
++ rtx addr;
++
++ if (riscv_tls_symbol_p (x))
++ return riscv_legitimize_tls_address (x);
++
++ /* See if the address can split into a high part and a LO_SUM. */
++ if (riscv_split_symbol (NULL, x, mode, &addr))
++ return riscv_force_address (addr, mode);
++
++ /* Handle BASE + OFFSET using riscv_add_offset. */
++ if (GET_CODE (x) == PLUS && CONST_INT_P (XEXP (x, 1))
++ && INTVAL (XEXP (x, 1)) != 0)
++ {
++ rtx base = XEXP (x, 0);
++ HOST_WIDE_INT offset = INTVAL (XEXP (x, 1));
++
++ if (!riscv_valid_base_register_p (base, mode, false))
++ base = copy_to_mode_reg (Pmode, base);
++ addr = riscv_add_offset (NULL, base, offset);
++ return riscv_force_address (addr, mode);
++ }
++
++ return x;
++}
++
++/* Load VALUE into DEST. TEMP is as for riscv_force_temporary. */
++
++void
++riscv_move_integer (rtx temp, rtx dest, HOST_WIDE_INT value)
++{
++ struct riscv_integer_op codes[RISCV_MAX_INTEGER_OPS];
++ enum machine_mode mode;
++ int i, num_ops;
++ rtx x;
++
++ mode = GET_MODE (dest);
++ num_ops = riscv_build_integer (codes, value, mode);
++
++ if (can_create_pseudo_p () && num_ops > 2 /* not a simple constant */
++ && num_ops >= riscv_split_integer_cost (value))
++ x = riscv_split_integer (value, mode);
++ else
++ {
++ /* Apply each binary operation to X. */
++ x = GEN_INT (codes[0].value);
++
++ for (i = 1; i < num_ops; i++)
++ {
++ if (!can_create_pseudo_p ())
++ x = riscv_emit_set (temp, x);
++ else
++ x = force_reg (mode, x);
++
++ x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
++ }
++ }
++
++ riscv_emit_set (dest, x);
++}
++
++/* Subroutine of riscv_legitimize_move. Move constant SRC into register
++ DEST given that SRC satisfies immediate_operand but doesn't satisfy
++ move_operand. */
++
++static void
++riscv_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
++{
++ rtx base, offset;
++
++ /* Split moves of big integers into smaller pieces. */
++ if (splittable_const_int_operand (src, mode))
++ {
++ riscv_move_integer (dest, dest, INTVAL (src));
++ return;
++ }
++
++ /* Split moves of symbolic constants into high/low pairs. */
++ if (riscv_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
++ {
++ riscv_emit_set (dest, src);
++ return;
++ }
++
++ /* Generate the appropriate access sequences for TLS symbols. */
++ if (riscv_tls_symbol_p (src))
++ {
++ riscv_emit_move (dest, riscv_legitimize_tls_address (src));
++ return;
++ }
++
++ /* If we have (const (plus symbol offset)), and that expression cannot
++ be forced into memory, load the symbol first and add in the offset. Also
++ prefer to do this even if the constant _can_ be forced into memory, as it
++ usually produces better code. */
++ split_const (src, &base, &offset);
++ if (offset != const0_rtx
++ && (targetm.cannot_force_const_mem (mode, src) || can_create_pseudo_p ()))
++ {
++ base = riscv_force_temporary (dest, base);
++ riscv_emit_move (dest, riscv_add_offset (NULL, base, INTVAL (offset)));
++ return;
++ }
++
++ src = force_const_mem (mode, src);
++
++ /* When using explicit relocs, constant pool references are sometimes
++ not legitimate addresses. */
++ riscv_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
++ riscv_emit_move (dest, src);
++}
++
++/* If (set DEST SRC) is not a valid move instruction, emit an equivalent
++ sequence that is valid. */
++
++bool
++riscv_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
++{
++ if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
++ {
++ riscv_emit_move (dest, force_reg (mode, src));
++ return true;
++ }
++
++ /* We need to deal with constants that would be legitimate
++ immediate_operands but aren't legitimate move_operands. */
++ if (CONSTANT_P (src) && !move_operand (src, mode))
++ {
++ riscv_legitimize_const_move (mode, dest, src);
++ set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
++ return true;
++ }
++
++ return false;
++}
++
++/* Return true if there is an instruction that implements CODE and accepts
++ X as an immediate operand. */
++
++static int
++riscv_immediate_operand_p (int code, HOST_WIDE_INT x)
++{
++ switch (code)
++ {
++ case ASHIFT:
++ case ASHIFTRT:
++ case LSHIFTRT:
++ /* All shift counts are truncated to a valid constant. */
++ return true;
++
++ case AND:
++ case IOR:
++ case XOR:
++ case PLUS:
++ case LT:
++ case LTU:
++ /* These instructions take 12-bit signed immediates. */
++ return SMALL_OPERAND (x);
++
++ case LE:
++ /* We add 1 to the immediate and use SLT. */
++ return SMALL_OPERAND (x + 1);
++
++ case LEU:
++ /* Likewise SLTU, but reject the always-true case. */
++ return SMALL_OPERAND (x + 1) && x + 1 != 0;
++
++ case GE:
++ case GEU:
++ /* We can emulate an immediate of 1 by using GT/GTU against x0. */
++ return x == 1;
++
++ default:
++ /* By default assume that x0 can be used for 0. */
++ return x == 0;
++ }
++}
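++
++/* E.g. (LE x 100) is accepted because SLTI can use 101, whereas
++ (LE x 0x7ff) is rejected because 0x800 exceeds the 12-bit signed
++ immediate range; both cases are illustrative. */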
++
++/* Return the cost of binary operation X, given that the instruction
++ sequence for a word-sized or smaller operation takes SINGLE_INSNS
++ instructions and that the sequence for a double-word operation takes
++ DOUBLE_INSNS instructions. */
++
++static int
++riscv_binary_cost (rtx x, int single_insns, int double_insns)
++{
++ if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
++ return COSTS_N_INSNS (double_insns);
++ return COSTS_N_INSNS (single_insns);
++}
++
++/* Return the cost of sign- or zero-extending OP. */
++
++static int
++riscv_extend_cost (rtx op, bool unsigned_p)
++{
++ if (MEM_P (op))
++ return 0;
++
++ if (unsigned_p && GET_MODE (op) == QImode)
++ /* We can use ANDI. */
++ return COSTS_N_INSNS (1);
++
++ if (!unsigned_p && GET_MODE (op) == SImode)
++ /* We can use SEXT.W. */
++ return COSTS_N_INSNS (1);
++
++ /* We need to use a shift left and a shift right. */
++ return COSTS_N_INSNS (2);
++}
++
++/* Implement TARGET_RTX_COSTS. */
++
++static bool
++riscv_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno ATTRIBUTE_UNUSED,
++ int *total, bool speed)
++{
++ bool float_mode_p = FLOAT_MODE_P (mode);
++ int cost;
++
++ switch (GET_CODE (x))
++ {
++ case CONST_INT:
++ if (riscv_immediate_operand_p (outer_code, INTVAL (x)))
++ {
++ *total = 0;
++ return true;
++ }
++ /* Fall through. */
++
++ case SYMBOL_REF:
++ case LABEL_REF:
++ case CONST_DOUBLE:
++ case CONST:
++ if ((cost = riscv_const_insns (x)) > 0)
++ {
++ /* If the constant is likely to be stored in a GPR, SETs of
++ single-insn constants are as cheap as register sets; we
++ never want to CSE them. */
++ if (cost == 1 && outer_code == SET)
++ *total = 0;
++ /* When we load a constant more than once, it usually is better
++ to duplicate the last operation in the sequence than to CSE
++ the constant itself. */
++ else if (outer_code == SET || GET_MODE (x) == VOIDmode)
++ *total = COSTS_N_INSNS (1);
++ }
++ else /* The constant will be fetched from the constant pool. */
++ *total = COSTS_N_INSNS (riscv_symbol_insns (SYMBOL_ABSOLUTE));
++ return true;
++
++ case MEM:
++ /* If the address is legitimate, return the number of
++ instructions it needs. */
++ if ((cost = riscv_address_insns (XEXP (x, 0), mode, true)) > 0)
++ {
++ *total = COSTS_N_INSNS (cost + tune_info->memory_cost);
++ return true;
++ }
++ /* Otherwise use the default handling. */
++ return false;
++
++ case NOT:
++ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
++ return false;
++
++ case AND:
++ case IOR:
++ case XOR:
++ /* Double-word operations use two single-word operations. */
++ *total = riscv_binary_cost (x, 1, 2);
++ return false;
++
++ case ASHIFT:
++ case ASHIFTRT:
++ case LSHIFTRT:
++ *total = riscv_binary_cost (x, 1, CONSTANT_P (XEXP (x, 1)) ? 4 : 9);
++ return false;
++
++ case ABS:
++ *total = COSTS_N_INSNS (float_mode_p ? 1 : 3);
++ return false;
++
++ case LO_SUM:
++ *total = set_src_cost (XEXP (x, 0), mode, speed);
++ return true;
++
++ case LT:
++ case LTU:
++ case LE:
++ case LEU:
++ case GT:
++ case GTU:
++ case GE:
++ case GEU:
++ case EQ:
++ case NE:
++ /* Branch comparisons have VOIDmode, so use the first operand's
++ mode instead. */
++ mode = GET_MODE (XEXP (x, 0));
++ if (float_mode_p)
++ *total = tune_info->fp_add[mode == DFmode];
++ else
++ *total = riscv_binary_cost (x, 1, 3);
++ return false;
++
++ case UNORDERED:
++ case ORDERED:
++ /* (FEQ(A, A) & FEQ(B, B)) compared against 0. */
++ mode = GET_MODE (XEXP (x, 0));
++ *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (2);
++ return false;
++
++ case UNEQ:
++ case LTGT:
++ /* (FEQ(A, A) & FEQ(B, B)) compared against FEQ(A, B). */
++ mode = GET_MODE (XEXP (x, 0));
++ *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (3);
++ return false;
++
++ case UNGE:
++ case UNGT:
++ case UNLE:
++ case UNLT:
++ /* FLT or FLE, but guarded by an FFLAGS read and write. */
++ mode = GET_MODE (XEXP (x, 0));
++ *total = tune_info->fp_add[mode == DFmode] + COSTS_N_INSNS (4);
++ return false;
++
++ case MINUS:
++ case PLUS:
++ if (float_mode_p)
++ *total = tune_info->fp_add[mode == DFmode];
++ else
++ *total = riscv_binary_cost (x, 1, 4);
++ return false;
++
++ case NEG:
++ {
++ rtx op = XEXP (x, 0);
++ if (GET_CODE (op) == FMA && !HONOR_SIGNED_ZEROS (mode))
++ {
++ *total = (tune_info->fp_mul[mode == DFmode]
++ + set_src_cost (XEXP (op, 0), mode, speed)
++ + set_src_cost (XEXP (op, 1), mode, speed)
++ + set_src_cost (XEXP (op, 2), mode, speed));
++ return true;
++ }
++ }
++
++ if (float_mode_p)
++ *total = tune_info->fp_add[mode == DFmode];
++ else
++ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
++ return false;
++
++ case MULT:
++ if (float_mode_p)
++ *total = tune_info->fp_mul[mode == DFmode];
++ else if (GET_MODE_SIZE (mode) > UNITS_PER_WORD)
++ *total = 3 * tune_info->int_mul[0] + COSTS_N_INSNS (2);
++ else if (!speed)
++ *total = COSTS_N_INSNS (1);
++ else
++ *total = tune_info->int_mul[mode == DImode];
++ return false;
++
++ case DIV:
++ case SQRT:
++ case MOD:
++ if (float_mode_p)
++ {
++ *total = tune_info->fp_div[mode == DFmode];
++ return false;
++ }
++ /* Fall through. */
++
++ case UDIV:
++ case UMOD:
++ if (speed)
++ *total = tune_info->int_div[mode == DImode];
++ else
++ *total = COSTS_N_INSNS (1);
++ return false;
++
++ case SIGN_EXTEND:
++ case ZERO_EXTEND:
++ *total = riscv_extend_cost (XEXP (x, 0), GET_CODE (x) == ZERO_EXTEND);
++ return false;
++
++ case FLOAT:
++ case UNSIGNED_FLOAT:
++ case FIX:
++ case FLOAT_EXTEND:
++ case FLOAT_TRUNCATE:
++ *total = tune_info->fp_add[mode == DFmode];
++ return false;
++
++ case FMA:
++ *total = (tune_info->fp_mul[mode == DFmode]
++ + set_src_cost (XEXP (x, 0), mode, speed)
++ + set_src_cost (XEXP (x, 1), mode, speed)
++ + set_src_cost (XEXP (x, 2), mode, speed));
++ return true;
++
++ case UNSPEC:
++ if (XINT (x, 1) == UNSPEC_AUIPC)
++ {
++ /* Make AUIPC cheap to avoid spilling its result to the stack. */
++ *total = 1;
++ return true;
++ }
++ return false;
++
++ default:
++ return false;
++ }
++}
++
++/* Implement TARGET_ADDRESS_COST. */
++
++static int
++riscv_address_cost (rtx addr, enum machine_mode mode,
++ addr_space_t as ATTRIBUTE_UNUSED,
++ bool speed ATTRIBUTE_UNUSED)
++{
++ return riscv_address_insns (addr, mode, false);
++}
++
++/* Return one word of double-word value OP. HIGH_P is true to select the
++ high part or false to select the low part. */
++
++rtx
++riscv_subword (rtx op, bool high_p)
++{
++ unsigned int byte = high_p ? UNITS_PER_WORD : 0;
++ enum machine_mode mode = GET_MODE (op);
++
++ if (mode == VOIDmode)
++ mode = TARGET_64BIT ? TImode : DImode;
++
++ if (MEM_P (op))
++ return adjust_address (op, word_mode, byte);
++
++ if (REG_P (op))
++ gcc_assert (!FP_REG_RTX_P (op));
++
++ return simplify_gen_subreg (word_mode, op, mode, byte);
++}
++
++/* Return true if a 64-bit move from SRC to DEST should be split into two. */
++
++bool
++riscv_split_64bit_move_p (rtx dest, rtx src)
++{
++ if (TARGET_64BIT)
++ return false;
++
++ /* Allow FPR <-> FPR and FPR <-> MEM moves, and permit the special case
++ of zeroing an FPR with FCVT.D.W. */
++ if (TARGET_DOUBLE_FLOAT
++ && ((FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
++ || (FP_REG_RTX_P (dest) && MEM_P (src))
++ || (FP_REG_RTX_P (src) && MEM_P (dest))
++ || (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))))
++ return false;
++
++ return true;
++}
++
++/* Split a doubleword move from SRC to DEST. On 32-bit targets,
++ this function handles 64-bit moves for which riscv_split_64bit_move_p
++ holds. For 64-bit targets, this function handles 128-bit moves. */
++
++void
++riscv_split_doubleword_move (rtx dest, rtx src)
++{
++ rtx low_dest;
++
++ /* The operation can be split into two normal moves. Decide in
++ which order to do them. */
++ low_dest = riscv_subword (dest, false);
++ if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
++ {
++ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
++ riscv_emit_move (low_dest, riscv_subword (src, false));
++ }
++ else
++ {
++ riscv_emit_move (low_dest, riscv_subword (src, false));
++ riscv_emit_move (riscv_subword (dest, true), riscv_subword (src, true));
++ }
++}
++
++/* Return the appropriate instructions to move SRC into DEST. Assume
++ that SRC is operand 1 and DEST is operand 0. */
++
++const char *
++riscv_output_move (rtx dest, rtx src)
++{
++ enum rtx_code dest_code, src_code;
++ enum machine_mode mode;
++ bool dbl_p;
++
++ dest_code = GET_CODE (dest);
++ src_code = GET_CODE (src);
++ mode = GET_MODE (dest);
++ dbl_p = (GET_MODE_SIZE (mode) == 8);
++
++ if (dbl_p && riscv_split_64bit_move_p (dest, src))
++ return "#";
++
++ if (dest_code == REG && GP_REG_P (REGNO (dest)))
++ {
++ if (src_code == REG && FP_REG_P (REGNO (src)))
++ return dbl_p ? "fmv.x.d\t%0,%1" : "fmv.x.s\t%0,%1";
++
++ if (src_code == MEM)
++ switch (GET_MODE_SIZE (mode))
++ {
++ case 1: return "lbu\t%0,%1";
++ case 2: return "lhu\t%0,%1";
++ case 4: return "lw\t%0,%1";
++ case 8: return "ld\t%0,%1";
++ }
++
++ if (src_code == CONST_INT)
++ return "li\t%0,%1";
++
++ if (src_code == HIGH)
++ return "lui\t%0,%h1";
++
++ if (symbolic_operand (src, VOIDmode))
++ switch (riscv_classify_symbolic_expression (src))
++ {
++ case SYMBOL_GOT_DISP: return "la\t%0,%1";
++ case SYMBOL_ABSOLUTE: return "lla\t%0,%1";
++ case SYMBOL_PCREL: return "lla\t%0,%1";
++ default: gcc_unreachable ();
++ }
++ }
++ if ((src_code == REG && GP_REG_P (REGNO (src)))
++ || (src == CONST0_RTX (mode)))
++ {
++ if (dest_code == REG)
++ {
++ if (GP_REG_P (REGNO (dest)))
++ return "mv\t%0,%z1";
++
++ if (FP_REG_P (REGNO (dest)))
++ {
++ if (!dbl_p)
++ return "fmv.s.x\t%0,%z1";
++ if (TARGET_64BIT)
++ return "fmv.d.x\t%0,%z1";
++ /* In RV32, we can emulate fmv.d.x %0, x0 using fcvt.d.w. */
++ gcc_assert (src == CONST0_RTX (mode));
++ return "fcvt.d.w\t%0,x0";
++ }
++ }
++ if (dest_code == MEM)
++ switch (GET_MODE_SIZE (mode))
++ {
++ case 1: return "sb\t%z1,%0";
++ case 2: return "sh\t%z1,%0";
++ case 4: return "sw\t%z1,%0";
++ case 8: return "sd\t%z1,%0";
++ }
++ }
++ if (src_code == REG && FP_REG_P (REGNO (src)))
++ {
++ if (dest_code == REG && FP_REG_P (REGNO (dest)))
++ return dbl_p ? "fmv.d\t%0,%1" : "fmv.s\t%0,%1";
++
++ if (dest_code == MEM)
++ return dbl_p ? "fsd\t%1,%0" : "fsw\t%1,%0";
++ }
++ if (dest_code == REG && FP_REG_P (REGNO (dest)))
++ {
++ if (src_code == MEM)
++ return dbl_p ? "fld\t%0,%1" : "flw\t%0,%1";
++ }
++ gcc_unreachable ();
++}
++
++/* Return true if CMP1 is a suitable second operand for integer ordering
++ test CODE. See also the *sCC patterns in riscv.md. */
++
++static bool
++riscv_int_order_operand_ok_p (enum rtx_code code, rtx cmp1)
++{
++ switch (code)
++ {
++ case GT:
++ case GTU:
++ return reg_or_0_operand (cmp1, VOIDmode);
++
++ case GE:
++ case GEU:
++ return cmp1 == const1_rtx;
++
++ case LT:
++ case LTU:
++ return arith_operand (cmp1, VOIDmode);
++
++ case LE:
++ return sle_operand (cmp1, VOIDmode);
++
++ case LEU:
++ return sleu_operand (cmp1, VOIDmode);
++
++ default:
++ gcc_unreachable ();
++ }
++}
++
++/* Return true if *CMP1 (of mode MODE) is a valid second operand for
++ integer ordering test *CODE, or if an equivalent combination can
++ be formed by adjusting *CODE and *CMP1. When returning true, update
++ *CODE and *CMP1 with the chosen code and operand, otherwise leave
++ them alone. */
++
++static bool
++riscv_canonicalize_int_order_test (enum rtx_code *code, rtx *cmp1,
++ enum machine_mode mode)
++{
++ HOST_WIDE_INT plus_one;
++
++ if (riscv_int_order_operand_ok_p (*code, *cmp1))
++ return true;
++
++ if (CONST_INT_P (*cmp1))
++ switch (*code)
++ {
++ case LE:
++ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
++ if (INTVAL (*cmp1) < plus_one)
++ {
++ *code = LT;
++ *cmp1 = force_reg (mode, GEN_INT (plus_one));
++ return true;
++ }
++ break;
++
++ case LEU:
++ plus_one = trunc_int_for_mode (UINTVAL (*cmp1) + 1, mode);
++ if (plus_one != 0)
++ {
++ *code = LTU;
++ *cmp1 = force_reg (mode, GEN_INT (plus_one));
++ return true;
++ }
++ break;
++
++ default:
++ break;
++ }
++ return false;
++}
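++
++/* For instance, (LE x 0x7ff) fails the immediate test above, so it is
++ canonicalized to (LT x t) with 0x800 forced into a register t; an
++ illustrative case only. */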
++
++/* Compare CMP0 and CMP1 using ordering test CODE and store the result
++ in TARGET. CMP0 and TARGET are register_operands. If INVERT_PTR
++ is nonnull, it's OK to set TARGET to the inverse of the result and
++ flip *INVERT_PTR instead. */
++
++static void
++riscv_emit_int_order_test (enum rtx_code code, bool *invert_ptr,
++ rtx target, rtx cmp0, rtx cmp1)
++{
++ enum machine_mode mode;
++
++ /* First see if there is a RISC-V instruction that can do this operation.
++ If not, try doing the same for the inverse operation. If that also
++ fails, force CMP1 into a register and try again. */
++ mode = GET_MODE (cmp0);
++ if (riscv_canonicalize_int_order_test (&code, &cmp1, mode))
++ riscv_emit_binary (code, target, cmp0, cmp1);
++ else
++ {
++ enum rtx_code inv_code = reverse_condition (code);
++ if (!riscv_canonicalize_int_order_test (&inv_code, &cmp1, mode))
++ {
++ cmp1 = force_reg (mode, cmp1);
++ riscv_emit_int_order_test (code, invert_ptr, target, cmp0, cmp1);
++ }
++ else if (invert_ptr == 0)
++ {
++ rtx inv_target = riscv_force_binary (GET_MODE (target),
++ inv_code, cmp0, cmp1);
++ riscv_emit_binary (XOR, target, inv_target, const1_rtx);
++ }
++ else
++ {
++ *invert_ptr = !*invert_ptr;
++ riscv_emit_binary (inv_code, target, cmp0, cmp1);
++ }
++ }
++}
++
++/* Return a register that is zero iff CMP0 and CMP1 are equal.
++ The register will have the same mode as CMP0. */
++
++static rtx
++riscv_zero_if_equal (rtx cmp0, rtx cmp1)
++{
++ if (cmp1 == const0_rtx)
++ return cmp0;
++
++ return expand_binop (GET_MODE (cmp0), sub_optab,
++ cmp0, cmp1, 0, 0, OPTAB_DIRECT);
++}
++
++/* Sign- or zero-extend OP0 and OP1 for integer comparisons. */
++
++static void
++riscv_extend_comparands (rtx_code code, rtx *op0, rtx *op1)
++{
++ /* Comparisons consider all XLEN bits, so extend sub-XLEN values. */
++ if (GET_MODE_SIZE (word_mode) > GET_MODE_SIZE (GET_MODE (*op0)))
++ {
++ /* It is more profitable to zero-extend QImode values. */
++ if (unsigned_condition (code) == code && GET_MODE (*op0) == QImode)
++ {
++ *op0 = gen_rtx_ZERO_EXTEND (word_mode, *op0);
++ if (CONST_INT_P (*op1))
++ *op1 = GEN_INT ((uint8_t) INTVAL (*op1));
++ else
++ *op1 = gen_rtx_ZERO_EXTEND (word_mode, *op1);
++ }
++ else
++ {
++ *op0 = gen_rtx_SIGN_EXTEND (word_mode, *op0);
++ if (*op1 != const0_rtx)
++ *op1 = gen_rtx_SIGN_EXTEND (word_mode, *op1);
++ }
++ }
++}
++
++/* Convert a comparison into something that can be used in a branch. On
++ entry, *OP0 and *OP1 are the values being compared and *CODE is the code
++ used to compare them. Update them to describe the final comparison. */
++
++static void
++riscv_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1)
++{
++ if (splittable_const_int_operand (*op1, VOIDmode))
++ {
++ HOST_WIDE_INT rhs = INTVAL (*op1);
++
++ if (*code == EQ || *code == NE)
++ {
++ /* Convert e.g. OP0 == 2048 into OP0 - 2048 == 0. */
++ if (SMALL_OPERAND (-rhs))
++ {
++ *op0 = riscv_force_binary (GET_MODE (*op0), PLUS, *op0,
++ GEN_INT (-rhs));
++ *op1 = const0_rtx;
++ }
++ }
++ else
++ {
++ static const enum rtx_code mag_comparisons[][2] = {
++ {LEU, LTU}, {GTU, GEU}, {LE, LT}, {GT, GE}
++ };
++
++ /* Convert e.g. (OP0 <= 0xFFF) into (OP0 < 0x1000). */
++ for (size_t i = 0; i < ARRAY_SIZE (mag_comparisons); i++)
++ {
++ HOST_WIDE_INT new_rhs;
++ bool increment = *code == mag_comparisons[i][0];
++ bool decrement = *code == mag_comparisons[i][1];
++ if (!increment && !decrement)
++ continue;
++
++ new_rhs = rhs + (increment ? 1 : -1);
++ if (riscv_integer_cost (new_rhs) < riscv_integer_cost (rhs)
++ && (rhs < 0) == (new_rhs < 0))
++ {
++ *op1 = GEN_INT (new_rhs);
++ *code = mag_comparisons[i][increment];
++ }
++ break;
++ }
++ }
++ }
++
++ riscv_extend_comparands (*code, op0, op1);
++
++ *op0 = force_reg (word_mode, *op0);
++ if (*op1 != const0_rtx)
++ *op1 = force_reg (word_mode, *op1);
++}
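++
++/* E.g. a branch on x == 2048 is rewritten as (x + -2048) == 0, which
++ needs only an ADDI followed by a branch against x0; shown purely as
++ an illustration of the transforms above. */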
++
++/* Like riscv_emit_int_compare, but for floating-point comparisons. */
++
++static void
++riscv_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1)
++{
++ rtx tmp0, tmp1, cmp_op0 = *op0, cmp_op1 = *op1;
++ enum rtx_code fp_code = *code;
++ *code = NE;
++
++ switch (fp_code)
++ {
++ case UNORDERED:
++ *code = EQ;
++ /* Fall through. */
++
++ case ORDERED:
++ /* a == a && b == b */
++ tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
++ tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
++ *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
++ *op1 = const0_rtx;
++ break;
++
++ case UNEQ:
++ case LTGT:
++ /* ordered(a, b) > (a == b) */
++ *code = fp_code == LTGT ? GTU : EQ;
++ tmp0 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op0);
++ tmp1 = riscv_force_binary (word_mode, EQ, cmp_op1, cmp_op1);
++ *op0 = riscv_force_binary (word_mode, AND, tmp0, tmp1);
++ *op1 = riscv_force_binary (word_mode, EQ, cmp_op0, cmp_op1);
++ break;
++
++#define UNORDERED_COMPARISON(CODE, CMP) \
++ case CODE: \
++ *code = EQ; \
++ *op0 = gen_reg_rtx (word_mode); \
++ if (GET_MODE (cmp_op0) == SFmode && TARGET_64BIT) \
++ emit_insn (gen_f##CMP##_quietsfdi4 (*op0, cmp_op0, cmp_op1)); \
++ else if (GET_MODE (cmp_op0) == SFmode) \
++ emit_insn (gen_f##CMP##_quietsfsi4 (*op0, cmp_op0, cmp_op1)); \
++ else if (GET_MODE (cmp_op0) == DFmode && TARGET_64BIT) \
++ emit_insn (gen_f##CMP##_quietdfdi4 (*op0, cmp_op0, cmp_op1)); \
++ else if (GET_MODE (cmp_op0) == DFmode) \
++ emit_insn (gen_f##CMP##_quietdfsi4 (*op0, cmp_op0, cmp_op1)); \
++ else \
++ gcc_unreachable (); \
++ *op1 = const0_rtx; \
++ break;
++
++ case UNLT:
++ std::swap (cmp_op0, cmp_op1);
++ /* Fall through. */
++
++ UNORDERED_COMPARISON(UNGT, le)
++
++ case UNLE:
++ std::swap (cmp_op0, cmp_op1);
++ /* Fall through. */
++
++ UNORDERED_COMPARISON(UNGE, lt)
++#undef UNORDERED_COMPARISON
++
++ case NE:
++ fp_code = EQ;
++ *code = EQ;
++ /* Fall through. */
++
++ case EQ:
++ case LE:
++ case LT:
++ case GE:
++ case GT:
++ /* We have instructions for these cases. */
++ *op0 = riscv_force_binary (word_mode, fp_code, cmp_op0, cmp_op1);
++ *op1 = const0_rtx;
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++}
++
++/* CODE-compare OP0 and OP1. Store the result in TARGET. */
++
++void
++riscv_expand_int_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
++{
++ riscv_extend_comparands (code, &op0, &op1);
++ op0 = force_reg (word_mode, op0);
++
++ if (code == EQ || code == NE)
++ {
++ rtx zie = riscv_zero_if_equal (op0, op1);
++ riscv_emit_binary (code, target, zie, const0_rtx);
++ }
++ else
++ riscv_emit_int_order_test (code, 0, target, op0, op1);
++}
++
++/* Like riscv_expand_int_scc, but for floating-point comparisons. */
++
++void
++riscv_expand_float_scc (rtx target, enum rtx_code code, rtx op0, rtx op1)
++{
++ riscv_emit_float_compare (&code, &op0, &op1);
++
++ rtx cmp = riscv_force_binary (word_mode, code, op0, op1);
++ riscv_emit_set (target, lowpart_subreg (SImode, cmp, word_mode));
++}
++
++/* Jump to LABEL if (CODE OP0 OP1) holds. */
++
++void
++riscv_expand_conditional_branch (rtx label, rtx_code code, rtx op0, rtx op1)
++{
++ if (FLOAT_MODE_P (GET_MODE (op1)))
++ riscv_emit_float_compare (&code, &op0, &op1);
++ else
++ riscv_emit_int_compare (&code, &op0, &op1);
++
++ rtx condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
++ emit_jump_insn (gen_condjump (condition, label));
++}
++
++/* Implement TARGET_FUNCTION_ARG_BOUNDARY. Every parameter gets at
++ least PARM_BOUNDARY bits of alignment, but will be given anything up
++ to STACK_BOUNDARY bits if the type requires it. */
++
++static unsigned int
++riscv_function_arg_boundary (enum machine_mode mode, const_tree type)
++{
++ unsigned int alignment;
++
++ /* Use natural alignment if the type is not aggregate data. */
++ if (type && !AGGREGATE_TYPE_P (type))
++ alignment = TYPE_ALIGN (TYPE_MAIN_VARIANT (type));
++ else
++ alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
++
++ return MIN (STACK_BOUNDARY, MAX (PARM_BOUNDARY, alignment));
++}
++
++/* If MODE represents an argument that can be passed or returned in
++ floating-point registers, return the number of registers, else 0. */
++
++static unsigned
++riscv_pass_mode_in_fpr_p (enum machine_mode mode)
++{
++ if (GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FP_ARG)
++ {
++ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
++ return 1;
++
++ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
++ return 2;
++ }
++
++ return 0;
++}
++
++typedef struct {
++ const_tree type;
++ HOST_WIDE_INT offset;
++} riscv_aggregate_field;
++
++/* Identify subfields of aggregates that are candidates for passing in
++ floating-point registers. */
++
++static int
++riscv_flatten_aggregate_field (const_tree type,
++ riscv_aggregate_field fields[2],
++ int n, HOST_WIDE_INT offset)
++{
++ switch (TREE_CODE (type))
++ {
++ case RECORD_TYPE:
++ /* Can't handle incomplete types or sizes that are not fixed. */
++ if (!COMPLETE_TYPE_P (type)
++ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
++ || !tree_fits_uhwi_p (TYPE_SIZE (type)))
++ return -1;
++
++ for (tree f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
++ if (TREE_CODE (f) == FIELD_DECL)
++ {
++ if (!TYPE_P (TREE_TYPE (f)))
++ return -1;
++
++ HOST_WIDE_INT pos = offset + int_byte_position (f);
++ n = riscv_flatten_aggregate_field (TREE_TYPE (f), fields, n, pos);
++ if (n < 0)
++ return -1;
++ }
++ return n;
++
++ case ARRAY_TYPE:
++ {
++ HOST_WIDE_INT n_elts;
++ riscv_aggregate_field subfields[2];
++ tree index = TYPE_DOMAIN (type);
++ tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (type));
++ int n_subfields = riscv_flatten_aggregate_field (TREE_TYPE (type),
++ subfields, 0, offset);
++
++	/* Can't handle incomplete types or sizes that are not fixed.  */
++ if (n_subfields <= 0
++ || !COMPLETE_TYPE_P (type)
++ || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
++ || !index
++ || !TYPE_MAX_VALUE (index)
++ || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
++ || !TYPE_MIN_VALUE (index)
++ || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
++ || !tree_fits_uhwi_p (elt_size))
++ return -1;
++
++ n_elts = 1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
++ - tree_to_uhwi (TYPE_MIN_VALUE (index));
++ gcc_assert (n_elts >= 0);
++
++ for (HOST_WIDE_INT i = 0; i < n_elts; i++)
++ for (int j = 0; j < n_subfields; j++)
++ {
++ if (n >= 2)
++ return -1;
++
++ fields[n] = subfields[j];
++ fields[n++].offset += i * tree_to_uhwi (elt_size);
++ }
++
++ return n;
++ }
++
++ case COMPLEX_TYPE:
++ {
++	/* A complex type consumes both fields, so N must be 0 on entry.  */
++ if (n != 0)
++ return -1;
++
++ HOST_WIDE_INT elt_size = GET_MODE_SIZE (TYPE_MODE (TREE_TYPE (type)));
++
++ if (elt_size <= UNITS_PER_FP_ARG)
++ {
++ fields[0].type = TREE_TYPE (type);
++ fields[0].offset = offset;
++ fields[1].type = TREE_TYPE (type);
++ fields[1].offset = offset + elt_size;
++
++ return 2;
++ }
++
++ return -1;
++ }
++
++ default:
++ if (n < 2
++ && ((SCALAR_FLOAT_TYPE_P (type)
++ && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FP_ARG)
++ || (INTEGRAL_TYPE_P (type)
++ && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_WORD)))
++ {
++ fields[n].type = type;
++ fields[n].offset = offset;
++ return n + 1;
++ }
++ else
++ return -1;
++ }
++}
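++
++/* Some illustrative cases of the flattening above (derived from the code,
++   not from ABI documentation; the complex case assumes UNITS_PER_FP_ARG
++   is at least 8):
++
++     struct { float a; float b; };    two SFmode fields
++     struct { float a[2]; };          the same, via the ARRAY_TYPE case
++     struct { double _Complex c; };   two DFmode fields (N was 0)
++     struct { float a, b, c; };       rejected: more than two fields  */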
++
++/* Identify candidate aggregates for passing in floating-point registers.
++ Candidates have at most two fields after flattening. */
++
++static int
++riscv_flatten_aggregate_argument (const_tree type,
++ riscv_aggregate_field fields[2])
++{
++ if (!type || TREE_CODE (type) != RECORD_TYPE)
++ return -1;
++
++ return riscv_flatten_aggregate_field (type, fields, 0, 0);
++}
++
++/* See whether TYPE is a record whose fields should be returned in one or
++ two floating-point registers. If so, populate FIELDS accordingly. */
++
++static unsigned
++riscv_pass_aggregate_in_fpr_pair_p (const_tree type,
++ riscv_aggregate_field fields[2])
++{
++ int n = riscv_flatten_aggregate_argument (type, fields);
++
++ for (int i = 0; i < n; i++)
++ if (!SCALAR_FLOAT_TYPE_P (fields[i].type))
++ return 0;
++
++ return n > 0 ? n : 0;
++}
++
++/* See whether TYPE is a record whose fields should be returned in one
++   floating-point register and one integer register.  If so, populate
++   FIELDS accordingly.  */
++
++static bool
++riscv_pass_aggregate_in_fpr_and_gpr_p (const_tree type,
++ riscv_aggregate_field fields[2])
++{
++ unsigned num_int = 0, num_float = 0;
++ int n = riscv_flatten_aggregate_argument (type, fields);
++
++ for (int i = 0; i < n; i++)
++ {
++ num_float += SCALAR_FLOAT_TYPE_P (fields[i].type);
++ num_int += INTEGRAL_TYPE_P (fields[i].type);
++ }
++
++ return num_int == 1 && num_float == 1;
++}
++
++/* Return the representation of an argument passed or returned in an FPR
++   when the value has mode VALUE_MODE and the type has mode TYPE_MODE.  The
++ two modes may be different for structures like:
++
++ struct __attribute__((packed)) foo { float f; }
++
++ where the SFmode value "f" is passed in REGNO but the struct itself
++ has mode BLKmode. */
++
++static rtx
++riscv_pass_fpr_single (enum machine_mode type_mode, unsigned regno,
++ enum machine_mode value_mode)
++{
++ rtx x = gen_rtx_REG (value_mode, regno);
++
++ if (type_mode != value_mode)
++ {
++ x = gen_rtx_EXPR_LIST (VOIDmode, x, const0_rtx);
++ x = gen_rtx_PARALLEL (type_mode, gen_rtvec (1, x));
++ }
++ return x;
++}
++
++/* Pass or return a composite value in the FPR pair REGNO and REGNO + 1.
++ MODE is the mode of the composite. MODE1 and OFFSET1 are the mode and
++ byte offset for the first value, likewise MODE2 and OFFSET2 for the
++ second value. */
++
++static rtx
++riscv_pass_fpr_pair (enum machine_mode mode, unsigned regno1,
++ enum machine_mode mode1, HOST_WIDE_INT offset1,
++ unsigned regno2, enum machine_mode mode2,
++ HOST_WIDE_INT offset2)
++{
++ return gen_rtx_PARALLEL
++ (mode,
++ gen_rtvec (2,
++ gen_rtx_EXPR_LIST (VOIDmode,
++ gen_rtx_REG (mode1, regno1),
++ GEN_INT (offset1)),
++ gen_rtx_EXPR_LIST (VOIDmode,
++ gen_rtx_REG (mode2, regno2),
++ GEN_INT (offset2))));
++}
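++
++/* Roughly, the generated RTL has the shape (an illustrative sketch, for
++   a complex double in fa0/fa1 with offsets 0 and 8):
++
++     (parallel [(expr_list (reg:DF fa0) (const_int 0))
++                (expr_list (reg:DF fa1) (const_int 8))])  */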
++
++/* Fill INFO with information about a single argument, and return an
++ RTL pattern to pass or return the argument. CUM is the cumulative
++ state for earlier arguments. MODE is the mode of this argument and
++ TYPE is its type (if known). NAMED is true if this is a named
++ (fixed) argument rather than a variable one. RETURN_P is true if
++ returning the argument, or false if passing the argument. */
++
++static rtx
++riscv_get_arg_info (struct riscv_arg_info *info, const CUMULATIVE_ARGS *cum,
++ enum machine_mode mode, const_tree type, bool named,
++ bool return_p)
++{
++ unsigned num_bytes, num_words;
++ unsigned fpr_base = return_p ? FP_RETURN : FP_ARG_FIRST;
++ unsigned gpr_base = return_p ? GP_RETURN : GP_ARG_FIRST;
++ unsigned alignment = riscv_function_arg_boundary (mode, type);
++
++ memset (info, 0, sizeof (*info));
++ info->gpr_offset = cum->num_gprs;
++ info->fpr_offset = cum->num_fprs;
++
++ if (named)
++ {
++ riscv_aggregate_field fields[2];
++ unsigned fregno = fpr_base + info->fpr_offset;
++ unsigned gregno = gpr_base + info->gpr_offset;
++
++ /* Pass one- or two-element floating-point aggregates in FPRs. */
++ if ((info->num_fprs = riscv_pass_aggregate_in_fpr_pair_p (type, fields))
++ && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
++ switch (info->num_fprs)
++ {
++ case 1:
++ return riscv_pass_fpr_single (mode, fregno,
++ TYPE_MODE (fields[0].type));
++
++ case 2:
++ return riscv_pass_fpr_pair (mode, fregno,
++ TYPE_MODE (fields[0].type),
++ fields[0].offset,
++ fregno + 1,
++ TYPE_MODE (fields[1].type),
++ fields[1].offset);
++
++ default:
++ gcc_unreachable ();
++ }
++
++ /* Pass real and complex floating-point numbers in FPRs. */
++ if ((info->num_fprs = riscv_pass_mode_in_fpr_p (mode))
++ && info->fpr_offset + info->num_fprs <= MAX_ARGS_IN_REGISTERS)
++ switch (GET_MODE_CLASS (mode))
++ {
++ case MODE_FLOAT:
++ return gen_rtx_REG (mode, fregno);
++
++ case MODE_COMPLEX_FLOAT:
++ return riscv_pass_fpr_pair (mode, fregno, GET_MODE_INNER (mode), 0,
++ fregno + 1, GET_MODE_INNER (mode),
++ GET_MODE_UNIT_SIZE (mode));
++
++ default:
++ gcc_unreachable ();
++ }
++
++ /* Pass structs with one float and one integer in an FPR and a GPR. */
++ if (riscv_pass_aggregate_in_fpr_and_gpr_p (type, fields)
++ && info->gpr_offset < MAX_ARGS_IN_REGISTERS
++ && info->fpr_offset < MAX_ARGS_IN_REGISTERS)
++ {
++ info->num_gprs = 1;
++ info->num_fprs = 1;
++
++ if (!SCALAR_FLOAT_TYPE_P (fields[0].type))
++ std::swap (fregno, gregno);
++
++ return riscv_pass_fpr_pair (mode, fregno, TYPE_MODE (fields[0].type),
++ fields[0].offset,
++ gregno, TYPE_MODE (fields[1].type),
++ fields[1].offset);
++ }
++ }
++
++ /* Work out the size of the argument. */
++ num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
++ num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++
++ /* Doubleword-aligned varargs start on an even register boundary. */
++ if (!named && num_bytes != 0 && alignment > BITS_PER_WORD)
++ info->gpr_offset += info->gpr_offset & 1;
++
++ /* Partition the argument between registers and stack. */
++ info->num_fprs = 0;
++ info->num_gprs = MIN (num_words, MAX_ARGS_IN_REGISTERS - info->gpr_offset);
++ info->stack_p = (num_words - info->num_gprs) != 0;
++
++ if (info->num_gprs || return_p)
++ return gen_rtx_REG (mode, gpr_base + info->gpr_offset);
++
++ return NULL_RTX;
++}
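++
++/* Putting the cases above together (an illustrative sketch using the
++   standard ABI register names, assuming rv64gc and that no earlier
++   arguments have consumed registers):
++
++     void f (struct { float a; float b; } x);  x in fa0/fa1
++     void g (struct { float f; int i; } x);    x.f in fa0, x.i in a0
++     void h (double _Complex z);               z in fa0/fa1
++     void k (long a, long b);                  a in a0, b in a1
++
++   Once the FPRs or GPRs are exhausted, the final partitioning step
++   above splits the remainder of an argument onto the stack.  */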
++
++/* Implement TARGET_FUNCTION_ARG. */
++
++static rtx
++riscv_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
++ const_tree type, bool named)
++{
++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++ struct riscv_arg_info info;
++
++ if (mode == VOIDmode)
++ return NULL;
++
++ return riscv_get_arg_info (&info, cum, mode, type, named, false);
++}
++
++/* Implement TARGET_FUNCTION_ARG_ADVANCE. */
++
++static void
++riscv_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
++ const_tree type, bool named)
++{
++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++ struct riscv_arg_info info;
++
++ riscv_get_arg_info (&info, cum, mode, type, named, false);
++
++ /* Advance the register count. This has the effect of setting
++ num_gprs to MAX_ARGS_IN_REGISTERS if a doubleword-aligned
++ argument required us to skip the final GPR and pass the whole
++ argument on the stack. */
++ cum->num_fprs = info.fpr_offset + info.num_fprs;
++ cum->num_gprs = info.gpr_offset + info.num_gprs;
++}
++
++/* Implement TARGET_ARG_PARTIAL_BYTES. */
++
++static int
++riscv_arg_partial_bytes (cumulative_args_t cum,
++ enum machine_mode mode, tree type, bool named)
++{
++ struct riscv_arg_info arg;
++
++ riscv_get_arg_info (&arg, get_cumulative_args (cum), mode, type, named, false);
++ return arg.stack_p ? arg.num_gprs * UNITS_PER_WORD : 0;
++}
++
++/* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
++ VALTYPE is the return type and MODE is VOIDmode. For libcalls,
++ VALTYPE is null and MODE is the mode of the return value. */
++
++rtx
++riscv_function_value (const_tree type, const_tree func, enum machine_mode mode)
++{
++ struct riscv_arg_info info;
++ CUMULATIVE_ARGS args;
++
++ if (type)
++ {
++ int unsigned_p = TYPE_UNSIGNED (type);
++
++ mode = TYPE_MODE (type);
++
++      /* Since TARGET_PROMOTE_FUNCTION_MODE unconditionally promotes
++	 return values, promote the mode here too.  */
++ mode = promote_function_mode (type, mode, &unsigned_p, func, 1);
++ }
++
++ memset (&args, 0, sizeof args);
++ return riscv_get_arg_info (&info, &args, mode, type, true, true);
++}
++
++/* Implement TARGET_PASS_BY_REFERENCE. */
++
++static bool
++riscv_pass_by_reference (cumulative_args_t cum_v, enum machine_mode mode,
++ const_tree type, bool named)
++{
++ HOST_WIDE_INT size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
++ struct riscv_arg_info info;
++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
++
++ /* ??? std_gimplify_va_arg_expr passes NULL for cum. Fortunately, we
++ never pass variadic arguments in floating-point registers, so we can
++ avoid the call to riscv_get_arg_info in this case. */
++ if (cum != NULL)
++ {
++ /* Don't pass by reference if we can use a floating-point register. */
++ riscv_get_arg_info (&info, cum, mode, type, named, false);
++ if (info.num_fprs)
++ return false;
++ }
++
++  /* Pass by reference if the data does not fit in two integer registers.  */
++ return !IN_RANGE (size, 0, 2 * UNITS_PER_WORD);
++}
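++
++/* For instance (illustrative): on RV64, 2 * UNITS_PER_WORD is 16 bytes,
++   so a 24-byte struct is passed by reference, while a 16-byte struct is
++   passed by value in up to two GPRs.  */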
++
++/* Implement TARGET_RETURN_IN_MEMORY. */
++
++static bool
++riscv_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
++{
++ CUMULATIVE_ARGS args;
++ cumulative_args_t cum = pack_cumulative_args (&args);
++
++ /* The rules for returning in memory are the same as for passing the
++ first named argument by reference. */
++ memset (&args, 0, sizeof args);
++ return riscv_pass_by_reference (cum, TYPE_MODE (type), type, true);
++}
++
++/* Implement TARGET_SETUP_INCOMING_VARARGS. */
++
++static void
++riscv_setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
++ tree type, int *pretend_size ATTRIBUTE_UNUSED,
++ int no_rtl)
++{
++ CUMULATIVE_ARGS local_cum;
++ int gp_saved;
++
++ /* The caller has advanced CUM up to, but not beyond, the last named
++ argument. Advance a local copy of CUM past the last "real" named
++ argument, to find out how many registers are left over. */
++ local_cum = *get_cumulative_args (cum);
++ riscv_function_arg_advance (pack_cumulative_args (&local_cum), mode, type, 1);
++
++  /* Find out how many registers we need to save.  */
++ gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
++
++ if (!no_rtl && gp_saved > 0)
++ {
++ rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
++ REG_PARM_STACK_SPACE (cfun->decl)
++ - gp_saved * UNITS_PER_WORD);
++ rtx mem = gen_frame_mem (BLKmode, ptr);
++ set_mem_alias_set (mem, get_varargs_alias_set ());
++
++ move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
++ mem, gp_saved);
++ }
++ if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
++ cfun->machine->varargs_size = gp_saved * UNITS_PER_WORD;
++}
++
++/* Implement TARGET_EXPAND_BUILTIN_VA_START. */
++
++static void
++riscv_va_start (tree valist, rtx nextarg)
++{
++ nextarg = plus_constant (Pmode, nextarg, -cfun->machine->varargs_size);
++ std_expand_builtin_va_start (valist, nextarg);
++}
++
++/* Make ADDR suitable for use as a call or sibcall target. */
++
++rtx
++riscv_legitimize_call_address (rtx addr)
++{
++ if (!call_insn_operand (addr, VOIDmode))
++ {
++ rtx reg = RISCV_PROLOGUE_TEMP (Pmode);
++ riscv_emit_move (reg, addr);
++ return reg;
++ }
++ return addr;
++}
++
++/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
++ in context CONTEXT. HI_RELOC indicates a high-part reloc. */
++
++static void
++riscv_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
++{
++ const char *reloc;
++
++ switch (riscv_classify_symbolic_expression (op))
++ {
++ case SYMBOL_ABSOLUTE:
++ reloc = hi_reloc ? "%hi" : "%lo";
++ break;
++
++ case SYMBOL_PCREL:
++ reloc = hi_reloc ? "%pcrel_hi" : "%pcrel_lo";
++ break;
++
++ case SYMBOL_TLS_LE:
++ reloc = hi_reloc ? "%tprel_hi" : "%tprel_lo";
++ break;
++
++ default:
++ gcc_unreachable ();
++ }
++
++ fprintf (file, "%s(", reloc);
++ output_addr_const (file, riscv_strip_unspec_address (op));
++ fputc (')', file);
++}
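++
++/* E.g. (illustrative output): an absolute symbol prints as "%hi(sym)" or
++   "%lo(sym)", a PC-relative one as "%pcrel_hi(sym)"/"%pcrel_lo(sym)",
++   and a TLS LE one as "%tprel_hi(sym)"/"%tprel_lo(sym)".  */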
++
++/* Return true if the .AQ suffix should be added to an AMO to implement the
++ acquire portion of memory model MODEL. */
++
++static bool
++riscv_memmodel_needs_amo_acquire (enum memmodel model)
++{
++ switch (model)
++ {
++ case MEMMODEL_ACQ_REL:
++ case MEMMODEL_SEQ_CST:
++ case MEMMODEL_SYNC_SEQ_CST:
++ case MEMMODEL_ACQUIRE:
++ case MEMMODEL_CONSUME:
++ case MEMMODEL_SYNC_ACQUIRE:
++ return true;
++
++ case MEMMODEL_RELEASE:
++ case MEMMODEL_SYNC_RELEASE:
++ case MEMMODEL_RELAXED:
++ return false;
++
++ default:
++ gcc_unreachable ();
++ }
++}
++
++/* Return true if a FENCE should be emitted before a memory access to
++   implement the release portion of memory model MODEL.  */
++
++static bool
++riscv_memmodel_needs_release_fence (enum memmodel model)
++{
++ switch (model)
++ {
++ case MEMMODEL_ACQ_REL:
++ case MEMMODEL_SEQ_CST:
++ case MEMMODEL_SYNC_SEQ_CST:
++ case MEMMODEL_RELEASE:
++ case MEMMODEL_SYNC_RELEASE:
++ return true;
++
++ case MEMMODEL_ACQUIRE:
++ case MEMMODEL_CONSUME:
++ case MEMMODEL_SYNC_ACQUIRE:
++ case MEMMODEL_RELAXED:
++ return false;
++
++ default:
++ gcc_unreachable ();
++ }
++}
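++
++/* Taken together, the two predicates above mean that, e.g., a seq-cst
++   atomic read-modify-write is emitted as "fence rw,w" followed by an
++   AMO carrying the ".aq" suffix, while a relaxed one gets neither
++   (illustrative; the actual emission is done by the 'A' and 'F'
++   operand codes in riscv_print_operand below).  */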
++
++/* Implement TARGET_PRINT_OPERAND.  The RISC-V-specific operand codes are:
++
++ 'h' Print the high-part relocation associated with OP, after stripping
++ any outermost HIGH.
++ 'R' Print the low-part relocation associated with OP.
++ 'C' Print the integer branch condition for comparison OP.
++ 'A' Print the atomic operation suffix for memory model OP.
++ 'F' Print a FENCE if the memory model requires a release.
++ 'z' Print x0 if OP is zero, otherwise print OP normally. */
++
++static void
++riscv_print_operand (FILE *file, rtx op, int letter)
++{
++ enum machine_mode mode = GET_MODE (op);
++ enum rtx_code code = GET_CODE (op);
++
++ switch (letter)
++ {
++ case 'h':
++ if (code == HIGH)
++ op = XEXP (op, 0);
++ riscv_print_operand_reloc (file, op, true);
++ break;
++
++ case 'R':
++ riscv_print_operand_reloc (file, op, false);
++ break;
++
++ case 'C':
++ /* The RTL names match the instruction names. */
++ fputs (GET_RTX_NAME (code), file);
++ break;
++
++ case 'A':
++ if (riscv_memmodel_needs_amo_acquire ((enum memmodel) INTVAL (op)))
++ fputs (".aq", file);
++ break;
++
++ case 'F':
++ if (riscv_memmodel_needs_release_fence ((enum memmodel) INTVAL (op)))
++ fputs ("fence rw,w; ", file);
++ break;
++
++ default:
++ switch (code)
++ {
++ case REG:
++ if (letter && letter != 'z')
++ output_operand_lossage ("invalid use of '%%%c'", letter);
++ fprintf (file, "%s", reg_names[REGNO (op)]);
++ break;
++
++ case MEM:
++ if (letter && letter != 'z')
++ output_operand_lossage ("invalid use of '%%%c'", letter);
++ else
++ output_address (mode, XEXP (op, 0));
++ break;
++
++ default:
++ if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
++ fputs (reg_names[GP_REG_FIRST], file);
++ else if (letter && letter != 'z')
++ output_operand_lossage ("invalid use of '%%%c'", letter);
++ else
++ output_addr_const (file, riscv_strip_unspec_address (op));
++ break;
++ }
++ }
++}
++
++/* Implement TARGET_PRINT_OPERAND_ADDRESS. */
++
++static void
++riscv_print_operand_address (FILE *file, machine_mode mode ATTRIBUTE_UNUSED, rtx x)
++{
++ struct riscv_address_info addr;
++
++ if (riscv_classify_address (&addr, x, word_mode, true))
++ switch (addr.type)
++ {
++ case ADDRESS_REG:
++ riscv_print_operand (file, addr.offset, 0);
++ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
++ return;
++
++ case ADDRESS_LO_SUM:
++ riscv_print_operand_reloc (file, addr.offset, false);
++ fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
++ return;
++
++ case ADDRESS_CONST_INT:
++ output_addr_const (file, x);
++ fprintf (file, "(%s)", reg_names[GP_REG_FIRST]);
++ return;
++
++ case ADDRESS_SYMBOLIC:
++ output_addr_const (file, riscv_strip_unspec_address (x));
++ return;
++ }
++ gcc_unreachable ();
++}
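++
++/* Illustrative output for the cases above: a register-plus-offset
++   address prints as "8(sp)", a LO_SUM as "%lo(sym)(a5)", and a bare
++   constant is printed against x0, e.g. "42(zero)".  */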
++
++static bool
++riscv_size_ok_for_small_data_p (int size)
++{
++ return g_switch_value && IN_RANGE (size, 1, g_switch_value);
++}
++
++/* Return true if EXP should be placed in the small data section. */
++
++static bool
++riscv_in_small_data_p (const_tree x)
++{
++ if (TREE_CODE (x) == STRING_CST || TREE_CODE (x) == FUNCTION_DECL)
++ return false;
++
++ if (TREE_CODE (x) == VAR_DECL && DECL_SECTION_NAME (x))
++ {
++ const char *sec = DECL_SECTION_NAME (x);
++ return strcmp (sec, ".sdata") == 0 || strcmp (sec, ".sbss") == 0;
++ }
++
++ return riscv_size_ok_for_small_data_p (int_size_in_bytes (TREE_TYPE (x)));
++}
++
++/* Return a section for X, handling small data. */
++
++static section *
++riscv_elf_select_rtx_section (enum machine_mode mode, rtx x,
++ unsigned HOST_WIDE_INT align)
++{
++ section *s = default_elf_select_rtx_section (mode, x, align);
++
++ if (riscv_size_ok_for_small_data_p (GET_MODE_SIZE (mode)))
++ {
++ if (strncmp (s->named.name, ".rodata.cst", strlen (".rodata.cst")) == 0)
++ {
++ /* Rename .rodata.cst* to .srodata.cst*. */
++ char *name = (char *) alloca (strlen (s->named.name) + 2);
++ sprintf (name, ".s%s", s->named.name + 1);
++ return get_section (name, s->named.common.flags, NULL);
++ }
++
++ if (s == data_section)
++ return sdata_section;
++ }
++
++ return s;
++}
++
++/* Make the last instruction frame-related and note that it performs
++ the operation described by FRAME_PATTERN. */
++
++static void
++riscv_set_frame_expr (rtx frame_pattern)
++{
++ rtx insn;
++
++ insn = get_last_insn ();
++ RTX_FRAME_RELATED_P (insn) = 1;
++ REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
++ frame_pattern,
++ REG_NOTES (insn));
++}
++
++/* Return a frame-related rtx that stores REG at MEM.
++ REG must be a single register. */
++
++static rtx
++riscv_frame_set (rtx mem, rtx reg)
++{
++ rtx set = gen_rtx_SET (mem, reg);
++ RTX_FRAME_RELATED_P (set) = 1;
++ return set;
++}
++
++/* Return true if the current function must save register REGNO. */
++
++static bool
++riscv_save_reg_p (unsigned int regno)
++{
++ bool call_saved = !global_regs[regno] && !call_used_regs[regno];
++ bool might_clobber = crtl->saves_all_registers
++ || df_regs_ever_live_p (regno);
++
++ if (call_saved && might_clobber)
++ return true;
++
++ if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
++ return true;
++
++ if (regno == RETURN_ADDR_REGNUM && crtl->calls_eh_return)
++ return true;
++
++ return false;
++}
++
++/* Determine whether to call GPR save/restore routines. */
++static bool
++riscv_use_save_libcall (const struct riscv_frame_info *frame)
++{
++ if (!TARGET_SAVE_RESTORE || crtl->calls_eh_return || frame_pointer_needed)
++ return false;
++
++ return frame->save_libcall_adjustment != 0;
++}
++
++/* Determine which GPR save/restore routine to call. */
++
++static unsigned
++riscv_save_libcall_count (unsigned mask)
++{
++ for (unsigned n = GP_REG_LAST; n > GP_REG_FIRST; n--)
++ if (BITSET_P (mask, n))
++ return CALLEE_SAVED_REG_NUMBER (n) + 1;
++ abort ();
++}
++
++/* Populate the current function's riscv_frame_info structure.
++
++   RISC-V stack frames grow downward.  High addresses are at the top.
++
++ +-------------------------------+
++ | |
++ | incoming stack arguments |
++ | |
++ +-------------------------------+ <-- incoming stack pointer
++ | |
++ | callee-allocated save area |
++ | for arguments that are |
++ | split between registers and |
++ | the stack |
++ | |
++ +-------------------------------+ <-- arg_pointer_rtx
++ | |
++ | callee-allocated save area |
++ | for register varargs |
++ | |
++ +-------------------------------+ <-- hard_frame_pointer_rtx;
++ | | stack_pointer_rtx + gp_sp_offset
++ | GPR save area | + UNITS_PER_WORD
++ | |
++ +-------------------------------+ <-- stack_pointer_rtx + fp_sp_offset
++ | | + UNITS_PER_HWVALUE
++ | FPR save area |
++ | |
++ +-------------------------------+ <-- frame_pointer_rtx (virtual)
++ | |
++ | local variables |
++ | |
++ P +-------------------------------+
++ | |
++ | outgoing stack arguments |
++ | |
++ +-------------------------------+ <-- stack_pointer_rtx
++
++ Dynamic stack allocations such as alloca insert data at point P.
++ They decrease stack_pointer_rtx but leave frame_pointer_rtx and
++ hard_frame_pointer_rtx unchanged. */
++
++static void
++riscv_compute_frame_info (void)
++{
++ struct riscv_frame_info *frame;
++ HOST_WIDE_INT offset;
++ unsigned int regno, i, num_x_saved = 0, num_f_saved = 0;
++
++ frame = &cfun->machine->frame;
++ memset (frame, 0, sizeof (*frame));
++
++ /* Find out which GPRs we need to save. */
++ for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
++ if (riscv_save_reg_p (regno))
++ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
++
++ /* If this function calls eh_return, we must also save and restore the
++ EH data registers. */
++ if (crtl->calls_eh_return)
++ for (i = 0; (regno = EH_RETURN_DATA_REGNO (i)) != INVALID_REGNUM; i++)
++ frame->mask |= 1 << (regno - GP_REG_FIRST), num_x_saved++;
++
++ /* Find out which FPRs we need to save. This loop must iterate over
++ the same space as its companion in riscv_for_each_saved_reg. */
++ if (TARGET_HARD_FLOAT)
++ for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++ if (riscv_save_reg_p (regno))
++ frame->fmask |= 1 << (regno - FP_REG_FIRST), num_f_saved++;
++
++ /* At the bottom of the frame are any outgoing stack arguments. */
++ offset = crtl->outgoing_args_size;
++ /* Next are local stack variables. */
++ offset += RISCV_STACK_ALIGN (get_frame_size ());
++ /* The virtual frame pointer points above the local variables. */
++ frame->frame_pointer_offset = offset;
++ /* Next are the callee-saved FPRs. */
++ if (frame->fmask)
++ offset += RISCV_STACK_ALIGN (num_f_saved * UNITS_PER_FP_REG);
++ frame->fp_sp_offset = offset - UNITS_PER_FP_REG;
++ /* Next are the callee-saved GPRs. */
++ if (frame->mask)
++ {
++ unsigned x_save_size = RISCV_STACK_ALIGN (num_x_saved * UNITS_PER_WORD);
++ unsigned num_save_restore = 1 + riscv_save_libcall_count (frame->mask);
++
++ /* Only use save/restore routines if they don't alter the stack size. */
++ if (RISCV_STACK_ALIGN (num_save_restore * UNITS_PER_WORD) == x_save_size)
++ frame->save_libcall_adjustment = x_save_size;
++
++ offset += x_save_size;
++ }
++ frame->gp_sp_offset = offset - UNITS_PER_WORD;
++ /* The hard frame pointer points above the callee-saved GPRs. */
++ frame->hard_frame_pointer_offset = offset;
++  /* Above the hard frame pointer is the callee-allocated varargs save area.  */
++ offset += RISCV_STACK_ALIGN (cfun->machine->varargs_size);
++ frame->arg_pointer_offset = offset;
++ /* Next is the callee-allocated area for pretend stack arguments. */
++ offset += crtl->args.pretend_args_size;
++ frame->total_size = offset;
++  /* Next come the incoming stack pointer and any incoming arguments.  */
++
++ /* Only use save/restore routines when the GPRs are atop the frame. */
++ if (frame->hard_frame_pointer_offset != frame->total_size)
++ frame->save_libcall_adjustment = 0;
++}
++
++/* Make sure that we're not trying to eliminate to the wrong hard frame
++ pointer. */
++
++static bool
++riscv_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
++{
++ return (to == HARD_FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
++}
++
++/* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame pointer
++ or argument pointer. TO is either the stack pointer or hard frame
++ pointer. */
++
++HOST_WIDE_INT
++riscv_initial_elimination_offset (int from, int to)
++{
++ HOST_WIDE_INT src, dest;
++
++ riscv_compute_frame_info ();
++
++ if (to == HARD_FRAME_POINTER_REGNUM)
++ dest = cfun->machine->frame.hard_frame_pointer_offset;
++ else if (to == STACK_POINTER_REGNUM)
++ dest = 0; /* The stack pointer is the base of all offsets, hence 0. */
++ else
++ gcc_unreachable ();
++
++ if (from == FRAME_POINTER_REGNUM)
++ src = cfun->machine->frame.frame_pointer_offset;
++ else if (from == ARG_POINTER_REGNUM)
++ src = cfun->machine->frame.arg_pointer_offset;
++ else
++ gcc_unreachable ();
++
++ return src - dest;
++}
++
++/* Implement RETURN_ADDR_RTX. We do not support moving back to a
++ previous frame. */
++
++rtx
++riscv_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
++{
++ if (count != 0)
++ return const0_rtx;
++
++ return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM);
++}
++
++/* Emit code to change the current function's return address to
++ ADDRESS. SCRATCH is available as a scratch register, if needed.
++ ADDRESS and SCRATCH are both word-mode GPRs. */
++
++void
++riscv_set_return_address (rtx address, rtx scratch)
++{
++ rtx slot_address;
++
++ gcc_assert (BITSET_P (cfun->machine->frame.mask, RETURN_ADDR_REGNUM));
++ slot_address = riscv_add_offset (scratch, stack_pointer_rtx,
++ cfun->machine->frame.gp_sp_offset);
++ riscv_emit_move (gen_frame_mem (GET_MODE (address), slot_address), address);
++}
++
++/* A function to save or restore a register.  The first argument is the
++   register and the second is the stack slot.  */
++typedef void (*riscv_save_restore_fn) (rtx, rtx);
++
++/* Use FN to save or restore register REGNO. MODE is the register's
++ mode and OFFSET is the offset of its save slot from the current
++ stack pointer. */
++
++static void
++riscv_save_restore_reg (enum machine_mode mode, int regno,
++ HOST_WIDE_INT offset, riscv_save_restore_fn fn)
++{
++ rtx mem;
++
++ mem = gen_frame_mem (mode, plus_constant (Pmode, stack_pointer_rtx, offset));
++ fn (gen_rtx_REG (mode, regno), mem);
++}
++
++/* Call FN for each register that is saved by the current function.
++ SP_OFFSET is the offset of the current stack pointer from the start
++ of the frame. */
++
++static void
++riscv_for_each_saved_reg (HOST_WIDE_INT sp_offset, riscv_save_restore_fn fn)
++{
++ HOST_WIDE_INT offset;
++
++ /* Save the link register and s-registers. */
++ offset = cfun->machine->frame.gp_sp_offset - sp_offset;
++ for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
++ {
++ riscv_save_restore_reg (word_mode, regno, offset, fn);
++ offset -= UNITS_PER_WORD;
++ }
++
++ /* This loop must iterate over the same space as its companion in
++ riscv_compute_frame_info. */
++ offset = cfun->machine->frame.fp_sp_offset - sp_offset;
++ for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++ if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
++ {
++ enum machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
++
++ riscv_save_restore_reg (mode, regno, offset, fn);
++ offset -= GET_MODE_SIZE (mode);
++ }
++}
++
++/* Save register REG to MEM. Make the instruction frame-related. */
++
++static void
++riscv_save_reg (rtx reg, rtx mem)
++{
++ riscv_emit_move (mem, reg);
++ riscv_set_frame_expr (riscv_frame_set (mem, reg));
++}
++
++/* Restore register REG from MEM. */
++
++static void
++riscv_restore_reg (rtx reg, rtx mem)
++{
++ rtx insn = riscv_emit_move (reg, mem);
++ rtx dwarf = NULL_RTX;
++ dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
++ REG_NOTES (insn) = dwarf;
++
++ RTX_FRAME_RELATED_P (insn) = 1;
++}
++
++/* Return the code to invoke the GPR save routine. */
++
++const char *
++riscv_output_gpr_save (unsigned mask)
++{
++ static char s[32];
++ unsigned n = riscv_save_libcall_count (mask);
++
++ ssize_t bytes = snprintf (s, sizeof (s), "call\tt0,__riscv_save_%u", n);
++ gcc_assert ((size_t) bytes < sizeof (s));
++
++ return s;
++}
++
++/* For stack frames that can't be allocated with a single ADDI instruction,
++ compute the best value to initially allocate. It must at a minimum
++ allocate enough space to spill the callee-saved registers. */
++
++static HOST_WIDE_INT
++riscv_first_stack_step (struct riscv_frame_info *frame)
++{
++ HOST_WIDE_INT min_first_step = frame->total_size - frame->fp_sp_offset;
++ HOST_WIDE_INT max_first_step = IMM_REACH / 2 - STACK_BOUNDARY / 8;
++
++ if (SMALL_OPERAND (frame->total_size))
++ return frame->total_size;
++
++ /* As an optimization, use the least-significant bits of the total frame
++ size, so that the second adjustment step is just LUI + ADD. */
++ if (!SMALL_OPERAND (frame->total_size - max_first_step)
++ && frame->total_size % IMM_REACH < IMM_REACH / 2
++ && frame->total_size % IMM_REACH >= min_first_step)
++ return frame->total_size % IMM_REACH;
++
++ gcc_assert (min_first_step <= max_first_step);
++ return max_first_step;
++}
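++
++/* A worked example (illustrative, assuming IMM_REACH is 4096, i.e.
++   12-bit immediates, and STACK_BOUNDARY / 8 == 16): for a 5000-byte
++   frame, 5000 is not a SMALL_OPERAND, and 5000 % 4096 == 904 falls in
++   the low half of IMM_REACH, so the first step is 904 (provided it
++   covers the register save area) and the second step of 4096 needs
++   just a LUI + ADD.  */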
++
++static rtx
++riscv_adjust_libcall_cfi_prologue ()
++{
++ rtx dwarf = NULL_RTX;
++ rtx adjust_sp_rtx, reg, mem, insn;
++ int saved_size = cfun->machine->frame.save_libcall_adjustment;
++ int offset;
++
++ for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
++ {
++ /* The save order is ra, s0, s1, s2 to s11. */
++ if (regno == RETURN_ADDR_REGNUM)
++ offset = saved_size - UNITS_PER_WORD;
++ else if (regno == S0_REGNUM)
++ offset = saved_size - UNITS_PER_WORD * 2;
++ else if (regno == S1_REGNUM)
++ offset = saved_size - UNITS_PER_WORD * 3;
++ else
++ offset = saved_size - ((regno - S2_REGNUM + 4) * UNITS_PER_WORD);
++
++ reg = gen_rtx_REG (SImode, regno);
++ mem = gen_frame_mem (SImode, plus_constant (Pmode,
++ stack_pointer_rtx,
++ offset));
++
++ insn = gen_rtx_SET (mem, reg);
++ dwarf = alloc_reg_note (REG_CFA_OFFSET, insn, dwarf);
++ }
++
++  /* Debug info for the stack pointer adjustment.  */
++ adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx, GEN_INT (-saved_size));
++ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
++ dwarf);
++ return dwarf;
++}
++
++static void
++riscv_emit_stack_tie (void)
++{
++ if (Pmode == SImode)
++ emit_insn (gen_stack_tiesi (stack_pointer_rtx, hard_frame_pointer_rtx));
++ else
++ emit_insn (gen_stack_tiedi (stack_pointer_rtx, hard_frame_pointer_rtx));
++}
++
++/* Expand the "prologue" pattern. */
++
++void
++riscv_expand_prologue (void)
++{
++ struct riscv_frame_info *frame = &cfun->machine->frame;
++ HOST_WIDE_INT size = frame->total_size;
++ unsigned mask = frame->mask;
++ rtx insn;
++
++ if (flag_stack_usage_info)
++ current_function_static_stack_size = size;
++
++ /* When optimizing for size, call a subroutine to save the registers. */
++ if (riscv_use_save_libcall (frame))
++ {
++ rtx dwarf = NULL_RTX;
++ dwarf = riscv_adjust_libcall_cfi_prologue ();
++
++ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
++ size -= frame->save_libcall_adjustment;
++ insn = emit_insn (gen_gpr_save (GEN_INT (mask)));
++
++ RTX_FRAME_RELATED_P (insn) = 1;
++ REG_NOTES (insn) = dwarf;
++ }
++
++ /* Save the registers. */
++ if ((frame->mask | frame->fmask) != 0)
++ {
++ HOST_WIDE_INT step1 = MIN (size, riscv_first_stack_step (frame));
++
++ insn = gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ GEN_INT (-step1));
++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++ size -= step1;
++ riscv_for_each_saved_reg (size, riscv_save_reg);
++ }
++
++ frame->mask = mask; /* Undo the above fib. */
++
++ /* Set up the frame pointer, if we're using one. */
++ if (frame_pointer_needed)
++ {
++ insn = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx,
++ GEN_INT (frame->hard_frame_pointer_offset - size));
++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++
++ riscv_emit_stack_tie ();
++ }
++
++ /* Allocate the rest of the frame. */
++ if (size > 0)
++ {
++ if (SMALL_OPERAND (-size))
++ {
++ insn = gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ GEN_INT (-size));
++ RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
++ }
++ else
++ {
++ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
++ emit_insn (gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx,
++ RISCV_PROLOGUE_TEMP (Pmode)));
++
++ /* Describe the effect of the previous instructions. */
++ insn = plus_constant (Pmode, stack_pointer_rtx, -size);
++ insn = gen_rtx_SET (stack_pointer_rtx, insn);
++ riscv_set_frame_expr (insn);
++ }
++ }
++}
++
++static rtx
++riscv_adjust_libcall_cfi_epilogue ()
++{
++ rtx dwarf = NULL_RTX;
++ rtx adjust_sp_rtx, reg;
++ int saved_size = cfun->machine->frame.save_libcall_adjustment;
++
++  /* Debug info for the stack pointer adjustment.  */
++ adjust_sp_rtx = gen_add3_insn (stack_pointer_rtx,
++ stack_pointer_rtx, GEN_INT (saved_size));
++ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, adjust_sp_rtx,
++ dwarf);
++
++ for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST-1; regno++)
++ if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
++ {
++ reg = gen_rtx_REG (SImode, regno);
++ dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
++ }
++
++ return dwarf;
++}
++
++/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
++ says which. */
++
++void
++riscv_expand_epilogue (bool sibcall_p)
++{
++ /* Split the frame into two. STEP1 is the amount of stack we should
++ deallocate before restoring the registers. STEP2 is the amount we
++ should deallocate afterwards.
++
++ Start off by assuming that no registers need to be restored. */
++ struct riscv_frame_info *frame = &cfun->machine->frame;
++ unsigned mask = frame->mask;
++ HOST_WIDE_INT step1 = frame->total_size;
++ HOST_WIDE_INT step2 = 0;
++ bool use_restore_libcall = !sibcall_p && riscv_use_save_libcall (frame);
++ rtx ra = gen_rtx_REG (Pmode, RETURN_ADDR_REGNUM);
++ rtx insn;
++
++  /* We need a memory barrier to prevent reads from the deallocated stack.  */
++ bool need_barrier_p = (get_frame_size ()
++ + cfun->machine->frame.arg_pointer_offset) != 0;
++
++ if (!sibcall_p && riscv_can_use_return_insn ())
++ {
++ emit_jump_insn (gen_return ());
++ return;
++ }
++
++ /* Move past any dynamic stack allocations. */
++ if (cfun->calls_alloca)
++ {
++ /* Emit a barrier to prevent loads from a deallocated stack. */
++ riscv_emit_stack_tie ();
++ need_barrier_p = false;
++
++ rtx adjust = GEN_INT (-frame->hard_frame_pointer_offset);
++ if (!SMALL_OPERAND (INTVAL (adjust)))
++ {
++ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
++ adjust = RISCV_PROLOGUE_TEMP (Pmode);
++ }
++
++ insn = emit_insn (
++ gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx,
++ adjust));
++
++ rtx dwarf = NULL_RTX;
++ rtx cfa_adjust_value = gen_rtx_PLUS (
++ Pmode, hard_frame_pointer_rtx,
++ GEN_INT (-frame->hard_frame_pointer_offset));
++ rtx cfa_adjust_rtx = gen_rtx_SET (stack_pointer_rtx, cfa_adjust_value);
++ dwarf = alloc_reg_note (REG_CFA_ADJUST_CFA, cfa_adjust_rtx, dwarf);
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ REG_NOTES (insn) = dwarf;
++ }
++
++ /* If we need to restore registers, deallocate as much stack as
++ possible in the second step without going out of range. */
++ if ((frame->mask | frame->fmask) != 0)
++ {
++ step2 = riscv_first_stack_step (frame);
++ step1 -= step2;
++ }
++
++ /* Set TARGET to BASE + STEP1. */
++ if (step1 > 0)
++ {
++ /* Emit a barrier to prevent loads from a deallocated stack. */
++ riscv_emit_stack_tie ();
++ need_barrier_p = false;
++
++ /* Get an rtx for STEP1 that we can add to BASE. */
++ rtx adjust = GEN_INT (step1);
++ if (!SMALL_OPERAND (step1))
++ {
++ riscv_emit_move (RISCV_PROLOGUE_TEMP (Pmode), adjust);
++ adjust = RISCV_PROLOGUE_TEMP (Pmode);
++ }
++
++ insn = emit_insn (
++ gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, adjust));
++
++ rtx dwarf = NULL_RTX;
++ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
++ GEN_INT (step2));
++
++ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ REG_NOTES (insn) = dwarf;
++ }
++
++ if (use_restore_libcall)
++ frame->mask = 0; /* Temporarily fib that we need not save GPRs. */
++
++ /* Restore the registers. */
++ riscv_for_each_saved_reg (frame->total_size - step2, riscv_restore_reg);
++
++ if (use_restore_libcall)
++ {
++ frame->mask = mask; /* Undo the above fib. */
++ gcc_assert (step2 >= frame->save_libcall_adjustment);
++ step2 -= frame->save_libcall_adjustment;
++ }
++
++ if (need_barrier_p)
++ riscv_emit_stack_tie ();
++
++ /* Deallocate the final bit of the frame. */
++ if (step2 > 0)
++ {
++ insn = emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ GEN_INT (step2)));
++
++ rtx dwarf = NULL_RTX;
++ rtx cfa_adjust_rtx = gen_rtx_PLUS (Pmode, stack_pointer_rtx,
++ const0_rtx);
++ dwarf = alloc_reg_note (REG_CFA_DEF_CFA, cfa_adjust_rtx, dwarf);
++ RTX_FRAME_RELATED_P (insn) = 1;
++
++ REG_NOTES (insn) = dwarf;
++ }
++
++ if (use_restore_libcall)
++ {
++ rtx dwarf = riscv_adjust_libcall_cfi_epilogue ();
++ insn = emit_insn (gen_gpr_restore (GEN_INT (riscv_save_libcall_count (mask))));
++ RTX_FRAME_RELATED_P (insn) = 1;
++ REG_NOTES (insn) = dwarf;
++
++ emit_jump_insn (gen_gpr_restore_return (ra));
++ return;
++ }
++
++ /* Add in the __builtin_eh_return stack adjustment. */
++ if (crtl->calls_eh_return)
++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
++ EH_RETURN_STACKADJ_RTX));
++
++ if (!sibcall_p)
++ emit_jump_insn (gen_simple_return_internal (ra));
++}
++
++/* Return nonzero if this function is known to have a null epilogue.
++ This allows the optimizer to omit jumps to jumps if no stack
++ was created. */
++
++bool
++riscv_can_use_return_insn (void)
++{
++ return reload_completed && cfun->machine->frame.total_size == 0;
++}
++
++/* Implement TARGET_REGISTER_MOVE_COST. */
++
++static int
++riscv_register_move_cost (enum machine_mode mode,
++ reg_class_t from, reg_class_t to)
++{
++ return SECONDARY_MEMORY_NEEDED (from, to, mode) ? 8 : 2;
++}
++
++/* Return true if register REGNO can store a value of mode MODE. */
++
++bool
++riscv_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
++{
++ unsigned int nregs = riscv_hard_regno_nregs (regno, mode);
++
++ if (GP_REG_P (regno))
++ {
++ if (!GP_REG_P (regno + nregs - 1))
++ return false;
++ }
++ else if (FP_REG_P (regno))
++ {
++ if (!FP_REG_P (regno + nregs - 1))
++ return false;
++
++ if (GET_MODE_CLASS (mode) != MODE_FLOAT
++ && GET_MODE_CLASS (mode) != MODE_COMPLEX_FLOAT)
++ return false;
++
++ /* Only use callee-saved registers if a potential callee is guaranteed
++ to spill the requisite width. */
++ if (GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_REG
++ || (!call_used_regs[regno]
++ && GET_MODE_UNIT_SIZE (mode) > UNITS_PER_FP_ARG))
++ return false;
++ }
++ else
++ return false;
++
++ /* Require same callee-savedness for all registers. */
++ for (unsigned i = 1; i < nregs; i++)
++ if (call_used_regs[regno] != call_used_regs[regno + i])
++ return false;
++
++ return true;
++}
++
++/* Implement HARD_REGNO_NREGS. */
++
++unsigned int
++riscv_hard_regno_nregs (int regno, enum machine_mode mode)
++{
++ if (FP_REG_P (regno))
++ return (GET_MODE_SIZE (mode) + UNITS_PER_FP_REG - 1) / UNITS_PER_FP_REG;
++
++ /* All other registers are word-sized. */
++ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
++}
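++
++/* For example (illustrative, assuming 64-bit FPRs): a DFmode value
++   occupies one FPR, but two GPRs on RV32 and one GPR on RV64.  */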
++
++/* Implement CLASS_MAX_NREGS. */
++
++static unsigned char
++riscv_class_max_nregs (reg_class_t rclass, enum machine_mode mode)
++{
++ if (reg_class_subset_p (FP_REGS, rclass))
++ return riscv_hard_regno_nregs (FP_REG_FIRST, mode);
++
++ if (reg_class_subset_p (GR_REGS, rclass))
++ return riscv_hard_regno_nregs (GP_REG_FIRST, mode);
++
++ return 0;
++}
++
++/* Implement TARGET_PREFERRED_RELOAD_CLASS. */
++
++static reg_class_t
++riscv_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
++{
++ return reg_class_subset_p (FP_REGS, rclass) ? FP_REGS :
++ reg_class_subset_p (GR_REGS, rclass) ? GR_REGS :
++ rclass;
++}
++
++/* Implement TARGET_MEMORY_MOVE_COST. */
++
++static int
++riscv_memory_move_cost (enum machine_mode mode, reg_class_t rclass, bool in)
++{
++ return (tune_info->memory_cost
++ + memory_move_secondary_cost (mode, rclass, in));
++}
++
++/* Return the number of instructions that can be issued per cycle. */
++
++static int
++riscv_issue_rate (void)
++{
++ return tune_info->issue_rate;
++}
++
++/* Implement TARGET_ASM_FILE_START. */
++
++static void
++riscv_file_start (void)
++{
++ default_file_start ();
++
++ /* Instruct GAS to generate position-[in]dependent code. */
++ fprintf (asm_out_file, "\t.option %spic\n", (flag_pic ? "" : "no"));
++}
++
++/* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
++ in order to avoid duplicating too much logic from elsewhere. */
++
++static void
++riscv_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
++ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
++ tree function)
++{
++ rtx this_rtx, temp1, temp2, fnaddr;
++ rtx_insn *insn;
++
++ /* Pretend to be a post-reload pass while generating rtl. */
++ reload_completed = 1;
++
++ /* Mark the end of the (empty) prologue. */
++ emit_note (NOTE_INSN_PROLOGUE_END);
++
++ /* Determine if we can use a sibcall to call FUNCTION directly. */
++ fnaddr = gen_rtx_MEM (FUNCTION_MODE, XEXP (DECL_RTL (function), 0));
++
++ /* We need two temporary registers in some cases. */
++ temp1 = gen_rtx_REG (Pmode, RISCV_PROLOGUE_TEMP_REGNUM);
++ temp2 = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
++
++ /* Find out which register contains the "this" pointer. */
++ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
++ else
++ this_rtx = gen_rtx_REG (Pmode, GP_ARG_FIRST);
++
++ /* Add DELTA to THIS_RTX. */
++ if (delta != 0)
++ {
++ rtx offset = GEN_INT (delta);
++ if (!SMALL_OPERAND (delta))
++ {
++ riscv_emit_move (temp1, offset);
++ offset = temp1;
++ }
++ emit_insn (gen_add3_insn (this_rtx, this_rtx, offset));
++ }
++
++ /* If needed, add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
++ if (vcall_offset != 0)
++ {
++ rtx addr;
++
++ /* Set TEMP1 to *THIS_RTX. */
++ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, this_rtx));
++
++ /* Set ADDR to a legitimate address for *THIS_RTX + VCALL_OFFSET. */
++ addr = riscv_add_offset (temp2, temp1, vcall_offset);
++
++ /* Load the offset and add it to THIS_RTX. */
++ riscv_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
++ emit_insn (gen_add3_insn (this_rtx, this_rtx, temp1));
++ }
++
++ /* Jump to the target function. */
++ insn = emit_call_insn (gen_sibcall (fnaddr, const0_rtx, NULL, const0_rtx));
++ SIBLING_CALL_P (insn) = 1;
++
++ /* Run just enough of rest_of_compilation. This sequence was
++ "borrowed" from alpha.c. */
++ insn = get_insns ();
++ split_all_insns_noflow ();
++ shorten_branches (insn);
++ final_start_function (insn, file, 1);
++ final (insn, file, 1);
++ final_end_function ();
++
++ /* Clean up the vars set above. Note that final_end_function resets
++ the global pointer for us. */
++ reload_completed = 0;
++}
++
++/* Allocate a chunk of memory for per-function machine-dependent data. */
++
++static struct machine_function *
++riscv_init_machine_status (void)
++{
++ return ggc_cleared_alloc<machine_function> ();
++}
++
++/* Implement TARGET_OPTION_OVERRIDE. */
++
++static void
++riscv_option_override (void)
++{
++ const struct riscv_cpu_info *cpu;
++
++#ifdef SUBTARGET_OVERRIDE_OPTIONS
++ SUBTARGET_OVERRIDE_OPTIONS;
++#endif
++
++ flag_pcc_struct_return = 0;
++
++ if (flag_pic)
++ g_switch_value = 0;
++
++ /* The presence of the M extension implies that division instructions
++ are present, so include them unless explicitly disabled. */
++ if (TARGET_MUL && (target_flags_explicit & MASK_DIV) == 0)
++ target_flags |= MASK_DIV;
++ else if (!TARGET_MUL && TARGET_DIV)
++ error ("-mdiv requires -march to subsume the %<M%> extension");
++
++ /* Likewise floating-point division and square root. */
++ if (TARGET_HARD_FLOAT && (target_flags_explicit & MASK_FDIV) == 0)
++ target_flags |= MASK_FDIV;
++
++ /* Handle -mtune. */
++ cpu = riscv_parse_cpu (riscv_tune_string ? riscv_tune_string :
++ RISCV_TUNE_STRING_DEFAULT);
++ tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
++
++ /* If the user hasn't specified a branch cost, use the processor's
++ default. */
++ if (riscv_branch_cost == 0)
++ riscv_branch_cost = tune_info->branch_cost;
++
++ /* Function to allocate machine-dependent function status. */
++ init_machine_status = &riscv_init_machine_status;
++
++ if (flag_pic)
++ riscv_cmodel = CM_PIC;
++
++ /* We get better code with explicit relocs for CM_MEDLOW, but
++ worse code for the others (for now). Pick the best default. */
++ if ((target_flags_explicit & MASK_EXPLICIT_RELOCS) == 0)
++ if (riscv_cmodel == CM_MEDLOW)
++ target_flags |= MASK_EXPLICIT_RELOCS;
++
++ /* Require that the ISA supports the requested floating-point ABI. */
++ if (UNITS_PER_FP_ARG > (TARGET_HARD_FLOAT ? UNITS_PER_FP_REG : 0))
++ error ("requested ABI requires -march to subsume the %qc extension",
++ UNITS_PER_FP_ARG > 8 ? 'Q' : (UNITS_PER_FP_ARG > 4 ? 'D' : 'F'));
++
++ /* We do not yet support ILP32 on RV64. */
++ if (BITS_PER_WORD != POINTER_SIZE)
++ error ("ABI requires -march=rv%d", POINTER_SIZE);
++}
++
++/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
++
++static void
++riscv_conditional_register_usage (void)
++{
++ if (!TARGET_HARD_FLOAT)
++ {
++ for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++ fixed_regs[regno] = call_used_regs[regno] = 1;
++ }
++}
++
++/* Return a register priority for hard reg REGNO. */
++
++static int
++riscv_register_priority (int regno)
++{
++ /* Favor x8-x15/f8-f15 to improve the odds of RVC instruction selection. */
++ if (TARGET_RVC && (IN_RANGE (regno, GP_REG_FIRST + 8, GP_REG_FIRST + 15)
++ || IN_RANGE (regno, FP_REG_FIRST + 8, FP_REG_FIRST + 15)))
++ return 1;
++
++ return 0;
++}
++
++/* Implement TARGET_TRAMPOLINE_INIT. */
++
++static void
++riscv_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
++{
++ rtx addr, end_addr, mem;
++ uint32_t trampoline[4];
++ unsigned int i;
++ HOST_WIDE_INT static_chain_offset, target_function_offset;
++
++ /* Work out the offsets of the pointers from the start of the
++ trampoline code. */
++ gcc_assert (ARRAY_SIZE (trampoline) * 4 == TRAMPOLINE_CODE_SIZE);
++
++ /* Get pointers to the beginning and end of the code block. */
++ addr = force_reg (Pmode, XEXP (m_tramp, 0));
++ end_addr = riscv_force_binary (Pmode, PLUS, addr,
++ GEN_INT (TRAMPOLINE_CODE_SIZE));
++
++
++ if (Pmode == SImode)
++ {
++ chain_value = force_reg (Pmode, chain_value);
++
++ rtx target_function = force_reg (Pmode, XEXP (DECL_RTL (fndecl), 0));
++ /* lui t2, hi(chain)
++ lui t1, hi(func)
++ addi t2, t2, lo(chain)
++ jr r1, lo(func)
++ */
++ unsigned HOST_WIDE_INT lui_hi_chain_code, lui_hi_func_code;
++ unsigned HOST_WIDE_INT lo_chain_code, lo_func_code;
++
++ rtx uimm_mask = force_reg (SImode, gen_int_mode (-IMM_REACH, SImode));
++
++      /* imm12_mask is a mask of the low 12 bits, i.e. 0xfff.  */
++ rtx imm12_mask = gen_reg_rtx (SImode);
++ emit_insn (gen_one_cmplsi2 (imm12_mask, uimm_mask));
++
++ rtx fixup_value = force_reg (SImode, gen_int_mode (IMM_REACH/2, SImode));
++
++ /* Gen lui t2, hi(chain). */
++ rtx hi_chain = riscv_force_binary (SImode, PLUS, chain_value,
++ fixup_value);
++ hi_chain = riscv_force_binary (SImode, AND, hi_chain,
++ uimm_mask);
++ lui_hi_chain_code = OPCODE_LUI | (STATIC_CHAIN_REGNUM << SHIFT_RD);
++ rtx lui_hi_chain = riscv_force_binary (SImode, IOR, hi_chain,
++ gen_int_mode (lui_hi_chain_code, SImode));
++
++ mem = adjust_address (m_tramp, SImode, 0);
++ riscv_emit_move (mem, lui_hi_chain);
++
++ /* Gen lui t1, hi(func). */
++ rtx hi_func = riscv_force_binary (SImode, PLUS, target_function,
++ fixup_value);
++ hi_func = riscv_force_binary (SImode, AND, hi_func,
++ uimm_mask);
++ lui_hi_func_code = OPCODE_LUI | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD);
++ rtx lui_hi_func = riscv_force_binary (SImode, IOR, hi_func,
++ gen_int_mode (lui_hi_func_code, SImode));
++
++ mem = adjust_address (m_tramp, SImode, 1 * GET_MODE_SIZE (SImode));
++ riscv_emit_move (mem, lui_hi_func);
++
++ /* Gen addi t2, t2, lo(chain). */
++ rtx lo_chain = riscv_force_binary (SImode, AND, chain_value,
++ imm12_mask);
++ lo_chain = riscv_force_binary (SImode, ASHIFT, lo_chain, GEN_INT (20));
++
++ lo_chain_code = OPCODE_ADDI
++ | (STATIC_CHAIN_REGNUM << SHIFT_RD)
++ | (STATIC_CHAIN_REGNUM << SHIFT_RS1);
++
++ rtx addi_lo_chain = riscv_force_binary (SImode, IOR, lo_chain,
++ force_reg (SImode, GEN_INT (lo_chain_code)));
++
++ mem = adjust_address (m_tramp, SImode, 2 * GET_MODE_SIZE (SImode));
++ riscv_emit_move (mem, addi_lo_chain);
++
++ /* Gen jr r1, lo(func). */
++ rtx lo_func = riscv_force_binary (SImode, AND, target_function,
++ imm12_mask);
++ lo_func = riscv_force_binary (SImode, ASHIFT, lo_func, GEN_INT (20));
++
++ lo_func_code = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
++
++ rtx jr_lo_func = riscv_force_binary (SImode, IOR, lo_func,
++ force_reg (SImode, GEN_INT (lo_func_code)));
++
++ mem = adjust_address (m_tramp, SImode, 3 * GET_MODE_SIZE (SImode));
++ riscv_emit_move (mem, jr_lo_func);
++ }
++ else
++ {
++ static_chain_offset = TRAMPOLINE_CODE_SIZE;
++ target_function_offset = static_chain_offset + GET_MODE_SIZE (ptr_mode);
++
++ /* auipc t2, 0
++ l[wd] t1, target_function_offset(t2)
++ l[wd] t2, static_chain_offset(t2)
++ jr t1
++ */
++ trampoline[0] = OPCODE_AUIPC | (STATIC_CHAIN_REGNUM << SHIFT_RD);
++ trampoline[1] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
++ | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RD)
++ | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
++ | (target_function_offset << SHIFT_IMM);
++ trampoline[2] = (Pmode == DImode ? OPCODE_LD : OPCODE_LW)
++ | (STATIC_CHAIN_REGNUM << SHIFT_RD)
++ | (STATIC_CHAIN_REGNUM << SHIFT_RS1)
++ | (static_chain_offset << SHIFT_IMM);
++ trampoline[3] = OPCODE_JALR | (RISCV_PROLOGUE_TEMP_REGNUM << SHIFT_RS1);
++
++ /* Copy the trampoline code. */
++ for (i = 0; i < ARRAY_SIZE (trampoline); i++)
++ {
++ mem = adjust_address (m_tramp, SImode, i * GET_MODE_SIZE (SImode));
++ riscv_emit_move (mem, gen_int_mode (trampoline[i], SImode));
++ }
++
++ /* Set up the static chain pointer field. */
++ mem = adjust_address (m_tramp, ptr_mode, static_chain_offset);
++ riscv_emit_move (mem, chain_value);
++
++ /* Set up the target function field. */
++ mem = adjust_address (m_tramp, ptr_mode, target_function_offset);
++ riscv_emit_move (mem, XEXP (DECL_RTL (fndecl), 0));
++ }
++
++ /* Flush the code part of the trampoline. */
++ emit_insn (gen_add3_insn (end_addr, addr, GEN_INT (TRAMPOLINE_SIZE)));
++ emit_insn (gen_clear_cache (addr, end_addr));
++}
++
++/* Return leaf_function_p () and memoize the result. */
++
++static bool
++riscv_leaf_function_p (void)
++{
++ if (cfun->machine->is_leaf == 0)
++ cfun->machine->is_leaf = leaf_function_p () ? 1 : -1;
++
++ return cfun->machine->is_leaf > 0;
++}
++
++/* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
++
++static bool
++riscv_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
++ tree exp ATTRIBUTE_UNUSED)
++{
++  /* When optimizing for size, don't use sibcalls in non-leaf routines.  */
++ if (TARGET_SAVE_RESTORE)
++ return riscv_leaf_function_p ();
++
++ return true;
++}
++
++/* Implement TARGET_CANNOT_COPY_INSN_P. */
++
++static bool
++riscv_cannot_copy_insn_p (rtx_insn *insn)
++{
++ return recog_memoized (insn) >= 0 && get_attr_cannot_copy (insn);
++}
++
++/* Initialize the GCC target structure. */
++#undef TARGET_ASM_ALIGNED_HI_OP
++#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
++#undef TARGET_ASM_ALIGNED_SI_OP
++#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
++#undef TARGET_ASM_ALIGNED_DI_OP
++#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
++
++#undef TARGET_OPTION_OVERRIDE
++#define TARGET_OPTION_OVERRIDE riscv_option_override
++
++#undef TARGET_LEGITIMIZE_ADDRESS
++#define TARGET_LEGITIMIZE_ADDRESS riscv_legitimize_address
++
++#undef TARGET_SCHED_ISSUE_RATE
++#define TARGET_SCHED_ISSUE_RATE riscv_issue_rate
++
++#undef TARGET_FUNCTION_OK_FOR_SIBCALL
++#define TARGET_FUNCTION_OK_FOR_SIBCALL riscv_function_ok_for_sibcall
++
++#undef TARGET_REGISTER_MOVE_COST
++#define TARGET_REGISTER_MOVE_COST riscv_register_move_cost
++#undef TARGET_MEMORY_MOVE_COST
++#define TARGET_MEMORY_MOVE_COST riscv_memory_move_cost
++#undef TARGET_RTX_COSTS
++#define TARGET_RTX_COSTS riscv_rtx_costs
++#undef TARGET_ADDRESS_COST
++#define TARGET_ADDRESS_COST riscv_address_cost
++
++#undef TARGET_PREFERRED_RELOAD_CLASS
++#define TARGET_PREFERRED_RELOAD_CLASS riscv_preferred_reload_class
++
++#undef TARGET_ASM_FILE_START
++#define TARGET_ASM_FILE_START riscv_file_start
++#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
++#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
++
++#undef TARGET_EXPAND_BUILTIN_VA_START
++#define TARGET_EXPAND_BUILTIN_VA_START riscv_va_start
++
++#undef TARGET_PROMOTE_FUNCTION_MODE
++#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
++
++#undef TARGET_RETURN_IN_MEMORY
++#define TARGET_RETURN_IN_MEMORY riscv_return_in_memory
++
++#undef TARGET_ASM_OUTPUT_MI_THUNK
++#define TARGET_ASM_OUTPUT_MI_THUNK riscv_output_mi_thunk
++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
++
++#undef TARGET_PRINT_OPERAND
++#define TARGET_PRINT_OPERAND riscv_print_operand
++#undef TARGET_PRINT_OPERAND_ADDRESS
++#define TARGET_PRINT_OPERAND_ADDRESS riscv_print_operand_address
++
++#undef TARGET_SETUP_INCOMING_VARARGS
++#define TARGET_SETUP_INCOMING_VARARGS riscv_setup_incoming_varargs
++#undef TARGET_STRICT_ARGUMENT_NAMING
++#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
++#undef TARGET_MUST_PASS_IN_STACK
++#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
++#undef TARGET_PASS_BY_REFERENCE
++#define TARGET_PASS_BY_REFERENCE riscv_pass_by_reference
++#undef TARGET_ARG_PARTIAL_BYTES
++#define TARGET_ARG_PARTIAL_BYTES riscv_arg_partial_bytes
++#undef TARGET_FUNCTION_ARG
++#define TARGET_FUNCTION_ARG riscv_function_arg
++#undef TARGET_FUNCTION_ARG_ADVANCE
++#define TARGET_FUNCTION_ARG_ADVANCE riscv_function_arg_advance
++#undef TARGET_FUNCTION_ARG_BOUNDARY
++#define TARGET_FUNCTION_ARG_BOUNDARY riscv_function_arg_boundary
++
++/* The generic ELF target does not always have TLS support. */
++#ifdef HAVE_AS_TLS
++#undef TARGET_HAVE_TLS
++#define TARGET_HAVE_TLS true
++#endif
++
++#undef TARGET_CANNOT_FORCE_CONST_MEM
++#define TARGET_CANNOT_FORCE_CONST_MEM riscv_cannot_force_const_mem
++
++#undef TARGET_LEGITIMATE_CONSTANT_P
++#define TARGET_LEGITIMATE_CONSTANT_P riscv_legitimate_constant_p
++
++#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
++#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true
++
++#undef TARGET_LEGITIMATE_ADDRESS_P
++#define TARGET_LEGITIMATE_ADDRESS_P riscv_legitimate_address_p
++
++#undef TARGET_CAN_ELIMINATE
++#define TARGET_CAN_ELIMINATE riscv_can_eliminate
++
++#undef TARGET_CONDITIONAL_REGISTER_USAGE
++#define TARGET_CONDITIONAL_REGISTER_USAGE riscv_conditional_register_usage
++
++#undef TARGET_CLASS_MAX_NREGS
++#define TARGET_CLASS_MAX_NREGS riscv_class_max_nregs
++
++#undef TARGET_TRAMPOLINE_INIT
++#define TARGET_TRAMPOLINE_INIT riscv_trampoline_init
++
++#undef TARGET_IN_SMALL_DATA_P
++#define TARGET_IN_SMALL_DATA_P riscv_in_small_data_p
++
++#undef TARGET_ASM_SELECT_RTX_SECTION
++#define TARGET_ASM_SELECT_RTX_SECTION riscv_elf_select_rtx_section
++
++#undef TARGET_MIN_ANCHOR_OFFSET
++#define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)
++
++#undef TARGET_MAX_ANCHOR_OFFSET
++#define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
++
++#undef TARGET_REGISTER_PRIORITY
++#define TARGET_REGISTER_PRIORITY riscv_register_priority
++
++#undef TARGET_CANNOT_COPY_INSN_P
++#define TARGET_CANNOT_COPY_INSN_P riscv_cannot_copy_insn_p
++
++#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
++#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV riscv_atomic_assign_expand_fenv
++
++#undef TARGET_INIT_BUILTINS
++#define TARGET_INIT_BUILTINS riscv_init_builtins
++
++#undef TARGET_BUILTIN_DECL
++#define TARGET_BUILTIN_DECL riscv_builtin_decl
++
++#undef TARGET_EXPAND_BUILTIN
++#define TARGET_EXPAND_BUILTIN riscv_expand_builtin
++
++struct gcc_target targetm = TARGET_INITIALIZER;
++
++#include "gt-riscv.h"
+diff --git original-gcc/gcc/config/riscv/riscv.h gcc-6.3.0/gcc/config/riscv/riscv.h
+new file mode 100644
+index 00000000000..8d4c75e6770
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv.h
+@@ -0,0 +1,906 @@
++/* Definition of RISC-V target for GNU compiler.
++ Copyright (C) 2011-2017 Free Software Foundation, Inc.
++ Contributed by Andrew Waterman (andrew@sifive.com).
++ Based on MIPS target for GNU compiler.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3. If not see
++<http://www.gnu.org/licenses/>. */
++
++#ifndef GCC_RISCV_H
++#define GCC_RISCV_H
++
++#include "config/riscv/riscv-opts.h"
++
++/* Target CPU builtins. */
++#define TARGET_CPU_CPP_BUILTINS() riscv_cpu_cpp_builtins (pfile)
++
++/* Default target_flags if no switches are specified */
++
++#ifndef TARGET_DEFAULT
++#define TARGET_DEFAULT 0
++#endif
++
++#ifndef RISCV_TUNE_STRING_DEFAULT
++#define RISCV_TUNE_STRING_DEFAULT "rocket"
++#endif
++
++/* Support for a compile-time default CPU, et cetera. The rules are:
++ --with-arch is ignored if -march is specified.
++ --with-abi is ignored if -mabi is specified.
++ --with-tune is ignored if -mtune is specified. */
++#define OPTION_DEFAULT_SPECS \
++ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
++ {"arch", "%{!march=*:-march=%(VALUE)}" }, \
++ {"abi", "%{!mabi=*:-mabi=%(VALUE)}" }, \
++
++#ifdef IN_LIBGCC2
++#undef TARGET_64BIT
++/* Make this a compile-time constant for libgcc2. */
++#define TARGET_64BIT (__riscv_xlen == 64)
++#endif /* IN_LIBGCC2 */
++
++#undef ASM_SPEC
++#define ASM_SPEC "\
++%(subtarget_asm_debugging_spec) \
++%{" FPIE_OR_FPIC_SPEC ":-fpic} \
++%{march=*} \
++%{mabi=*} \
++%(subtarget_asm_spec)"
++
++#define TARGET_DEFAULT_CMODEL CM_MEDLOW
++
++#define LOCAL_LABEL_PREFIX "."
++#define USER_LABEL_PREFIX ""
++
++/* Offsets recorded in opcodes are a multiple of this alignment factor.
++ The default for this in 64-bit mode is 8, which causes problems with
++ SFmode register saves. */
++#define DWARF_CIE_DATA_ALIGNMENT -4
++
++/* The mapping from gcc register number to DWARF 2 CFA column number. */
++#define DWARF_FRAME_REGNUM(REGNO) \
++ (GP_REG_P (REGNO) || FP_REG_P (REGNO) ? REGNO : INVALID_REGNUM)
++
++/* The DWARF 2 CFA column which tracks the return address. */
++#define DWARF_FRAME_RETURN_COLUMN RETURN_ADDR_REGNUM
++#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (VOIDmode, RETURN_ADDR_REGNUM)
++
++/* Describe how we implement __builtin_eh_return. */
++#define EH_RETURN_DATA_REGNO(N) \
++ ((N) < 4 ? (N) + GP_ARG_FIRST : INVALID_REGNUM)
++
++#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, GP_ARG_FIRST + 4)
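++
++/* Since GP_ARG_FIRST below is a0 (x10), the four EH data registers
++   are a0-a3 and the stack adjustment is passed in a4.  */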
++
++/* Target machine storage layout */
++
++#define BITS_BIG_ENDIAN 0
++#define BYTES_BIG_ENDIAN 0
++#define WORDS_BIG_ENDIAN 0
++
++#define MAX_BITS_PER_WORD 64
++
++/* Width of a word, in units (bytes). */
++#define UNITS_PER_WORD (TARGET_64BIT ? 8 : 4)
++#ifndef IN_LIBGCC2
++#define MIN_UNITS_PER_WORD 4
++#endif
++
++/* The `Q' extension is not yet supported. */
++#define UNITS_PER_FP_REG (TARGET_DOUBLE_FLOAT ? 8 : 4)
++
++/* The largest type that can be passed in floating-point registers. */
++#define UNITS_PER_FP_ARG \
++ (riscv_abi == ABI_ILP32 || riscv_abi == ABI_LP64 ? 0 : \
++ riscv_abi == ABI_ILP32F || riscv_abi == ABI_LP64F ? 4 : 8) \
++
++/* Set the sizes of the core types. */
++#define SHORT_TYPE_SIZE 16
++#define INT_TYPE_SIZE 32
++#define LONG_LONG_TYPE_SIZE 64
++#define POINTER_SIZE (riscv_abi >= ABI_LP64 ? 64 : 32)
++#define LONG_TYPE_SIZE POINTER_SIZE
++
++#define FLOAT_TYPE_SIZE 32
++#define DOUBLE_TYPE_SIZE 64
++#define LONG_DOUBLE_TYPE_SIZE 128
++
++/* Allocation boundary (in *bits*) for storing arguments in argument list. */
++#define PARM_BOUNDARY BITS_PER_WORD
++
++/* Allocation boundary (in *bits*) for the code of a function. */
++#define FUNCTION_BOUNDARY (TARGET_RVC ? 16 : 32)
++
++/* There is no point aligning anything to a rounder boundary than this. */
++#define BIGGEST_ALIGNMENT 128
++
++/* The user-level ISA permits misaligned accesses, but they may execute
++ extremely slowly and non-atomically. Some privileged architectures
++ do not permit them at all. It is best to enforce strict alignment. */
++#define STRICT_ALIGNMENT 1
++
++/* Define this if you wish to imitate the way many other C compilers
++ handle alignment of bitfields and the structures that contain
++ them.
++
++ The behavior is that the type written for a bit-field (`int',
++ `short', or other integer type) imposes an alignment for the
++ entire structure, as if the structure really did contain an
++ ordinary field of that type. In addition, the bit-field is placed
++ within the structure so that it would fit within such a field,
++ not crossing a boundary for it.
++
++ Thus, on most machines, a bit-field whose type is written as `int'
++ would not cross a four-byte boundary, and would force four-byte
++ alignment for the whole structure. (The alignment used may not
++ be four bytes; it is controlled by the other alignment
++ parameters.)
++
++ If the macro is defined, its definition should be a C expression;
++ a nonzero value for the expression enables this behavior. */
++
++#define PCC_BITFIELD_TYPE_MATTERS 1
++
++/* If defined, a C expression to compute the alignment given to a
++ constant that is being placed in memory. CONSTANT is the constant
++ and ALIGN is the alignment that the object would ordinarily have.
++ The value of this macro is used instead of that alignment to align
++ the object.
++
++ If this macro is not defined, then ALIGN is used.
++
++ The typical use of this macro is to increase alignment for string
++ constants to be word aligned so that `strcpy' calls that copy
++ constants can be done inline. */
++
++#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
++ ((TREE_CODE (EXP) == STRING_CST || TREE_CODE (EXP) == CONSTRUCTOR) \
++ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
++
++/* If defined, a C expression to compute the alignment for a static
++ variable. TYPE is the data type, and ALIGN is the alignment that
++ the object would ordinarily have. The value of this macro is used
++ instead of that alignment to align the object.
++
++ If this macro is not defined, then ALIGN is used.
++
++ One use of this macro is to increase alignment of medium-size
++ data to make it all fit in fewer cache lines. Another is to
++ cause character arrays to be word-aligned so that `strcpy' calls
++ that copy constants to character arrays can be done inline. */
++
++#define DATA_ALIGNMENT(TYPE, ALIGN) \
++ ((((ALIGN) < BITS_PER_WORD) \
++ && (TREE_CODE (TYPE) == ARRAY_TYPE \
++ || TREE_CODE (TYPE) == UNION_TYPE \
++ || TREE_CODE (TYPE) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
++
++/* We need this for the same reason as DATA_ALIGNMENT, namely to cause
++ character arrays to be word-aligned so that `strcpy' calls that copy
++ constants to character arrays can be done inline, and 'strcmp' can be
++ optimised to use word loads. */
++#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
++ DATA_ALIGNMENT (TYPE, ALIGN)
++
++/* Define if operations between registers always perform the operation
++ on the full register even if a narrower mode is specified. */
++#define WORD_REGISTER_OPERATIONS 1
++
++/* When in 64-bit mode, move insns will sign extend SImode and CCmode
++ moves. All other references are zero extended. */
++#define LOAD_EXTEND_OP(MODE) \
++ (TARGET_64BIT && (MODE) == SImode ? SIGN_EXTEND : ZERO_EXTEND)
++
++/* Define this macro if it is advisable to hold scalars in registers
++ in a wider mode than that declared by the program. In such cases,
++ the value is constrained to be within the bounds of the declared
++ type, but kept valid in the wider mode. The signedness of the
++ extension may differ from that of the type. */
++
++#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
++ if (GET_MODE_CLASS (MODE) == MODE_INT \
++ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
++ { \
++ if ((MODE) == SImode) \
++ (UNSIGNEDP) = 0; \
++ (MODE) = word_mode; \
++ }
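++
++/* In effect, sub-word scalars are held in word_mode registers, and
++   SImode values are always promoted with *signed* extension, matching
++   the sign-extending addw/lw instructions of RV64.  */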
++
++/* Pmode is always the same as ptr_mode, but not always the same as word_mode.
++ Extensions of pointers to word_mode must be signed. */
++#define POINTERS_EXTEND_UNSIGNED false
++
++/* When floating-point registers are wider than integer ones, moves between
++ them must go through memory. */
++#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
++ (GET_MODE_SIZE (MODE) > UNITS_PER_WORD \
++ && ((CLASS1) == FP_REGS) != ((CLASS2) == FP_REGS))
++
++/* Define if loading short immediate values into registers sign extends. */
++#define SHORT_IMMEDIATES_SIGN_EXTEND 1
++
++/* Standard register usage. */
++
++/* Number of hardware registers. We have:
++
++ - 32 integer registers
++ - 32 floating point registers
++ - 2 fake registers:
++ - ARG_POINTER_REGNUM
++ - FRAME_POINTER_REGNUM */
++
++#define FIRST_PSEUDO_REGISTER 66
++
++/* x0, sp, gp, and tp are fixed. */
++
++#define FIXED_REGISTERS \
++{ /* General registers. */ \
++ 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ /* Floating-point registers. */ \
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
++ /* Others. */ \
++ 1, 1 \
++}
++
++/* a0-a7, t0-t6, fa0-fa7, and ft0-ft11 are volatile across calls.
++ The call RTLs themselves clobber ra. */
++
++#define CALL_USED_REGISTERS \
++{ /* General registers. */ \
++ 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Floating-point registers. */ \
++ 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, \
++ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, \
++ /* Others. */ \
++ 1, 1 \
++}
++
++/* Internal macros to classify an ISA register's type. */
++
++#define GP_REG_FIRST 0
++#define GP_REG_LAST 31
++#define GP_REG_NUM (GP_REG_LAST - GP_REG_FIRST + 1)
++
++#define FP_REG_FIRST 32
++#define FP_REG_LAST 63
++#define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
++
++/* The DWARF 2 CFA column which tracks the return address from a
++ signal handler context. This means that to maintain backwards
++ compatibility, no hard register can be assigned this column if it
++ would need to be handled by the DWARF unwinder. */
++#define DWARF_ALT_FRAME_RETURN_COLUMN 64
++
++#define GP_REG_P(REGNO) \
++ ((unsigned int) ((int) (REGNO) - GP_REG_FIRST) < GP_REG_NUM)
++#define FP_REG_P(REGNO) \
++ ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
++
++#define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
++
++#define HARD_REGNO_NREGS(REGNO, MODE) riscv_hard_regno_nregs (REGNO, MODE)
++
++#define HARD_REGNO_MODE_OK(REGNO, MODE) \
++ riscv_hard_regno_mode_ok_p (REGNO, MODE)
++
++/* Don't allow floating-point modes to be tied, since type punning of
++ single-precision and double-precision is implementation defined. */
++#define MODES_TIEABLE_P(MODE1, MODE2) \
++ ((MODE1) == (MODE2) \
++ || !(GET_MODE_CLASS (MODE1) == MODE_FLOAT \
++ && GET_MODE_CLASS (MODE2) == MODE_FLOAT))
++
++/* Use s0 as the frame pointer if it is so requested. */
++#define HARD_FRAME_POINTER_REGNUM 8
++#define STACK_POINTER_REGNUM 2
++#define THREAD_POINTER_REGNUM 4
++
++/* These two registers don't really exist: they get eliminated to either
++ the stack or hard frame pointer. */
++#define ARG_POINTER_REGNUM 64
++#define FRAME_POINTER_REGNUM 65
++
++/* Register in which static-chain is passed to a function. */
++#define STATIC_CHAIN_REGNUM (GP_TEMP_FIRST + 2)
++
++/* Registers used as temporaries in prologue/epilogue code.
++
++ The prologue registers mustn't conflict with any
++ incoming arguments, the static chain pointer, or the frame pointer.
++ The epilogue temporary mustn't conflict with the return registers,
++ the frame pointer, the EH stack adjustment, or the EH data registers. */
++
++#define RISCV_PROLOGUE_TEMP_REGNUM (GP_TEMP_FIRST + 1)
++#define RISCV_PROLOGUE_TEMP(MODE) gen_rtx_REG (MODE, RISCV_PROLOGUE_TEMP_REGNUM)
++
++#define MCOUNT_NAME "_mcount"
++
++#define NO_PROFILE_COUNTERS 1
++
++/* Emit rtl for profiling. Output assembler code to FILE
++ to call "_mcount" for profiling a function entry. */
++#define PROFILE_HOOK(LABEL) \
++ { \
++ rtx fun, ra; \
++ ra = get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNUM); \
++ fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME); \
++ emit_library_call (fun, LCT_NORMAL, VOIDmode, 1, ra, Pmode); \
++ }
++
++/* All the work done in PROFILE_HOOK, but still required. */
++#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)
++
++/* Define this macro if it is as good or better to call a constant
++ function address than to call an address kept in a register. */
++#define NO_FUNCTION_CSE 1
++
++/* Define the classes of registers for register constraints in the
++ machine description. Also define ranges of constants.
++
++ One of the classes must always be named ALL_REGS and include all hard regs.
++ If there is more than one class, another class must be named NO_REGS
++ and contain no registers.
++
++ The name GENERAL_REGS must be the name of a class (or an alias for
++ another name such as ALL_REGS). This is the class of registers
++ that is allowed by "g" or "r" in a register constraint.
++ Also, registers outside this class are allocated only when
++ instructions express preferences for them.
++
++ The classes must be numbered in nondecreasing order; that is,
++ a larger-numbered class must never be contained completely
++ in a smaller-numbered class.
++
++ For any two classes, it is very desirable that there be another
++ class that represents their union. */
++
++enum reg_class
++{
++ NO_REGS, /* no registers in set */
++ SIBCALL_REGS, /* registers used by indirect sibcalls */
++ JALR_REGS, /* registers used by indirect calls */
++ GR_REGS, /* integer registers */
++ FP_REGS, /* floating-point registers */
++ FRAME_REGS, /* arg pointer and frame pointer */
++ ALL_REGS, /* all registers */
++ LIM_REG_CLASSES /* max value + 1 */
++};
++
++#define N_REG_CLASSES (int) LIM_REG_CLASSES
++
++#define GENERAL_REGS GR_REGS
++
++/* An initializer containing the names of the register classes as C
++ string constants. These names are used in writing some of the
++ debugging dumps. */
++
++#define REG_CLASS_NAMES \
++{ \
++ "NO_REGS", \
++ "SIBCALL_REGS", \
++ "JALR_REGS", \
++ "GR_REGS", \
++ "FP_REGS", \
++ "FRAME_REGS", \
++ "ALL_REGS" \
++}
++
++/* An initializer containing the contents of the register classes,
++ as integers which are bit masks. The Nth integer specifies the
++ contents of class N. The way the integer MASK is interpreted is
++ that register R is in the class if `MASK & (1 << R)' is 1.
++
++ When the machine has more than 32 registers, an integer does not
++ suffice. Then the integers are replaced by sub-initializers,
++ braced groupings containing several integers. Each
++ sub-initializer must be suitable as an initializer for the type
++ `HARD_REG_SET' which is defined in `hard-reg-set.h'. */
++
++#define REG_CLASS_CONTENTS \
++{ \
++ { 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
++ { 0xf00000c0, 0x00000000, 0x00000000 }, /* SIBCALL_REGS */ \
++ { 0xffffffc0, 0x00000000, 0x00000000 }, /* JALR_REGS */ \
++ { 0xffffffff, 0x00000000, 0x00000000 }, /* GR_REGS */ \
++ { 0x00000000, 0xffffffff, 0x00000000 }, /* FP_REGS */ \
++ { 0x00000000, 0x00000000, 0x00000003 }, /* FRAME_REGS */ \
++ { 0xffffffff, 0xffffffff, 0x00000003 } /* ALL_REGS */ \
++}
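++
++/* Decoding the masks (x0 is bit 0 of the first word): JALR_REGS is
++   x6-x31, i.e. every GPR except zero, sp, gp, tp, and the two link
++   registers ra (x1) and t0 (x5), presumably excluded because JALR with
++   rd/rs1 = x1/x5 carries return-address-stack hints in the ISA.
++   SIBCALL_REGS narrows that to the call-clobbered temporaries t1-t6
++   (bits 6, 7 and 28-31).  */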
++
++/* A C expression whose value is a register class containing hard
++ register REGNO. In general there is more than one such class;
++ choose a class which is "minimal", meaning that no smaller class
++ also contains the register. */
++
++#define REGNO_REG_CLASS(REGNO) riscv_regno_to_class[ (REGNO) ]
++
++/* A macro whose definition is the name of the class to which a
++ valid base register must belong. A base register is one used in
++ an address which is the register value plus a displacement. */
++
++#define BASE_REG_CLASS GR_REGS
++
++/* A macro whose definition is the name of the class to which a
++ valid index register must belong. An index register is one used
++ in an address where its value is either multiplied by a scale
++ factor or added to another register (as well as added to a
++ displacement). */
++
++#define INDEX_REG_CLASS NO_REGS
++
++/* We generally want to put call-clobbered registers ahead of
++ call-saved ones. (IRA expects this.) */
++
++#define REG_ALLOC_ORDER \
++{ \
++ /* Call-clobbered GPRs. */ \
++ 15, 14, 13, 12, 11, 10, 16, 17, 6, 28, 29, 30, 31, 5, 7, 1, \
++ /* Call-saved GPRs. */ \
++ 8, 9, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, \
++ /* GPRs that can never be exposed to the register allocator. */ \
++ 0, 2, 3, 4, \
++ /* Call-clobbered FPRs. */ \
++ 47, 46, 45, 44, 43, 42, 32, 33, 34, 35, 36, 37, 38, 39, 48, 49, \
++ 60, 61, 62, 63, \
++ /* Call-saved FPRs. */ \
++ 40, 41, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, \
++ /* None of the remaining classes have defined call-saved \
++ registers. */ \
++ 64, 65 \
++}
++
++/* True if VALUE is a signed 12-bit number. */
++
++#define SMALL_OPERAND(VALUE) \
++ ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH/2 < IMM_REACH)
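++
++/* With IMM_BITS == 12, IMM_REACH is 4096, so the unsigned comparison
++   above accepts exactly -2048 <= VALUE <= 2047, the range of an I-type
++   immediate.  */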
++
++/* True if VALUE can be loaded into a register using LUI. */
++
++#define LUI_OPERAND(VALUE) \
++ (((VALUE) | ((1UL<<31) - IMM_REACH)) == ((1UL<<31) - IMM_REACH) \
++ || ((VALUE) | ((1UL<<31) - IMM_REACH)) + IMM_REACH == 0)
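++
++/* That is, VALUE has its low 12 bits clear and is the sign extension
++   of a 32-bit quantity: a 20-bit immediate shifted left by 12, which
++   is exactly what a single LUI materializes.  */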
++
++#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
++ reg_classes_intersect_p (FP_REGS, CLASS)
++
++/* Stack layout; function entry, exit and calling. */
++
++#define STACK_GROWS_DOWNWARD 1
++
++#define FRAME_GROWS_DOWNWARD 1
++
++#define STARTING_FRAME_OFFSET 0
++
++#define RETURN_ADDR_RTX riscv_return_addr
++
++#define ELIMINABLE_REGS \
++{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
++ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
++ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
++ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
++
++#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
++ (OFFSET) = riscv_initial_elimination_offset (FROM, TO)
++
++/* Allocate stack space for arguments at the beginning of each function. */
++#define ACCUMULATE_OUTGOING_ARGS 1
++
++/* The argument pointer always points to the first argument. */
++#define FIRST_PARM_OFFSET(FNDECL) 0
++
++#define REG_PARM_STACK_SPACE(FNDECL) 0
++
++/* Define this if it is the responsibility of the caller to
++ allocate the area reserved for arguments passed in registers.
++ If `ACCUMULATE_OUTGOING_ARGS' is also defined, the only effect
++ of this macro is to determine whether the space is included in
++ `crtl->outgoing_args_size'. */
++#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
++
++#define STACK_BOUNDARY 128
++
++/* Symbolic macros for the registers used to return integer and floating
++ point values. */
++
++#define GP_RETURN GP_ARG_FIRST
++#define FP_RETURN (UNITS_PER_FP_ARG == 0 ? GP_RETURN : FP_ARG_FIRST)
++
++#define MAX_ARGS_IN_REGISTERS 8
++
++/* Symbolic macros for the first/last argument registers. */
++
++#define GP_ARG_FIRST (GP_REG_FIRST + 10)
++#define GP_ARG_LAST (GP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
++#define GP_TEMP_FIRST (GP_REG_FIRST + 5)
++#define FP_ARG_FIRST (FP_REG_FIRST + 10)
++#define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
++
++#define CALLEE_SAVED_REG_NUMBER(REGNO) \
++ ((REGNO) >= 8 && (REGNO) <= 9 ? (REGNO) - 8 : \
++ (REGNO) >= 18 && (REGNO) <= 27 ? (REGNO) - 16 : -1)
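++
++/* This yields a dense index over the callee-saved GPRs: s0/s1 (x8/x9)
++   map to 0/1 and s2-s11 (x18-x27) map to 2-11; anything else is -1.  */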
++
++#define LIBCALL_VALUE(MODE) \
++ riscv_function_value (NULL_TREE, NULL_TREE, MODE)
++
++#define FUNCTION_VALUE(VALTYPE, FUNC) \
++ riscv_function_value (VALTYPE, FUNC, VOIDmode)
++
++#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN)
++
++/* 1 if N is a possible register number for function argument passing.
++ We have no FP argument registers when soft-float. */
++
++/* Accept arguments in a0-a7, and in fa0-fa7 if permitted by the ABI. */
++#define FUNCTION_ARG_REGNO_P(N) \
++ (IN_RANGE ((N), GP_ARG_FIRST, GP_ARG_LAST) \
++ || (UNITS_PER_FP_ARG && IN_RANGE ((N), FP_ARG_FIRST, FP_ARG_LAST)))
++
++typedef struct {
++ /* Number of integer registers used so far, up to MAX_ARGS_IN_REGISTERS. */
++ unsigned int num_gprs;
++
++ /* Number of floating-point registers used so far, likewise. */
++ unsigned int num_fprs;
++} CUMULATIVE_ARGS;
++
++/* Initialize a variable CUM of type CUMULATIVE_ARGS
++ for a call to a function whose data type is FNTYPE.
++ For a library call, FNTYPE is 0. */
++
++#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
++ memset (&(CUM), 0, sizeof (CUM))
++
++#define EPILOGUE_USES(REGNO) ((REGNO) == RETURN_ADDR_REGNUM)
++
++/* ABI requires 16-byte alignment, even on RV32. */
++#define RISCV_STACK_ALIGN(LOC) (((LOC) + 15) & -16)
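++
++/* The expression rounds up to the next multiple of 16; e.g.
++   RISCV_STACK_ALIGN (20) is 32, while multiples of 16 pass through
++   unchanged.  */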
++
++/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
++ the stack pointer does not matter. The value is tested only in
++ functions that have frame pointers.
++ No definition is equivalent to always zero. */
++
++#define EXIT_IGNORE_STACK 1
++
++
++/* Trampolines are a block of code followed by two pointers. */
++
++#define TRAMPOLINE_CODE_SIZE 16
++#define TRAMPOLINE_SIZE \
++ ((Pmode == SImode) \
++ ? TRAMPOLINE_CODE_SIZE \
++ : (TRAMPOLINE_CODE_SIZE + POINTER_SIZE * 2))
++#define TRAMPOLINE_ALIGNMENT POINTER_SIZE
++
++/* Addressing modes, and classification of registers for them. */
++
++#define REGNO_OK_FOR_INDEX_P(REGNO) 0
++#define REGNO_MODE_OK_FOR_BASE_P(REGNO, MODE) \
++ riscv_regno_mode_ok_for_base_p (REGNO, MODE, 1)
++
++/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
++ and check its validity for a certain class.
++ We have two alternate definitions for each of them.
++ The usual definition accepts all pseudo regs; the other rejects them all.
++ The symbol REG_OK_STRICT causes the latter definition to be used.
++
++ Most source files want to accept pseudo regs in the hope that
++ they will get allocated to the class that the insn wants them to be in.
++ Some source files that are used after register allocation
++ need to be strict. */
++
++#ifndef REG_OK_STRICT
++#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
++ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 0)
++#else
++#define REG_MODE_OK_FOR_BASE_P(X, MODE) \
++ riscv_regno_mode_ok_for_base_p (REGNO (X), MODE, 1)
++#endif
++
++#define REG_OK_FOR_INDEX_P(X) 0
++
++/* Maximum number of registers that can appear in a valid memory address. */
++
++#define MAX_REGS_PER_ADDRESS 1
++
++#define CONSTANT_ADDRESS_P(X) \
++ (CONSTANT_P (X) && memory_address_p (SImode, X))
++
++/* This handles the magic '..CURRENT_FUNCTION' symbol, which means
++ 'the start of the function that this code is output in'. */
++
++#define ASM_OUTPUT_LABELREF(FILE,NAME) \
++ if (strcmp (NAME, "..CURRENT_FUNCTION") == 0) \
++ asm_fprintf ((FILE), "%U%s", \
++ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
++ else \
++ asm_fprintf ((FILE), "%U%s", (NAME))
++
++#define JUMP_TABLES_IN_TEXT_SECTION 0
++#define CASE_VECTOR_MODE SImode
++#define CASE_VECTOR_PC_RELATIVE (riscv_cmodel != CM_MEDLOW)
++
++/* The load-address macro is used for PC-relative addressing of symbols
++ that bind locally. Don't use it for symbols that should be addressed
++ via the GOT. Also, avoid it for CM_MEDLOW, where LUI addressing
++ currently results in more opportunities for linker relaxation. */
++#define USE_LOAD_ADDRESS_MACRO(sym) \
++ (!TARGET_EXPLICIT_RELOCS && \
++ ((flag_pic \
++ && ((SYMBOL_REF_P (sym) && SYMBOL_REF_LOCAL_P (sym)) \
++ || ((GET_CODE (sym) == CONST) \
++ && SYMBOL_REF_P (XEXP (XEXP (sym, 0),0)) \
++ && SYMBOL_REF_LOCAL_P (XEXP (XEXP (sym, 0),0))))) \
++ || riscv_cmodel == CM_MEDANY))
++
++/* Define this as 1 if `char' should by default be signed; else as 0. */
++#define DEFAULT_SIGNED_CHAR 0
++
++#define MOVE_MAX UNITS_PER_WORD
++#define MAX_MOVE_MAX 8
++
++#define SLOW_BYTE_ACCESS 0
++
++#define SHIFT_COUNT_TRUNCATED 1
++
++#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
++
++/* Specify the machine mode that pointers have.
++ After generation of rtl, the compiler makes no further distinction
++ between pointers and any other objects of this machine mode. */
++
++#define Pmode word_mode
++
++/* Give call MEMs SImode since it is the "most permissive" mode
++ for both 32-bit and 64-bit targets. */
++
++#define FUNCTION_MODE SImode
++
++/* A C expression for the cost of a branch instruction. A value of 2
++ seems to minimize code size. */
++
++#define BRANCH_COST(speed_p, predictable_p) \
++ ((!(speed_p) || (predictable_p)) ? 2 : riscv_branch_cost)
++
++#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
++
++/* Control the assembler format that we output. */
++
++/* Output to assembler file text saying following lines
++ may contain character constants, extra white space, comments, etc. */
++
++#ifndef ASM_APP_ON
++#define ASM_APP_ON " #APP\n"
++#endif
++
++/* Output to assembler file text saying following lines
++ no longer contain unusual constructs. */
++
++#ifndef ASM_APP_OFF
++#define ASM_APP_OFF " #NO_APP\n"
++#endif
++
++#define REGISTER_NAMES \
++{ "zero","ra", "sp", "gp", "tp", "t0", "t1", "t2", \
++ "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5", \
++ "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7", \
++ "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", \
++ "ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7", \
++ "fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5", \
++ "fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7", \
++ "fs8", "fs9", "fs10","fs11","ft8", "ft9", "ft10","ft11", \
++ "arg", "frame", }
++
++#define ADDITIONAL_REGISTER_NAMES \
++{ \
++ { "x0", 0 + GP_REG_FIRST }, \
++ { "x1", 1 + GP_REG_FIRST }, \
++ { "x2", 2 + GP_REG_FIRST }, \
++ { "x3", 3 + GP_REG_FIRST }, \
++ { "x4", 4 + GP_REG_FIRST }, \
++ { "x5", 5 + GP_REG_FIRST }, \
++ { "x6", 6 + GP_REG_FIRST }, \
++ { "x7", 7 + GP_REG_FIRST }, \
++ { "x8", 8 + GP_REG_FIRST }, \
++ { "x9", 9 + GP_REG_FIRST }, \
++ { "x10", 10 + GP_REG_FIRST }, \
++ { "x11", 11 + GP_REG_FIRST }, \
++ { "x12", 12 + GP_REG_FIRST }, \
++ { "x13", 13 + GP_REG_FIRST }, \
++ { "x14", 14 + GP_REG_FIRST }, \
++ { "x15", 15 + GP_REG_FIRST }, \
++ { "x16", 16 + GP_REG_FIRST }, \
++ { "x17", 17 + GP_REG_FIRST }, \
++ { "x18", 18 + GP_REG_FIRST }, \
++ { "x19", 19 + GP_REG_FIRST }, \
++ { "x20", 20 + GP_REG_FIRST }, \
++ { "x21", 21 + GP_REG_FIRST }, \
++ { "x22", 22 + GP_REG_FIRST }, \
++ { "x23", 23 + GP_REG_FIRST }, \
++ { "x24", 24 + GP_REG_FIRST }, \
++ { "x25", 25 + GP_REG_FIRST }, \
++ { "x26", 26 + GP_REG_FIRST }, \
++ { "x27", 27 + GP_REG_FIRST }, \
++ { "x28", 28 + GP_REG_FIRST }, \
++ { "x29", 29 + GP_REG_FIRST }, \
++ { "x30", 30 + GP_REG_FIRST }, \
++ { "x31", 31 + GP_REG_FIRST }, \
++ { "f0", 0 + FP_REG_FIRST }, \
++ { "f1", 1 + FP_REG_FIRST }, \
++ { "f2", 2 + FP_REG_FIRST }, \
++ { "f3", 3 + FP_REG_FIRST }, \
++ { "f4", 4 + FP_REG_FIRST }, \
++ { "f5", 5 + FP_REG_FIRST }, \
++ { "f6", 6 + FP_REG_FIRST }, \
++ { "f7", 7 + FP_REG_FIRST }, \
++ { "f8", 8 + FP_REG_FIRST }, \
++ { "f9", 9 + FP_REG_FIRST }, \
++ { "f10", 10 + FP_REG_FIRST }, \
++ { "f11", 11 + FP_REG_FIRST }, \
++ { "f12", 12 + FP_REG_FIRST }, \
++ { "f13", 13 + FP_REG_FIRST }, \
++ { "f14", 14 + FP_REG_FIRST }, \
++ { "f15", 15 + FP_REG_FIRST }, \
++ { "f16", 16 + FP_REG_FIRST }, \
++ { "f17", 17 + FP_REG_FIRST }, \
++ { "f18", 18 + FP_REG_FIRST }, \
++ { "f19", 19 + FP_REG_FIRST }, \
++ { "f20", 20 + FP_REG_FIRST }, \
++ { "f21", 21 + FP_REG_FIRST }, \
++ { "f22", 22 + FP_REG_FIRST }, \
++ { "f23", 23 + FP_REG_FIRST }, \
++ { "f24", 24 + FP_REG_FIRST }, \
++ { "f25", 25 + FP_REG_FIRST }, \
++ { "f26", 26 + FP_REG_FIRST }, \
++ { "f27", 27 + FP_REG_FIRST }, \
++ { "f28", 28 + FP_REG_FIRST }, \
++ { "f29", 29 + FP_REG_FIRST }, \
++ { "f30", 30 + FP_REG_FIRST }, \
++ { "f31", 31 + FP_REG_FIRST }, \
++}
++
++/* Globalizing directive for a label. */
++#define GLOBAL_ASM_OP "\t.globl\t"
++
++/* This is how to store into the string LABEL
++ the symbol_ref name of an internal numbered label where
++ PREFIX is the class of label and NUM is the number within the class.
++ This is suitable for output with `assemble_name'. */
++
++#undef ASM_GENERATE_INTERNAL_LABEL
++#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
++ sprintf ((LABEL), "*%s%s%ld", (LOCAL_LABEL_PREFIX), (PREFIX), (long)(NUM))
++
++/* This is how to output an element of a case-vector that is absolute. */
++
++#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
++ fprintf (STREAM, "\t.word\t%sL%d\n", LOCAL_LABEL_PREFIX, VALUE)
++
++/* This is how to output an element of a PIC case-vector. */
++
++#define ASM_OUTPUT_ADDR_DIFF_ELT(STREAM, BODY, VALUE, REL) \
++ fprintf (STREAM, "\t.word\t%sL%d-%sL%d\n", \
++ LOCAL_LABEL_PREFIX, VALUE, LOCAL_LABEL_PREFIX, REL)
++
++/* This is how to output an assembler line
++ that says to advance the location counter
++ to a multiple of 2**LOG bytes. */
++
++#define ASM_OUTPUT_ALIGN(STREAM,LOG) \
++ fprintf (STREAM, "\t.align\t%d\n", (LOG))
++
++/* Define the strings to put out for each section in the object file. */
++#define TEXT_SECTION_ASM_OP "\t.text" /* instructions */
++#define DATA_SECTION_ASM_OP "\t.data" /* large data */
++#define READONLY_DATA_SECTION_ASM_OP "\t.section\t.rodata"
++#define BSS_SECTION_ASM_OP "\t.bss"
++#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\",@nobits"
++#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\",@progbits"
++
++#define ASM_OUTPUT_REG_PUSH(STREAM,REGNO) \
++do \
++ { \
++ fprintf (STREAM, "\taddi\t%s,%s,-8\n\t%s\t%s,0(%s)\n", \
++ reg_names[STACK_POINTER_REGNUM], \
++ reg_names[STACK_POINTER_REGNUM], \
++ TARGET_64BIT ? "sd" : "sw", \
++ reg_names[REGNO], \
++ reg_names[STACK_POINTER_REGNUM]); \
++ } \
++while (0)
++
++#define ASM_OUTPUT_REG_POP(STREAM,REGNO) \
++do \
++ { \
++ fprintf (STREAM, "\t%s\t%s,0(%s)\n\taddi\t%s,%s,8\n", \
++ TARGET_64BIT ? "ld" : "lw", \
++ reg_names[REGNO], \
++ reg_names[STACK_POINTER_REGNUM], \
++ reg_names[STACK_POINTER_REGNUM], \
++ reg_names[STACK_POINTER_REGNUM]); \
++ } \
++while (0)
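++
++/* For example, pushing a0 on RV64 emits
++	addi	sp,sp,-8
++	sd	a0,0(sp)
++   and the matching pop emits the ld/addi pair in the reverse order.  */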
++
++#define ASM_COMMENT_START "#"
++
++#undef SIZE_TYPE
++#define SIZE_TYPE (POINTER_SIZE == 64 ? "long unsigned int" : "unsigned int")
++
++#undef PTRDIFF_TYPE
++#define PTRDIFF_TYPE (POINTER_SIZE == 64 ? "long int" : "int")
++
++/* If a memory-to-memory move would take MOVE_RATIO or more simple
++ move-instruction pairs, we will do a movmem or libcall instead. */
++
++#define MOVE_RATIO(speed) (CLEAR_RATIO (speed) / 2)
++
++/* For CLEAR_RATIO, when optimizing for size, give a better estimate
++ of the length of a memset call, but use the default otherwise. */
++
++#define CLEAR_RATIO(speed) ((speed) ? 16 : 6)
++
++/* This is similar to CLEAR_RATIO, but for a non-zero constant, so when
++ optimizing for size adjust the ratio to account for the overhead of
++ loading the constant and replicating it across the word. */
++
++#define SET_RATIO(speed) (CLEAR_RATIO (speed) - ((speed) ? 0 : 2))
++
++#ifndef USED_FOR_TARGET
++extern const enum reg_class riscv_regno_to_class[];
++extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
++#endif
++
++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
++ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
++
++#define XLEN_SPEC \
++ "%{march=rv32*:32}" \
++ "%{march=rv64*:64}" \
++
++#define ABI_SPEC \
++ "%{mabi=ilp32:ilp32}" \
++ "%{mabi=ilp32f:ilp32f}" \
++ "%{mabi=ilp32d:ilp32d}" \
++ "%{mabi=lp64:lp64}" \
++ "%{mabi=lp64f:lp64f}" \
++ "%{mabi=lp64d:lp64d}" \
++
++#define STARTFILE_PREFIX_SPEC \
++ "/lib" XLEN_SPEC "/" ABI_SPEC "/ " \
++ "/usr/lib" XLEN_SPEC "/" ABI_SPEC "/ " \
++ "/lib/ " \
++ "/usr/lib/ "
++
++/* ISA constants needed for code generation. */
++#define OPCODE_LW 0x2003
++#define OPCODE_LD 0x3003
++#define OPCODE_AUIPC 0x17
++#define OPCODE_JALR 0x67
++#define OPCODE_LUI 0x37
++#define OPCODE_ADDI 0x13
++#define SHIFT_RD 7
++#define SHIFT_RS1 15
++#define SHIFT_IMM 20
++#define IMM_BITS 12
++
++#define IMM_REACH (1LL << IMM_BITS)
++#define CONST_HIGH_PART(VALUE) (((VALUE) + (IMM_REACH/2)) & ~(IMM_REACH-1))
++#define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE))
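++
++/* These split VALUE for a LUI/ADDI pair: CONST_HIGH_PART rounds to the
++   nearest multiple of IMM_REACH, so CONST_LOW_PART always fits in a
++   signed 12-bit immediate.  E.g. VALUE 0x12345fff yields a high part
++   of 0x12346000 and a low part of -1.  */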
++
++#endif /* ! GCC_RISCV_H */
+diff --git original-gcc/gcc/config/riscv/riscv.md gcc-6.3.0/gcc/config/riscv/riscv.md
+new file mode 100644
+index 00000000000..4cbb2431335
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv.md
+@@ -0,0 +1,2079 @@
++;; Machine description for RISC-V for GNU compiler.
++;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (andrew@sifive.com).
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_c_enum "unspec" [
++ ;; Override return address for exception handling.
++ UNSPEC_EH_RETURN
++
++ ;; Symbolic accesses. The order of this list must match that of
++ ;; enum riscv_symbol_type in riscv-protos.h.
++ UNSPEC_ADDRESS_FIRST
++ UNSPEC_PCREL
++ UNSPEC_LOAD_GOT
++ UNSPEC_TLS
++ UNSPEC_TLS_LE
++ UNSPEC_TLS_IE
++ UNSPEC_TLS_GD
++
++ ;; High part of PC-relative address.
++ UNSPEC_AUIPC
++
++ ;; Floating-point unspecs.
++ UNSPEC_FLT_QUIET
++ UNSPEC_FLE_QUIET
++ UNSPEC_COPYSIGN
++ UNSPEC_LRINT
++ UNSPEC_LROUND
++
++ ;; Stack tie
++ UNSPEC_TIE
++])
++
++(define_c_enum "unspecv" [
++ ;; Register save and restore.
++ UNSPECV_GPR_SAVE
++ UNSPECV_GPR_RESTORE
++
++ ;; Floating-point unspecs.
++ UNSPECV_FRFLAGS
++ UNSPECV_FSFLAGS
++
++ ;; Blockage and synchronization.
++ UNSPECV_BLOCKAGE
++ UNSPECV_FENCE
++ UNSPECV_FENCE_I
++])
++
++(define_constants
++ [(RETURN_ADDR_REGNUM 1)
++ (T0_REGNUM 5)
++ (T1_REGNUM 6)
++ (S0_REGNUM 8)
++ (S1_REGNUM 9)
++ (S2_REGNUM 18)
++])
++
++(include "predicates.md")
++(include "constraints.md")
++
++;; ....................
++;;
++;; Attributes
++;;
++;; ....................
++
++(define_attr "got" "unset,xgot_high,load"
++ (const_string "unset"))
++
++;; Classification of moves, extensions and truncations. Most values
++;; are as for "type" (see below) but there are also the following
++;; move-specific values:
++;;
++;; andi a single ANDI instruction
++;; shift_shift a shift left followed by a shift right
++;;
++;; This attribute is used to determine the instruction's length and
++;; scheduling type. For doubleword moves, the attribute always describes
++;; the split instructions; in some cases, it is more appropriate for the
++;; scheduling type to be "multi" instead.
++(define_attr "move_type"
++ "unknown,load,fpload,store,fpstore,mtc,mfc,move,fmove,
++ const,logical,arith,andi,shift_shift"
++ (const_string "unknown"))
++
++;; Main data type used by the insn
++(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF"
++ (const_string "unknown"))
++
++;; True if the main data type is twice the size of a word.
++(define_attr "dword_mode" "no,yes"
++ (cond [(and (eq_attr "mode" "DI,DF")
++ (eq (symbol_ref "TARGET_64BIT") (const_int 0)))
++ (const_string "yes")
++
++ (and (eq_attr "mode" "TI,TF")
++ (ne (symbol_ref "TARGET_64BIT") (const_int 0)))
++ (const_string "yes")]
++ (const_string "no")))
++
++;; Classification of each insn.
++;; branch conditional branch
++;; jump unconditional jump
++;; call unconditional call
++;; load load instruction(s)
++;; fpload floating point load
++;; store store instruction(s)
++;; fpstore floating point store
++;; mtc transfer to coprocessor
++;; mfc transfer from coprocessor
++;; const load constant
++;; arith integer arithmetic instructions
++;; logical integer logical instructions
++;; shift integer shift instructions
++;; slt set less than instructions
++;; imul integer multiply
++;; idiv integer divide
++;; move integer register move (addi rd, rs1, 0)
++;; fmove floating point register move
++;; fadd floating point add/subtract
++;; fmul floating point multiply
++;; fmadd floating point multiply-add
++;; fdiv floating point divide
++;; fcmp floating point compare
++;; fcvt floating point convert
++;; fsqrt floating point square root
++;; multi multiword sequence (or user asm statements)
++;; nop no operation
++;; ghost an instruction that produces no real code
++(define_attr "type"
++ "unknown,branch,jump,call,load,fpload,store,fpstore,
++ mtc,mfc,const,arith,logical,shift,slt,imul,idiv,move,fmove,fadd,fmul,
++ fmadd,fdiv,fcmp,fcvt,fsqrt,multi,nop,ghost"
++ (cond [(eq_attr "got" "load") (const_string "load")
++
++ ;; If a doubleword move uses these expensive instructions,
++ ;; it is usually better to schedule them in the same way
++ ;; as the singleword form, rather than as "multi".
++ (eq_attr "move_type" "load") (const_string "load")
++ (eq_attr "move_type" "fpload") (const_string "fpload")
++ (eq_attr "move_type" "store") (const_string "store")
++ (eq_attr "move_type" "fpstore") (const_string "fpstore")
++ (eq_attr "move_type" "mtc") (const_string "mtc")
++ (eq_attr "move_type" "mfc") (const_string "mfc")
++
++ ;; These types of move are always single insns.
++ (eq_attr "move_type" "fmove") (const_string "fmove")
++ (eq_attr "move_type" "arith") (const_string "arith")
++ (eq_attr "move_type" "logical") (const_string "logical")
++ (eq_attr "move_type" "andi") (const_string "logical")
++
++ ;; These types of move are always split.
++ (eq_attr "move_type" "shift_shift")
++ (const_string "multi")
++
++ ;; These types of move are split for doubleword modes only.
++ (and (eq_attr "move_type" "move,const")
++ (eq_attr "dword_mode" "yes"))
++ (const_string "multi")
++ (eq_attr "move_type" "move") (const_string "move")
++ (eq_attr "move_type" "const") (const_string "const")]
++ (const_string "unknown")))
++
++;; Length of instruction in bytes.
++(define_attr "length" ""
++ (cond [
++ ;; Branches further than +/- 4 KiB require two instructions.
++ (eq_attr "type" "branch")
++ (if_then_else (and (le (minus (match_dup 0) (pc)) (const_int 4088))
++ (le (minus (pc) (match_dup 0)) (const_int 4092)))
++ (const_int 4)
++ (const_int 8))
++
++ ;; Conservatively assume calls take two instructions (AUIPC + JALR).
++ ;; The linker will opportunistically relax the sequence to JAL.
++ (eq_attr "type" "call") (const_int 8)
++
++ ;; "Ghost" instructions occupy no space.
++ (eq_attr "type" "ghost") (const_int 0)
++
++ (eq_attr "got" "load") (const_int 8)
++
++ (eq_attr "type" "fcmp") (const_int 8)
++
++ ;; SHIFT_SHIFTs are decomposed into two separate instructions.
++ (eq_attr "move_type" "shift_shift")
++ (const_int 8)
++
++ ;; Check for doubleword moves that are decomposed into two
++ ;; instructions.
++ (and (eq_attr "move_type" "mtc,mfc,move")
++ (eq_attr "dword_mode" "yes"))
++ (const_int 8)
++
++ ;; Doubleword CONST{,N} moves are split into two word
++ ;; CONST{,N} moves.
++ (and (eq_attr "move_type" "const")
++ (eq_attr "dword_mode" "yes"))
++ (symbol_ref "riscv_split_const_insns (operands[1]) * 4")
++
++ ;; Otherwise, constants, loads and stores are handled by external
++ ;; routines.
++ (eq_attr "move_type" "load,fpload")
++ (symbol_ref "riscv_load_store_insns (operands[1], insn) * 4")
++ (eq_attr "move_type" "store,fpstore")
++ (symbol_ref "riscv_load_store_insns (operands[0], insn) * 4")
++ ] (const_int 4)))
++
++;; Is copying of this instruction disallowed?
++(define_attr "cannot_copy" "no,yes" (const_string "no"))
++
++;; Describe a user's asm statement.
++(define_asm_attributes
++ [(set_attr "type" "multi")])
++
++;; This mode iterator allows 32-bit and 64-bit GPR patterns to be generated
++;; from the same template.
++(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
++
++;; This mode iterator allows :P to be used for patterns that operate on
++;; pointer-sized quantities. Exactly one of the two alternatives will match.
++(define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
++
++;; Likewise, but for XLEN-sized quantities.
++(define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")])
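++
++;; Since this port defines Pmode as word_mode (see riscv.h), :P and :X
++;; currently select the same mode.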
++
++;; Branches operate on XLEN-sized quantities, but for RV64 we accept
++;; QImode values so we can force zero-extension.
++(define_mode_iterator BR [(QI "TARGET_64BIT") SI (DI "TARGET_64BIT")])
++
++;; 32-bit moves for which we provide move patterns.
++(define_mode_iterator MOVE32 [SI])
++
++;; 64-bit modes for which we provide move patterns.
++(define_mode_iterator MOVE64 [DI DF])
++
++;; Iterator for sub-32-bit integer modes.
++(define_mode_iterator SHORT [QI HI])
++
++;; Iterator for HImode constant generation.
++(define_mode_iterator HISI [HI SI])
++
++;; Iterator for QImode extension patterns.
++(define_mode_iterator SUPERQI [HI SI (DI "TARGET_64BIT")])
++
++;; Iterator for hardware integer modes narrower than XLEN.
++(define_mode_iterator SUBX [QI HI (SI "TARGET_64BIT")])
++
++;; Iterator for hardware-supported integer modes.
++(define_mode_iterator ANYI [QI HI SI (DI "TARGET_64BIT")])
++
++;; Iterator for hardware-supported floating-point modes.
++(define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
++ (DF "TARGET_DOUBLE_FLOAT")])
++
++;; This attribute gives the length suffix for a sign- or zero-extension
++;; instruction.
++(define_mode_attr size [(QI "b") (HI "h")])
++
++;; Mode attributes for loads.
++(define_mode_attr load [(QI "lb") (HI "lh") (SI "lw") (DI "ld") (SF "flw") (DF "fld")])
++
++;; Instruction names for stores.
++(define_mode_attr store [(QI "sb") (HI "sh") (SI "sw") (DI "sd") (SF "fsw") (DF "fsd")])
++
++;; This attribute gives the best constraint to use for registers of
++;; a given mode.
++(define_mode_attr reg [(SI "d") (DI "d") (CC "d")])
++
++;; This attribute gives the format suffix for floating-point operations.
++(define_mode_attr fmt [(SF "s") (DF "d")])
++
++;; This attribute gives the integer suffix for floating-point conversions.
++(define_mode_attr ifmt [(SI "w") (DI "l")])
++
++;; This attribute gives the format suffix for atomic memory operations.
++(define_mode_attr amo [(SI "w") (DI "d")])
++
++;; This attribute gives the upper-case mode name for one unit of a
++;; floating-point mode.
++(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
++
++;; This attribute gives the integer mode that has half the size of
++;; the controlling mode.
++(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
++
++;; Iterator and attributes for floating-point rounding instructions.
++(define_int_iterator RINT [UNSPEC_LRINT UNSPEC_LROUND])
++(define_int_attr rint_pattern [(UNSPEC_LRINT "rint") (UNSPEC_LROUND "round")])
++(define_int_attr rint_rm [(UNSPEC_LRINT "dyn") (UNSPEC_LROUND "rmm")])
++
++;; Iterator and attributes for quiet comparisons.
++(define_int_iterator QUIET_COMPARISON [UNSPEC_FLT_QUIET UNSPEC_FLE_QUIET])
++(define_int_attr quiet_pattern [(UNSPEC_FLT_QUIET "lt") (UNSPEC_FLE_QUIET "le")])
++
++;; This code iterator allows signed and unsigned widening multiplications
++;; to use the same template.
++(define_code_iterator any_extend [sign_extend zero_extend])
++
++;; This code iterator allows the two right shift instructions to be
++;; generated from the same template.
++(define_code_iterator any_shiftrt [ashiftrt lshiftrt])
++
++;; This code iterator allows the three shift instructions to be generated
++;; from the same template.
++(define_code_iterator any_shift [ashift ashiftrt lshiftrt])
++
++;; This code iterator allows the three bitwise instructions to be generated
++;; from the same template.
++(define_code_iterator any_bitwise [and ior xor])
++
++;; This code iterator allows signed and unsigned division and remainder
++;; to be generated from the same template.
++(define_code_iterator any_div [div udiv mod umod])
++
++;; This code iterator allows unsigned and signed modulus to be generated
++;; from the same template.
++(define_code_iterator any_mod [mod umod])
++
++;; These code iterators allow the signed and unsigned scc operations to use
++;; the same template.
++(define_code_iterator any_gt [gt gtu])
++(define_code_iterator any_ge [ge geu])
++(define_code_iterator any_lt [lt ltu])
++(define_code_iterator any_le [le leu])
++
++;; <u> expands to an empty string when doing a signed operation and
++;; "u" when doing an unsigned operation.
++(define_code_attr u [(sign_extend "") (zero_extend "u")
++ (gt "") (gtu "u")
++ (ge "") (geu "u")
++ (lt "") (ltu "u")
++ (le "") (leu "u")])
++
++;; <su> is like <u>, but the signed form expands to "s" rather than "".
++(define_code_attr su [(sign_extend "s") (zero_extend "u")])
++
++;; <optab> expands to the name of the optab for a particular code.
++(define_code_attr optab [(ashift "ashl")
++ (ashiftrt "ashr")
++ (lshiftrt "lshr")
++ (div "div")
++ (mod "mod")
++ (udiv "udiv")
++ (umod "umod")
++ (ge "ge")
++ (le "le")
++ (gt "gt")
++ (lt "lt")
++ (ior "ior")
++ (xor "xor")
++ (and "and")
++ (plus "add")
++ (minus "sub")])
++
++;; <insn> expands to the name of the insn that implements a particular code.
++(define_code_attr insn [(ashift "sll")
++ (ashiftrt "sra")
++ (lshiftrt "srl")
++ (div "div")
++ (mod "rem")
++ (udiv "divu")
++ (umod "remu")
++ (ior "or")
++ (xor "xor")
++ (and "and")
++ (plus "add")
++ (minus "sub")])
++
++;; Ghost instructions produce no real code and introduce no hazards.
++;; They exist purely to express an effect on dataflow.
++(define_insn_reservation "ghost" 0
++ (eq_attr "type" "ghost")
++ "nothing")
++
++;;
++;; ....................
++;;
++;; ADDITION
++;;
++;; ....................
++;;
++
++(define_insn "add<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (plus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fadd.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "addsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (plus:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "arith_operand" "r,I")))]
++ ""
++ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "adddi3"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (plus:DI (match_operand:DI 1 "register_operand" "r,r")
++ (match_operand:DI 2 "arith_operand" "r,I")))]
++ "TARGET_64BIT"
++ "add\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "DI")])
++
++(define_insn "*addsi3_extended"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (sign_extend:DI
++ (plus:SI (match_operand:SI 1 "register_operand" "r,r")
++ (match_operand:SI 2 "arith_operand" "r,I"))))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*addsi3_extended2"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (sign_extend:DI
++ (subreg:SI (plus:DI (match_operand:DI 1 "register_operand" "r,r")
++ (match_operand:DI 2 "arith_operand" "r,I"))
++ 0)))]
++ "TARGET_64BIT"
++ "addw\t%0,%1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; SUBTRACTION
++;;
++;; ....................
++;;
++
++(define_insn "sub<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (minus:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fsub.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "subdi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_64BIT"
++ "sub\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "DI")])
++
++(define_insn "subsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
++ (match_operand:SI 2 "register_operand" "r")))]
++ ""
++ { return TARGET_64BIT ? "subw\t%0,%z1,%2" : "sub\t%0,%z1,%2"; }
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subsi3_extended"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
++ (match_operand:SI 2 "register_operand" "r"))))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++(define_insn "*subsi3_extended2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (subreg:SI (minus:DI (match_operand:DI 1 "reg_or_0_operand" "r")
++ (match_operand:DI 2 "register_operand" "r"))
++ 0)))]
++ "TARGET_64BIT"
++ "subw\t%0,%z1,%2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; MULTIPLICATION
++;;
++;; ....................
++;;
++
++(define_insn "mul<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (mult:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmul.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmul")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "mulsi3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (mult:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_MUL"
++ { return TARGET_64BIT ? "mulw\t%0,%1,%2" : "mul\t%0,%1,%2"; }
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++(define_insn "muldi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mul\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "DI")])
++
++(define_insn "*mulsi3_extended"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (mult:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r"))))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mulw\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++(define_insn "*mulsi3_extended2"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (subreg:SI (mult:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r"))
++ 0)))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mulw\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++;;
++;; ........................
++;;
++;; MULTIPLICATION HIGH-PART
++;;
++;; ........................
++;;
++
++
++(define_expand "<u>mulditi3"
++ [(set (match_operand:TI 0 "register_operand")
++ (mult:TI (any_extend:TI (match_operand:DI 1 "register_operand"))
++ (any_extend:TI (match_operand:DI 2 "register_operand"))))]
++ "TARGET_MUL && TARGET_64BIT"
++{
++ rtx low = gen_reg_rtx (DImode);
++ emit_insn (gen_muldi3 (low, operands[1], operands[2]));
++
++ rtx high = gen_reg_rtx (DImode);
++ emit_insn (gen_<u>muldi3_highpart (high, operands[1], operands[2]));
++
++ emit_move_insn (gen_lowpart (DImode, operands[0]), low);
++ emit_move_insn (gen_highpart (DImode, operands[0]), high);
++ DONE;
++})
++
++(define_insn "<u>muldi3_highpart"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (truncate:DI
++ (lshiftrt:TI
++ (mult:TI (any_extend:TI
++ (match_operand:DI 1 "register_operand" "r"))
++ (any_extend:TI
++ (match_operand:DI 2 "register_operand" "r")))
++ (const_int 64))))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mulh<u>\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "DI")])
++
++(define_expand "usmulditi3"
++ [(set (match_operand:TI 0 "register_operand")
++ (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand"))
++ (sign_extend:TI (match_operand:DI 2 "register_operand"))))]
++ "TARGET_MUL && TARGET_64BIT"
++{
++ rtx low = gen_reg_rtx (DImode);
++ emit_insn (gen_muldi3 (low, operands[1], operands[2]));
++
++ rtx high = gen_reg_rtx (DImode);
++ emit_insn (gen_usmuldi3_highpart (high, operands[1], operands[2]));
++
++ emit_move_insn (gen_lowpart (DImode, operands[0]), low);
++ emit_move_insn (gen_highpart (DImode, operands[0]), high);
++ DONE;
++})
++
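++;; The mulhsu instruction takes its signed source first, so the output
++;; template below passes operand 2 (the sign-extended one) before
++;; operand 1.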
++(define_insn "usmuldi3_highpart"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (truncate:DI
++ (lshiftrt:TI
++ (mult:TI (zero_extend:TI
++ (match_operand:DI 1 "register_operand" "r"))
++ (sign_extend:TI
++ (match_operand:DI 2 "register_operand" "r")))
++ (const_int 64))))]
++ "TARGET_MUL && TARGET_64BIT"
++ "mulhsu\t%0,%2,%1"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "DI")])
++
++(define_expand "<u>mulsidi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI (any_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (any_extend:DI
++ (match_operand:SI 2 "register_operand" "r"))))]
++ "TARGET_MUL && !TARGET_64BIT"
++{
++ rtx temp = gen_reg_rtx (SImode);
++ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++ emit_insn (gen_<u>mulsi3_highpart (riscv_subword (operands[0], true),
++ operands[1], operands[2]));
++ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
++ DONE;
++})
++
++(define_insn "<u>mulsi3_highpart"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (lshiftrt:DI
++ (mult:DI (any_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (any_extend:DI
++ (match_operand:SI 2 "register_operand" "r")))
++ (const_int 32))))]
++ "TARGET_MUL && !TARGET_64BIT"
++ "mulh<u>\t%0,%1,%2"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++
++(define_expand "usmulsidi3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (mult:DI (zero_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI
++ (match_operand:SI 2 "register_operand" "r"))))]
++ "TARGET_MUL && !TARGET_64BIT"
++{
++ rtx temp = gen_reg_rtx (SImode);
++ emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++ emit_insn (gen_usmulsi3_highpart (riscv_subword (operands[0], true),
++ operands[1], operands[2]));
++ emit_insn (gen_movsi (riscv_subword (operands[0], false), temp));
++ DONE;
++})
++
++(define_insn "usmulsi3_highpart"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (truncate:SI
++ (lshiftrt:DI
++ (mult:DI (zero_extend:DI
++ (match_operand:SI 1 "register_operand" "r"))
++ (sign_extend:DI
++ (match_operand:SI 2 "register_operand" "r")))
++ (const_int 32))))]
++ "TARGET_MUL && !TARGET_64BIT"
++ "mulhsu\t%0,%2,%1"
++ [(set_attr "type" "imul")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; DIVISION and REMAINDER
++;;
++;; ....................
++;;
++
++(define_insn "<optab>si3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_div:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r")))]
++ "TARGET_DIV"
++ { return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2"; }
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "SI")])
++
++(define_insn "<optab>di3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (any_div:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "register_operand" "r")))]
++ "TARGET_DIV && TARGET_64BIT"
++ "<insn>\t%0,%1,%2"
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "DI")])
++
++(define_insn "*<optab>si3_extended"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (any_div:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "register_operand" "r"))))]
++ "TARGET_DIV && TARGET_64BIT"
++ "<insn>w\t%0,%1,%2"
++ [(set_attr "type" "idiv")
++ (set_attr "mode" "DI")])
++
++(define_insn "div<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (div:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_FDIV"
++ "fdiv.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fdiv")
++ (set_attr "mode" "<UNITMODE>")])
++
++;;
++;; ....................
++;;
++;; SQUARE ROOT
++;;
++;; ....................
++
++(define_insn "sqrt<mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT && TARGET_FDIV"
++{
++ return "fsqrt.<fmt>\t%0,%1";
++}
++ [(set_attr "type" "fsqrt")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; Floating point multiply accumulate instructions.
++
++;; a * b + c
++(define_insn "fma<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; a * b - c
++(define_insn "fms<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT"
++ "fmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; -a * b - c
++(define_insn "fnms<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (fma:ANYF
++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
++ (match_operand:ANYF 2 "register_operand" "f")
++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT"
++ "fnmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; -a * b + c
++(define_insn "fnma<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (fma:ANYF
++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
++ (match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fnmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; -(-a * b - c), modulo signed zeros
++(define_insn "*fma<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF
++ (fma:ANYF
++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
++ (match_operand:ANYF 2 "register_operand" "f")
++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
++ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++ "fmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; -(-a * b + c), modulo signed zeros
++(define_insn "*fms<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF
++ (fma:ANYF
++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f"))
++ (match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++ "fmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; -(a * b + c), modulo signed zeros
++(define_insn "*fnms<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f"))))]
++ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++ "fnmadd.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;; -(a * b - c), modulo signed zeros
++(define_insn "*fnma<mode>4"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF
++ (fma:ANYF
++ (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")
++ (neg:ANYF (match_operand:ANYF 3 "register_operand" "f")))))]
++ "TARGET_HARD_FLOAT && !HONOR_SIGNED_ZEROS (<MODE>mode)"
++ "fnmsub.<fmt>\t%0,%1,%2,%3"
++ [(set_attr "type" "fmadd")
++ (set_attr "mode" "<UNITMODE>")])
++
++;;
++;; ....................
++;;
++;; SIGN INJECTION
++;;
++;; ....................
++
++(define_insn "abs<mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (abs:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fabs.<fmt>\t%0,%1"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "copysign<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")]
++ UNSPEC_COPYSIGN))]
++ "TARGET_HARD_FLOAT"
++ "fsgnj.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "neg<mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (neg:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fneg.<fmt>\t%0,%1"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++;;
++;; ....................
++;;
++;; MIN/MAX
++;;
++;; ....................
++
++(define_insn "smin<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmin.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "smax<mode>3"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fmax.<fmt>\t%0,%1,%2"
++ [(set_attr "type" "fmove")
++ (set_attr "mode" "<UNITMODE>")])
++
++;;
++;; ....................
++;;
++;; LOGICAL
++;;
++;; ....................
++;;
++
++;; For RV64, we don't expose the SImode operations to the rtl expanders,
++;; but SImode versions exist for combine.
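++;; (Informative sketch: for code like "int c = a & b;" on RV64, combine can
++;; match the SImode *<optab>si3_internal pattern below directly, avoiding
++;; the explicit sign-extensions a DImode-only description would force.)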
++
++(define_insn "<optab><mode>3"
++ [(set (match_operand:X 0 "register_operand" "=r,r")
++ (any_bitwise:X (match_operand:X 1 "register_operand" "%r,r")
++ (match_operand:X 2 "arith_operand" "r,I")))]
++ ""
++ "<insn>\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "*<optab>si3_internal"
++ [(set (match_operand:SI 0 "register_operand" "=r,r")
++ (any_bitwise:SI (match_operand:SI 1 "register_operand" "%r,r")
++ (match_operand:SI 2 "arith_operand" "r,I")))]
++ "TARGET_64BIT"
++ "<insn>\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "SI")])
++
++(define_insn "one_cmpl<mode>2"
++ [(set (match_operand:X 0 "register_operand" "=r")
++ (not:X (match_operand:X 1 "register_operand" "r")))]
++ ""
++ "not\t%0,%1"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "*one_cmplsi2_internal"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (not:SI (match_operand:SI 1 "register_operand" "r")))]
++ "TARGET_64BIT"
++ "not\t%0,%1"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; TRUNCATION
++;;
++;; ....................
++
++(define_insn "truncdfsf2"
++ [(set (match_operand:SF 0 "register_operand" "=f")
++ (float_truncate:SF (match_operand:DF 1 "register_operand" "f")))]
++ "TARGET_DOUBLE_FLOAT"
++ "fcvt.s.d\t%0,%1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "SF")])
++
++;;
++;; ....................
++;;
++;; ZERO EXTENSION
++;;
++;; ....................
++
++;; Extension insns.
++
++(define_insn_and_split "zero_extendsidi2"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (zero_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
++ "TARGET_64BIT"
++ "@
++ #
++ lwu\t%0,%1"
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0)
++ (ashift:DI (match_dup 1) (const_int 32)))
++ (set (match_dup 0)
++ (lshiftrt:DI (match_dup 0) (const_int 32)))]
++ { operands[1] = gen_lowpart (DImode, operands[1]); }
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "DI")])
++
++(define_insn_and_split "zero_extendhi<GPR:mode>2"
++ [(set (match_operand:GPR 0 "register_operand" "=r,r")
++ (zero_extend:GPR (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ #
++ lhu\t%0,%1"
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0)
++ (ashift:GPR (match_dup 1) (match_dup 2)))
++ (set (match_dup 0)
++ (lshiftrt:GPR (match_dup 0) (match_dup 2)))]
++ {
++ operands[1] = gen_lowpart (<GPR:MODE>mode, operands[1]);
++ operands[2] = GEN_INT (GET_MODE_BITSIZE (<GPR:MODE>mode) - 16);
++ }
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "<GPR:MODE>")])
++
++(define_insn "zero_extendqi<SUPERQI:mode>2"
++ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
++ (zero_extend:SUPERQI
++ (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ and\t%0,%1,0xff
++ lbu\t%0,%1"
++ [(set_attr "move_type" "andi,load")
++ (set_attr "mode" "<SUPERQI:MODE>")])
++
++;;
++;; ....................
++;;
++;; SIGN EXTENSION
++;;
++;; ....................
++
++(define_insn "extendsidi2"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m")))]
++ "TARGET_64BIT"
++ "@
++ sext.w\t%0,%1
++ lw\t%0,%1"
++ [(set_attr "move_type" "move,load")
++ (set_attr "mode" "DI")])
++
++(define_insn_and_split "extend<SHORT:mode><SUPERQI:mode>2"
++ [(set (match_operand:SUPERQI 0 "register_operand" "=r,r")
++ (sign_extend:SUPERQI
++ (match_operand:SHORT 1 "nonimmediate_operand" "r,m")))]
++ ""
++ "@
++ #
++ l<SHORT:size>\t%0,%1"
++ "&& reload_completed && REG_P (operands[1])"
++ [(set (match_dup 0) (ashift:SI (match_dup 1) (match_dup 2)))
++ (set (match_dup 0) (ashiftrt:SI (match_dup 0) (match_dup 2)))]
++{
++ operands[0] = gen_lowpart (SImode, operands[0]);
++ operands[1] = gen_lowpart (SImode, operands[1]);
++ operands[2] = GEN_INT (GET_MODE_BITSIZE (SImode)
++ - GET_MODE_BITSIZE (<SHORT:MODE>mode));
++}
++ [(set_attr "move_type" "shift_shift,load")
++ (set_attr "mode" "SI")])
++
++(define_insn "extendsfdf2"
++ [(set (match_operand:DF 0 "register_operand" "=f")
++ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
++ "TARGET_DOUBLE_FLOAT"
++ "fcvt.d.s\t%0,%1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "DF")])
++
++;;
++;; ....................
++;;
++;; CONVERSIONS
++;;
++;; ....................
++
++(define_insn "fix_trunc<ANYF:mode><GPR:mode>2"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (fix:GPR (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.<GPR:ifmt>.<ANYF:fmt> %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "<ANYF:MODE>")])
++
++(define_insn "fixuns_trunc<ANYF:mode><GPR:mode>2"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (unsigned_fix:GPR (match_operand:ANYF 1 "register_operand" "f")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.<GPR:ifmt>u.<ANYF:fmt> %0,%1,rtz"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "<ANYF:MODE>")])
++
++(define_insn "float<GPR:mode><ANYF:mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (float:ANYF (match_operand:GPR 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.<ANYF:fmt>.<GPR:ifmt>\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "<ANYF:MODE>")])
++
++(define_insn "floatuns<GPR:mode><ANYF:mode>2"
++ [(set (match_operand:ANYF 0 "register_operand" "=f")
++ (unsigned_float:ANYF (match_operand:GPR 1 "reg_or_0_operand" "rJ")))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.<ANYF:fmt>.<GPR:ifmt>u\t%0,%z1"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "<ANYF:MODE>")])
++
++(define_insn "l<rint_pattern><ANYF:mode><GPR:mode>2"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (unspec:GPR [(match_operand:ANYF 1 "register_operand" "f")]
++ RINT))]
++ "TARGET_HARD_FLOAT"
++ "fcvt.<GPR:ifmt>.<ANYF:fmt> %0,%1,<rint_rm>"
++ [(set_attr "type" "fcvt")
++ (set_attr "mode" "<ANYF:MODE>")])
++
++;;
++;; ....................
++;;
++;; DATA MOVEMENT
++;;
++;; ....................
++
++;; Lower-level instructions for loading an address from the GOT.
++;; We could use MEMs, but an unspec gives more optimization
++;; opportunities.
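++;; (Illustrative, an assumption about assembler behavior rather than this
++;; patch: under PIC the assembler expands "la a0,sym" to an auipc/load pair
++;; against the GOT entry, roughly
++;;   1: auipc a0,%got_pcrel_hi(sym)
++;;      ld    a0,%pcrel_lo(1b)(a0)
++;; the details are the assembler's business, which is the point.)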
++
++(define_insn "got_load<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++ UNSPEC_LOAD_GOT))]
++ ""
++ "la\t%0,%1"
++ [(set_attr "got" "load")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "tls_add_tp_le<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "register_operand" "r")
++ (match_operand:P 2 "register_operand" "r")
++ (match_operand:P 3 "symbolic_operand" "")]
++ UNSPEC_TLS_LE))]
++ ""
++ "add\t%0,%1,%2,%%tprel_add(%3)"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "got_load_tls_gd<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++ UNSPEC_TLS_GD))]
++ ""
++ "la.tls.gd\t%0,%1"
++ [(set_attr "got" "load")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "got_load_tls_ie<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++ UNSPEC_TLS_IE))]
++ ""
++ "la.tls.ie\t%0,%1"
++ [(set_attr "got" "load")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "auipc<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")
++ (match_operand:P 2 "const_int_operand")
++ (pc)]
++ UNSPEC_AUIPC))]
++ ""
++ ".LA%2: auipc\t%0,%h1"
++ [(set_attr "type" "arith")
++ (set_attr "cannot_copy" "yes")])
++
++;; Instructions for adding the low 12 bits of an address to a register.
++;; Operand 2 is the address: riscv_print_operand works out which relocation
++;; should be applied.
++
++(define_insn "*low<mode>"
++ [(set (match_operand:P 0 "register_operand" "=r")
++ (lo_sum:P (match_operand:P 1 "register_operand" "r")
++ (match_operand:P 2 "symbolic_operand" "")))]
++ ""
++ "addi\t%0,%1,%R2"
++ [(set_attr "type" "arith")
++ (set_attr "mode" "<MODE>")])
++
++;; Allow combine to split complex const_int load sequences, using operand 2
++;; to store the intermediate results. See move_operand for details.
++(define_split
++ [(set (match_operand:GPR 0 "register_operand")
++ (match_operand:GPR 1 "splittable_const_int_operand"))
++ (clobber (match_operand:GPR 2 "register_operand"))]
++ ""
++ [(const_int 0)]
++{
++ riscv_move_integer (operands[2], operands[0], INTVAL (operands[1]));
++ DONE;
++})
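++
++;; Illustrative only: riscv_move_integer builds such constants from
++;; lui/addi steps, e.g. 0x12345678 becomes roughly
++;;   lui  a0,0x12345
++;;   addi a0,a0,0x678
++;; with larger RV64 constants using additional shift/add steps.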
++
++;; Likewise, for symbolic operands.
++(define_split
++ [(set (match_operand:P 0 "register_operand")
++ (match_operand:P 1))
++ (clobber (match_operand:P 2 "register_operand"))]
++ "riscv_split_symbol (operands[2], operands[1], MAX_MACHINE_MODE, NULL)"
++ [(set (match_dup 0) (match_dup 3))]
++{
++ riscv_split_symbol (operands[2], operands[1],
++ MAX_MACHINE_MODE, &operands[3]);
++})
++
++;; 64-bit integer moves
++
++(define_expand "movdi"
++ [(set (match_operand:DI 0 "")
++ (match_operand:DI 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (DImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movdi_32bit"
++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,m, *f,*f,*r,*f,*m")
++ (match_operand:DI 1 "move_operand" " r,i,m,r,*J*r,*m,*f,*f,*f"))]
++ "!TARGET_64BIT
++ && (register_operand (operands[0], DImode)
++ || reg_or_0_operand (operands[1], DImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore")
++ (set_attr "mode" "DI")])
++
++(define_insn "*movdi_64bit"
++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r, m,*f,*f,*r,*f,*m")
++ (match_operand:DI 1 "move_operand" " r,T,m,rJ,*r*J,*m,*f,*f,*f"))]
++ "TARGET_64BIT
++ && (register_operand (operands[0], DImode)
++ || reg_or_0_operand (operands[1], DImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fmove,fpstore")
++ (set_attr "mode" "DI")])
++
++;; 32-bit Integer moves
++
++(define_expand "mov<mode>"
++ [(set (match_operand:MOVE32 0 "")
++ (match_operand:MOVE32 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (<MODE>mode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movsi_internal"
++ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,*r,*m")
++ (match_operand:SI 1 "move_operand" "r,T,m,rJ,*r*J,*m,*f,*f"))]
++ "(register_operand (operands[0], SImode)
++ || reg_or_0_operand (operands[1], SImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,fpload,mfc,fpstore")
++ (set_attr "mode" "SI")])
++
++;; 16-bit Integer moves
++
++;; Unlike most other insns, the move insns can't be split with
++;; different predicates, because register spilling and other parts of
++;; the compiler have memoized the insn number already.
++;; Unsigned loads are used because LOAD_EXTEND_OP returns ZERO_EXTEND.
++
++(define_expand "movhi"
++ [(set (match_operand:HI 0 "")
++ (match_operand:HI 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (HImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movhi_internal"
++ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
++ (match_operand:HI 1 "move_operand" "r,T,m,rJ,*r*J,*f"))]
++ "(register_operand (operands[0], HImode)
++ || reg_or_0_operand (operands[1], HImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
++ (set_attr "mode" "HI")])
++
++;; HImode constant generation; see riscv_move_integer for details.
++;; si+si->hi without truncation is legal because of TRULY_NOOP_TRUNCATION.
++
++(define_insn "*add<mode>hi3"
++ [(set (match_operand:HI 0 "register_operand" "=r,r")
++ (plus:HI (match_operand:HISI 1 "register_operand" "r,r")
++ (match_operand:HISI 2 "arith_operand" "r,I")))]
++ ""
++ { return TARGET_64BIT ? "addw\t%0,%1,%2" : "add\t%0,%1,%2"; }
++ [(set_attr "type" "arith")
++ (set_attr "mode" "HI")])
++
++(define_insn "*xor<mode>hi3"
++ [(set (match_operand:HI 0 "register_operand" "=r,r")
++ (xor:HI (match_operand:HISI 1 "register_operand" "r,r")
++ (match_operand:HISI 2 "arith_operand" "r,I")))]
++ ""
++ "xor\t%0,%1,%2"
++ [(set_attr "type" "logical")
++ (set_attr "mode" "HI")])
++
++;; 8-bit Integer moves
++
++(define_expand "movqi"
++ [(set (match_operand:QI 0 "")
++ (match_operand:QI 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (QImode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movqi_internal"
++ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m,*f,*r")
++ (match_operand:QI 1 "move_operand" "r,I,m,rJ,*r*J,*f"))]
++ "(register_operand (operands[0], QImode)
++ || reg_or_0_operand (operands[1], QImode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,const,load,store,mtc,mfc")
++ (set_attr "mode" "QI")])
++
++;; 32-bit floating point moves
++
++(define_expand "movsf"
++ [(set (match_operand:SF 0 "")
++ (match_operand:SF 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (SFmode, operands[0], operands[1]))
++ DONE;
++})
++
++(define_insn "*movsf_hardfloat"
++ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
++ (match_operand:SF 1 "move_operand" "f,G,m,f,G,*r,*f,*G*r,*m,*r"))]
++ "TARGET_HARD_FLOAT
++ && (register_operand (operands[0], SFmode)
++ || reg_or_0_operand (operands[1], SFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
++ (set_attr "mode" "SF")])
++
++(define_insn "*movsf_softfloat"
++ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m")
++ (match_operand:SF 1 "move_operand" "Gr,m,r"))]
++ "!TARGET_HARD_FLOAT
++ && (register_operand (operands[0], SFmode)
++ || reg_or_0_operand (operands[1], SFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,load,store")
++ (set_attr "mode" "SF")])
++
++;; 64-bit floating point moves
++
++(define_expand "movdf"
++ [(set (match_operand:DF 0 "")
++ (match_operand:DF 1 ""))]
++ ""
++{
++ if (riscv_legitimize_move (DFmode, operands[0], operands[1]))
++ DONE;
++})
++
++;; In RV32, we lack fmv.x.d and fmv.d.x. Go through memory instead.
++;; (However, we can still use fcvt.d.w to zero a floating-point register.)
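++;; Illustrative sequence (an assumption, not mandated by the pattern):
++;; moving a DFmode value from fa0 to a0/a1 on RV32 goes via the stack:
++;;   fsd fa0,0(sp)
++;;   lw  a0,0(sp)
++;;   lw  a1,4(sp)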
++(define_insn "*movdf_hardfloat_rv32"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*r,*r,*m")
++ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r*G,*m,*r"))]
++ "!TARGET_64BIT && TARGET_DOUBLE_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || reg_or_0_operand (operands[1], DFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,move,load,store")
++ (set_attr "mode" "DF")])
++
++(define_insn "*movdf_hardfloat_rv64"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,m,*f,*r,*r,*r,*m")
++ (match_operand:DF 1 "move_operand" "f,G,m,f,G,*r,*f,*r*G,*m,*r"))]
++ "TARGET_64BIT && TARGET_DOUBLE_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || reg_or_0_operand (operands[1], DFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "fmove,mtc,fpload,fpstore,store,mtc,mfc,move,load,store")
++ (set_attr "mode" "DF")])
++
++(define_insn "*movdf_softfloat"
++ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
++ (match_operand:DF 1 "move_operand" "rG,m,rG"))]
++ "!TARGET_DOUBLE_FLOAT
++ && (register_operand (operands[0], DFmode)
++ || reg_or_0_operand (operands[1], DFmode))"
++ { return riscv_output_move (operands[0], operands[1]); }
++ [(set_attr "move_type" "move,load,store")
++ (set_attr "mode" "DF")])
++
++(define_split
++ [(set (match_operand:MOVE64 0 "nonimmediate_operand")
++ (match_operand:MOVE64 1 "move_operand"))]
++ "reload_completed
++ && riscv_split_64bit_move_p (operands[0], operands[1])"
++ [(const_int 0)]
++{
++ riscv_split_doubleword_move (operands[0], operands[1]);
++ DONE;
++})
++
++;; Expand in-line code to clear the instruction cache between operand[0] and
++;; operand[1].
++(define_expand "clear_cache"
++ [(match_operand 0 "pmode_register_operand")
++ (match_operand 1 "pmode_register_operand")]
++ ""
++{
++ emit_insn (gen_fence_i ());
++ DONE;
++})
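++
++;; Informative: __builtin___clear_cache is thus expected to reduce to a
++;; single fence.i, which synchronizes instruction fetch on the executing
++;; hart; no begin/end address range is needed.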
++
++(define_insn "fence"
++ [(unspec_volatile [(const_int 0)] UNSPECV_FENCE)]
++ ""
++ "%|fence%-")
++
++(define_insn "fence_i"
++ [(unspec_volatile [(const_int 0)] UNSPECV_FENCE_I)]
++ ""
++ "fence.i")
++
++;;
++;; ....................
++;;
++;; SHIFTS
++;;
++;; ....................
++
++(define_insn "<optab>si3"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "arith_operand" "rI")))]
++ ""
++{
++ if (GET_CODE (operands[2]) == CONST_INT)
++ operands[2] = GEN_INT (INTVAL (operands[2])
++ & (GET_MODE_BITSIZE (SImode) - 1));
++
++ return TARGET_64BIT ? "<insn>w\t%0,%1,%2" : "<insn>\t%0,%1,%2";
++}
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++(define_insn "<optab>di3"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (any_shift:DI (match_operand:DI 1 "register_operand" "r")
++ (match_operand:DI 2 "arith_operand" "rI")))]
++ "TARGET_64BIT"
++{
++ if (GET_CODE (operands[2]) == CONST_INT)
++ operands[2] = GEN_INT (INTVAL (operands[2])
++ & (GET_MODE_BITSIZE (DImode) - 1));
++
++ return "<insn>\t%0,%1,%2";
++}
++ [(set_attr "type" "shift")
++ (set_attr "mode" "DI")])
++
++(define_insn "*<optab>si3_extend"
++ [(set (match_operand:DI 0 "register_operand" "=r")
++ (sign_extend:DI
++ (any_shift:SI (match_operand:SI 1 "register_operand" "r")
++ (match_operand:SI 2 "arith_operand" "rI"))))]
++ "TARGET_64BIT"
++{
++ if (GET_CODE (operands[2]) == CONST_INT)
++ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
++
++ return "<insn>w\t%0,%1,%2";
++}
++ [(set_attr "type" "shift")
++ (set_attr "mode" "SI")])
++
++;;
++;; ....................
++;;
++;; CONDITIONAL BRANCHES
++;;
++;; ....................
++
++;; Conditional branches
++
++(define_insn "*branch_order<mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 1 "order_operator"
++ [(match_operand:X 2 "register_operand" "r")
++ (match_operand:X 3 "register_operand" "r")])
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++ "b%C1\t%2,%3,%0"
++ [(set_attr "type" "branch")
++ (set_attr "mode" "none")])
++
++(define_insn "*branch_zero<mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 1 "signed_order_operator"
++ [(match_operand:X 2 "register_operand" "r")
++ (const_int 0)])
++ (label_ref (match_operand 0 "" ""))
++ (pc)))]
++ ""
++ "b%C1z\t%2,%0"
++ [(set_attr "type" "branch")
++ (set_attr "mode" "none")])
++
++;; Used to implement built-in functions.
++(define_expand "condjump"
++ [(set (pc)
++ (if_then_else (match_operand 0)
++ (label_ref (match_operand 1))
++ (pc)))])
++
++(define_expand "cbranch<mode>4"
++ [(set (pc)
++ (if_then_else (match_operator 0 "comparison_operator"
++ [(match_operand:BR 1 "register_operand")
++ (match_operand:BR 2 "nonmemory_operand")])
++ (label_ref (match_operand 3 ""))
++ (pc)))]
++ ""
++{
++ riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
++ operands[1], operands[2]);
++ DONE;
++})
++
++(define_expand "cbranch<mode>4"
++ [(set (pc)
++ (if_then_else (match_operator 0 "fp_branch_comparison"
++ [(match_operand:ANYF 1 "register_operand")
++ (match_operand:ANYF 2 "register_operand")])
++ (label_ref (match_operand 3 ""))
++ (pc)))]
++ "TARGET_HARD_FLOAT"
++{
++ riscv_expand_conditional_branch (operands[3], GET_CODE (operands[0]),
++ operands[1], operands[2]);
++ DONE;
++})
++
++(define_insn_and_split "*branch_on_bit<X:mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 0 "equality_operator"
++ [(zero_extract:X (match_operand:X 2 "register_operand" "r")
++ (const_int 1)
++ (match_operand 3 "branch_on_bit_operand"))
++ (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))
++ (clobber (match_scratch:X 4 "=&r"))]
++ ""
++ "#"
++ "reload_completed"
++ [(set (match_dup 4)
++ (ashift:X (match_dup 2) (match_dup 3)))
++ (set (pc)
++ (if_then_else
++ (match_op_dup 0 [(match_dup 4) (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))]
++{
++ int shift = GET_MODE_BITSIZE (<MODE>mode) - 1 - INTVAL (operands[3]);
++ operands[3] = GEN_INT (shift);
++
++ if (GET_CODE (operands[0]) == EQ)
++ operands[0] = gen_rtx_GE (<MODE>mode, operands[4], const0_rtx);
++ else
++ operands[0] = gen_rtx_LT (<MODE>mode, operands[4], const0_rtx);
++})
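++
++;; Informative example (our reading of the split above): a test such as
++;; "if (x & (1 << 3))" can become, in SImode,
++;;   slli t0,a0,28    # move bit 3 into the sign bit
++;;   bltz t0,.Ltarget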
++
++(define_insn_and_split "*branch_on_bit_range<X:mode>"
++ [(set (pc)
++ (if_then_else
++ (match_operator 0 "equality_operator"
++ [(zero_extract:X (match_operand:X 2 "register_operand" "r")
++ (match_operand 3 "branch_on_bit_operand")
++ (const_int 0))
++ (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))
++ (clobber (match_scratch:X 4 "=&r"))]
++ ""
++ "#"
++ "reload_completed"
++ [(set (match_dup 4)
++ (ashift:X (match_dup 2) (match_dup 3)))
++ (set (pc)
++ (if_then_else
++ (match_op_dup 0 [(match_dup 4) (const_int 0)])
++ (label_ref (match_operand 1))
++ (pc)))]
++{
++ operands[3] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - INTVAL (operands[3]));
++})
++
++;;
++;; ....................
++;;
++;; SETTING A REGISTER FROM A COMPARISON
++;;
++;; ....................
++
++;; Destination is always set in SI mode.
++
++(define_expand "cstore<mode>4"
++ [(set (match_operand:SI 0 "register_operand")
++ (match_operator:SI 1 "order_operator"
++ [(match_operand:GPR 2 "register_operand")
++ (match_operand:GPR 3 "nonmemory_operand")]))]
++ ""
++{
++ riscv_expand_int_scc (operands[0], GET_CODE (operands[1]), operands[2],
++ operands[3]);
++ DONE;
++})
++
++(define_expand "cstore<mode>4"
++ [(set (match_operand:SI 0 "register_operand")
++ (match_operator:SI 1 "fp_scc_comparison"
++ [(match_operand:ANYF 2 "register_operand")
++ (match_operand:ANYF 3 "register_operand")]))]
++ "TARGET_HARD_FLOAT"
++{
++ riscv_expand_float_scc (operands[0], GET_CODE (operands[1]), operands[2],
++ operands[3]);
++ DONE;
++})
++
++(define_insn "*cstore<ANYF:mode><X:mode>4"
++ [(set (match_operand:X 0 "register_operand" "=r")
++ (match_operator:X 1 "fp_native_comparison"
++ [(match_operand:ANYF 2 "register_operand" "f")
++ (match_operand:ANYF 3 "register_operand" "f")]))]
++ "TARGET_HARD_FLOAT"
++ "f%C1.<fmt>\t%0,%2,%3"
++ [(set_attr "type" "fcmp")
++ (set_attr "mode" "<UNITMODE>")])
++
++(define_insn "f<quiet_pattern>_quiet<ANYF:mode><X:mode>4"
++ [(set (match_operand:X 0 "register_operand" "=r")
++ (unspec:X
++ [(match_operand:ANYF 1 "register_operand" "f")
++ (match_operand:ANYF 2 "register_operand" "f")]
++ QUIET_COMPARISON))
++ (clobber (match_scratch:X 3 "=&r"))]
++ "TARGET_HARD_FLOAT"
++ "frflags\t%3\n\tf<quiet_pattern>.<fmt>\t%0,%1,%2\n\tfsflags %3"
++ [(set_attr "type" "fcmp")
++ (set_attr "mode" "<UNITMODE>")
++ (set (attr "length") (const_int 12))])
++
++(define_insn "*seq_zero_<X:mode><GPR:mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (eq:GPR (match_operand:X 1 "register_operand" "r")
++ (const_int 0)))]
++ ""
++ "seqz\t%0,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<X:MODE>")])
++
++(define_insn "*sne_zero_<X:mode><GPR:mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (ne:GPR (match_operand:X 1 "register_operand" "r")
++ (const_int 0)))]
++ ""
++ "snez\t%0,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<X:MODE>")])
++
++(define_insn "*sgt<u>_<X:mode><GPR:mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (any_gt:GPR (match_operand:X 1 "register_operand" "r")
++ (match_operand:X 2 "reg_or_0_operand" "rJ")))]
++ ""
++ "sgt<u>\t%0,%1,%z2"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<X:MODE>")])
++
++(define_insn "*sge<u>_<X:mode><GPR:mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (any_ge:GPR (match_operand:X 1 "register_operand" "r")
++ (const_int 1)))]
++ ""
++ "slt<u>\t%0,zero,%1"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "*slt<u>_<X:mode><GPR:mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (any_lt:GPR (match_operand:X 1 "register_operand" "r")
++ (match_operand:X 2 "arith_operand" "rI")))]
++ ""
++ "slt<u>\t%0,%1,%2"
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<MODE>")])
++
++(define_insn "*sle<u>_<X:mode><GPR:mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=r")
++ (any_le:GPR (match_operand:X 1 "register_operand" "r")
++ (match_operand:X 2 "sle_operand" "")))]
++ ""
++{
++ operands[2] = GEN_INT (INTVAL (operands[2]) + 1);
++ return "slt<u>\t%0,%1,%2";
++}
++ [(set_attr "type" "slt")
++ (set_attr "mode" "<MODE>")])
++
++;;
++;; ....................
++;;
++;; UNCONDITIONAL BRANCHES
++;;
++;; ....................
++
++;; Unconditional branches.
++
++(define_insn "jump"
++ [(set (pc)
++ (label_ref (match_operand 0 "" "")))]
++ ""
++ "j\t%l0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++(define_expand "indirect_jump"
++ [(set (pc) (match_operand 0 "register_operand"))]
++ ""
++{
++ operands[0] = force_reg (Pmode, operands[0]);
++ if (Pmode == SImode)
++ emit_jump_insn (gen_indirect_jumpsi (operands[0]));
++ else
++ emit_jump_insn (gen_indirect_jumpdi (operands[0]));
++ DONE;
++})
++
++(define_insn "indirect_jump<mode>"
++ [(set (pc) (match_operand:P 0 "register_operand" "l"))]
++ ""
++ "jr\t%0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++(define_expand "tablejump"
++ [(set (pc) (match_operand 0 "register_operand" ""))
++ (use (label_ref (match_operand 1 "" "")))]
++ ""
++{
++ if (CASE_VECTOR_PC_RELATIVE)
++ operands[0] = expand_simple_binop (Pmode, PLUS, operands[0],
++ gen_rtx_LABEL_REF (Pmode, operands[1]),
++ NULL_RTX, 0, OPTAB_DIRECT);
++
++ if (CASE_VECTOR_PC_RELATIVE && Pmode == DImode)
++ emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
++ else
++ emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
++ DONE;
++})
++
++(define_insn "tablejump<mode>"
++ [(set (pc) (match_operand:GPR 0 "register_operand" "l"))
++ (use (label_ref (match_operand 1 "" "")))]
++ ""
++ "jr\t%0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++;;
++;; ....................
++;;
++;; Function prologue/epilogue
++;;
++;; ....................
++;;
++
++(define_expand "prologue"
++ [(const_int 1)]
++ ""
++{
++ riscv_expand_prologue ();
++ DONE;
++})
++
++;; Block any insns from being moved before this point, since the
++;; profiling call to mcount can use various registers that aren't
++;; saved or used to pass arguments.
++
++(define_insn "blockage"
++ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
++ ""
++ ""
++ [(set_attr "type" "ghost")
++ (set_attr "mode" "none")])
++
++(define_expand "epilogue"
++ [(const_int 2)]
++ ""
++{
++ riscv_expand_epilogue (false);
++ DONE;
++})
++
++(define_expand "sibcall_epilogue"
++ [(const_int 2)]
++ ""
++{
++ riscv_expand_epilogue (true);
++ DONE;
++})
++
++;; Trivial return. Make it look like a normal return insn as that
++;; allows jump optimizations to work better.
++
++(define_expand "return"
++ [(simple_return)]
++ "riscv_can_use_return_insn ()"
++ "")
++
++(define_insn "simple_return"
++ [(simple_return)]
++ ""
++ "ret"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++;; Normal return.
++
++(define_insn "simple_return_internal"
++ [(simple_return)
++ (use (match_operand 0 "pmode_register_operand" ""))]
++ ""
++ "jr\t%0"
++ [(set_attr "type" "jump")
++ (set_attr "mode" "none")])
++
++;; This is used in compiling the unwind routines.
++(define_expand "eh_return"
++ [(use (match_operand 0 "general_operand"))]
++ ""
++{
++ if (GET_MODE (operands[0]) != word_mode)
++ operands[0] = convert_to_mode (word_mode, operands[0], 0);
++ if (TARGET_64BIT)
++ emit_insn (gen_eh_set_lr_di (operands[0]));
++ else
++ emit_insn (gen_eh_set_lr_si (operands[0]));
++ DONE;
++})
++
++;; Clobber the return address on the stack. We can't expand this
++;; until we know where it will be put in the stack frame.
++
++(define_insn "eh_set_lr_si"
++ [(unspec [(match_operand:SI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch:SI 1 "=&r"))]
++ "! TARGET_64BIT"
++ "#")
++
++(define_insn "eh_set_lr_di"
++ [(unspec [(match_operand:DI 0 "register_operand" "r")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch:DI 1 "=&r"))]
++ "TARGET_64BIT"
++ "#")
++
++(define_split
++ [(unspec [(match_operand 0 "register_operand")] UNSPEC_EH_RETURN)
++ (clobber (match_scratch 1))]
++ "reload_completed"
++ [(const_int 0)]
++{
++ riscv_set_return_address (operands[0], operands[1]);
++ DONE;
++})
++
++;;
++;; ....................
++;;
++;; FUNCTION CALLS
++;;
++;; ....................
++
++(define_expand "sibcall"
++ [(parallel [(call (match_operand 0 "")
++ (match_operand 1 ""))
++ (use (match_operand 2 "")) ;; next_arg_reg
++ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
++ ""
++{
++ rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
++ emit_call_insn (gen_sibcall_internal (target, operands[1]));
++ DONE;
++})
++
++(define_insn "sibcall_internal"
++ [(call (mem:SI (match_operand 0 "call_insn_operand" "j,S,U"))
++ (match_operand 1 "" ""))]
++ "SIBLING_CALL_P (insn)"
++ "@
++ jr\t%0
++ tail\t%0
++ tail\t%0@plt"
++ [(set_attr "type" "call")])
++
++(define_expand "sibcall_value"
++ [(parallel [(set (match_operand 0 "")
++ (call (match_operand 1 "")
++ (match_operand 2 "")))
++ (use (match_operand 3 ""))])] ;; next_arg_reg
++ ""
++{
++ rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
++ emit_call_insn (gen_sibcall_value_internal (operands[0], target, operands[2]));
++ DONE;
++})
++
++(define_insn "sibcall_value_internal"
++ [(set (match_operand 0 "" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "j,S,U"))
++ (match_operand 2 "" "")))]
++ "SIBLING_CALL_P (insn)"
++ "@
++ jr\t%1
++ tail\t%1
++ tail\t%1@plt"
++ [(set_attr "type" "call")])
++
++(define_expand "call"
++ [(parallel [(call (match_operand 0 "")
++ (match_operand 1 ""))
++ (use (match_operand 2 "")) ;; next_arg_reg
++ (use (match_operand 3 ""))])] ;; struct_value_size_rtx
++ ""
++{
++ rtx target = riscv_legitimize_call_address (XEXP (operands[0], 0));
++ emit_call_insn (gen_call_internal (target, operands[1]));
++ DONE;
++})
++
++(define_insn "call_internal"
++ [(call (mem:SI (match_operand 0 "call_insn_operand" "l,S,U"))
++ (match_operand 1 "" ""))
++ (clobber (reg:SI RETURN_ADDR_REGNUM))]
++ ""
++ "@
++ jalr\t%0
++ call\t%0
++ call\t%0@plt"
++ [(set_attr "type" "call")])
++
++(define_expand "call_value"
++ [(parallel [(set (match_operand 0 "")
++ (call (match_operand 1 "")
++ (match_operand 2 "")))
++ (use (match_operand 3 ""))])] ;; next_arg_reg
++ ""
++{
++ rtx target = riscv_legitimize_call_address (XEXP (operands[1], 0));
++ emit_call_insn (gen_call_value_internal (operands[0], target, operands[2]));
++ DONE;
++})
++
++(define_insn "call_value_internal"
++ [(set (match_operand 0 "" "")
++ (call (mem:SI (match_operand 1 "call_insn_operand" "l,S,U"))
++ (match_operand 2 "" "")))
++ (clobber (reg:SI RETURN_ADDR_REGNUM))]
++ ""
++ "@
++ jalr\t%1
++ call\t%1
++ call\t%1@plt"
++ [(set_attr "type" "call")])
++
++;; Call subroutine returning any type.
++
++(define_expand "untyped_call"
++ [(parallel [(call (match_operand 0 "")
++ (const_int 0))
++ (match_operand 1 "")
++ (match_operand 2 "")])]
++ ""
++{
++ int i;
++
++ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx));
++
++ for (i = 0; i < XVECLEN (operands[2], 0); i++)
++ {
++ rtx set = XVECEXP (operands[2], 0, i);
++ riscv_emit_move (SET_DEST (set), SET_SRC (set));
++ }
++
++ emit_insn (gen_blockage ());
++ DONE;
++})
++
++(define_insn "nop"
++ [(const_int 0)]
++ ""
++ "nop"
++ [(set_attr "type" "nop")
++ (set_attr "mode" "none")])
++
++(define_insn "trap"
++ [(trap_if (const_int 1) (const_int 0))]
++ ""
++ "ebreak")
++
++(define_insn "gpr_save"
++ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_SAVE)
++ (clobber (reg:SI T0_REGNUM))
++ (clobber (reg:SI T1_REGNUM))]
++ ""
++ { return riscv_output_gpr_save (INTVAL (operands[0])); })
++
++(define_insn "gpr_restore"
++ [(unspec_volatile [(match_operand 0 "const_int_operand")] UNSPECV_GPR_RESTORE)]
++ ""
++ "tail\t__riscv_restore_%0")
++
++(define_insn "gpr_restore_return"
++ [(return)
++ (use (match_operand 0 "pmode_register_operand" ""))
++ (const_int 0)]
++ ""
++ "")
++
++(define_insn "riscv_frflags"
++ [(set (match_operand:SI 0 "register_operand" "=r")
++ (unspec_volatile [(const_int 0)] UNSPECV_FRFLAGS))]
++ "TARGET_HARD_FLOAT"
++ "frflags %0")
++
++(define_insn "riscv_fsflags"
++ [(unspec_volatile [(match_operand:SI 0 "csr_operand" "rK")] UNSPECV_FSFLAGS)]
++ "TARGET_HARD_FLOAT"
++ "fsflags %0")
++
++(define_insn "stack_tie<mode>"
++ [(set (mem:BLK (scratch))
++ (unspec:BLK [(match_operand:X 0 "register_operand" "r")
++ (match_operand:X 1 "register_operand" "r")]
++ UNSPEC_TIE))]
++ ""
++ ""
++ [(set_attr "length" "0")]
++)
++
++(include "sync.md")
++(include "peephole.md")
++(include "pic.md")
++(include "generic.md")
+diff --git original-gcc/gcc/config/riscv/riscv.opt gcc-6.3.0/gcc/config/riscv/riscv.opt
+new file mode 100644
+index 00000000000..0466bb29d14
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/riscv.opt
+@@ -0,0 +1,111 @@
++; Options for the RISC-V port of the compiler
++;
++; Copyright (C) 2011-2017 Free Software Foundation, Inc.
++;
++; This file is part of GCC.
++;
++; GCC is free software; you can redistribute it and/or modify it under
++; the terms of the GNU General Public License as published by the Free
++; Software Foundation; either version 3, or (at your option) any later
++; version.
++;
++; GCC is distributed in the hope that it will be useful, but WITHOUT
++; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
++; License for more details.
++;
++; You should have received a copy of the GNU General Public License
++; along with GCC; see the file COPYING3. If not see
++; <http://www.gnu.org/licenses/>.
++
++HeaderInclude
++config/riscv/riscv-opts.h
++
++mbranch-cost=
++Target RejectNegative Joined UInteger Var(riscv_branch_cost)
++-mbranch-cost=N Set the cost of branches to roughly N instructions.
++
++mplt
++Target Report Var(TARGET_PLT) Init(1)
++When generating -fpic code, allow the use of PLTs. Ignored for -fno-pic.
++
++mabi=
++Target Report RejectNegative Joined Enum(abi_type) Var(riscv_abi) Init(ABI_ILP32)
++Specify integer and floating-point calling convention.
++
++Enum
++Name(abi_type) Type(enum riscv_abi_type)
++Supported ABIs (for use with the -mabi= option):
++
++EnumValue
++Enum(abi_type) String(ilp32) Value(ABI_ILP32)
++
++EnumValue
++Enum(abi_type) String(ilp32f) Value(ABI_ILP32F)
++
++EnumValue
++Enum(abi_type) String(ilp32d) Value(ABI_ILP32D)
++
++EnumValue
++Enum(abi_type) String(lp64) Value(ABI_LP64)
++
++EnumValue
++Enum(abi_type) String(lp64f) Value(ABI_LP64F)
++
++EnumValue
++Enum(abi_type) String(lp64d) Value(ABI_LP64D)
++
++mfdiv
++Target Report Mask(FDIV)
++Use hardware floating-point divide and square root instructions.
++
++mdiv
++Target Report Mask(DIV)
++Use hardware instructions for integer division.
++
++march=
++Target Report RejectNegative Joined
++-march= Generate code for the given RISC-V ISA (e.g. rv64im). ISA strings
++must be lower-case.
++
++mtune=
++Target RejectNegative Joined Var(riscv_tune_string)
++-mtune=PROCESSOR Optimize the output for PROCESSOR.
++
++msmall-data-limit=
++Target Joined Separate UInteger Var(g_switch_value) Init(8)
++-msmall-data-limit=N Put global and static data smaller than N bytes into a special section (on some targets).
++
++msave-restore
++Target Report Mask(SAVE_RESTORE)
++Use smaller but slower prologue and epilogue code.
++
++mcmodel=
++Target Report RejectNegative Joined Enum(code_model) Var(riscv_cmodel) Init(TARGET_DEFAULT_CMODEL)
++Specify the code model.
++
++Enum
++Name(code_model) Type(enum riscv_code_model)
++Known code models (for use with the -mcmodel= option):
++
++EnumValue
++Enum(code_model) String(medlow) Value(CM_MEDLOW)
++
++EnumValue
++Enum(code_model) String(medany) Value(CM_MEDANY)
++
++mexplicit-relocs
++Target Report Mask(EXPLICIT_RELOCS)
++Use %reloc() operators, rather than assembly macros, to load addresses.
++
++Mask(64BIT)
++
++Mask(MUL)
++
++Mask(ATOMIC)
++
++Mask(HARD_FLOAT)
++
++Mask(DOUBLE_FLOAT)
++
++Mask(RVC)
+diff --git original-gcc/gcc/config/riscv/sync.md gcc-6.3.0/gcc/config/riscv/sync.md
+new file mode 100644
+index 00000000000..09970b9f36b
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/sync.md
+@@ -0,0 +1,194 @@
++;; Machine description for RISC-V atomic operations.
++;; Copyright (C) 2011-2017 Free Software Foundation, Inc.
++;; Contributed by Andrew Waterman (andrew@sifive.com).
++;; Based on MIPS target for GNU compiler.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3. If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_c_enum "unspec" [
++ UNSPEC_COMPARE_AND_SWAP
++ UNSPEC_SYNC_OLD_OP
++ UNSPEC_SYNC_EXCHANGE
++ UNSPEC_ATOMIC_STORE
++ UNSPEC_MEMORY_BARRIER
++])
++
++(define_code_iterator any_atomic [plus ior xor and])
++(define_code_attr atomic_optab
++ [(plus "add") (ior "or") (xor "xor") (and "and")])
++
++;; Memory barriers.
++
++(define_expand "mem_thread_fence"
++ [(match_operand:SI 0 "const_int_operand" "")] ;; model
++ ""
++{
++ if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
++ {
++ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
++ MEM_VOLATILE_P (mem) = 1;
++ emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
++ }
++ DONE;
++})
++
++;; Until the RISC-V memory model (hence its mapping from C++) is finalized,
++;; conservatively emit a full FENCE.
++(define_insn "mem_thread_fence_1"
++ [(set (match_operand:BLK 0 "" "")
++ (unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
++ (match_operand:SI 1 "const_int_operand" "")] ;; model
++ ""
++ "fence\trw,rw")
++
++;; Atomic memory operations.
++
++;; Implement atomic stores with amoswap. Fall back to fences for atomic loads.
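++;; (Informative: %F2 prints a preceding release fence and %A2 an .aq/.rl
++;; suffix when the memory model calls for them; atomic loads instead take
++;; the generic load-plus-fence expansion.)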
++(define_insn "atomic_store<mode>"
++ [(set (match_operand:GPR 0 "memory_operand" "=A")
++ (unspec_volatile:GPR
++ [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
++ (match_operand:SI 2 "const_int_operand")] ;; model
++ UNSPEC_ATOMIC_STORE))]
++ "TARGET_ATOMIC"
++ "%F2amoswap.<amo>%A2 zero,%z1,%0"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "atomic_<atomic_optab><mode>"
++ [(set (match_operand:GPR 0 "memory_operand" "+A")
++ (unspec_volatile:GPR
++ [(any_atomic:GPR (match_dup 0)
++ (match_operand:GPR 1 "reg_or_0_operand" "rJ"))
++ (match_operand:SI 2 "const_int_operand")] ;; model
++ UNSPEC_SYNC_OLD_OP))]
++ "TARGET_ATOMIC"
++ "%F2amo<insn>.<amo>%A2 zero,%z1,%0"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "atomic_fetch_<atomic_optab><mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=&r")
++ (match_operand:GPR 1 "memory_operand" "+A"))
++ (set (match_dup 1)
++ (unspec_volatile:GPR
++ [(any_atomic:GPR (match_dup 1)
++ (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
++ (match_operand:SI 3 "const_int_operand")] ;; model
++ UNSPEC_SYNC_OLD_OP))]
++ "TARGET_ATOMIC"
++ "%F3amo<insn>.<amo>%A3 %0,%z2,%1"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "atomic_exchange<mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=&r")
++ (unspec_volatile:GPR
++ [(match_operand:GPR 1 "memory_operand" "+A")
++ (match_operand:SI 3 "const_int_operand")] ;; model
++ UNSPEC_SYNC_EXCHANGE))
++ (set (match_dup 1)
++ (match_operand:GPR 2 "register_operand" "0"))]
++ "TARGET_ATOMIC"
++ "%F3amoswap.<amo>%A3 %0,%z2,%1"
++ [(set (attr "length") (const_int 8))])
++
++(define_insn "atomic_cas_value_strong<mode>"
++ [(set (match_operand:GPR 0 "register_operand" "=&r")
++ (match_operand:GPR 1 "memory_operand" "+A"))
++ (set (match_dup 1)
++ (unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
++ (match_operand:GPR 3 "reg_or_0_operand" "rJ")
++ (match_operand:SI 4 "const_int_operand") ;; mod_s
++ (match_operand:SI 5 "const_int_operand")] ;; mod_f
++ UNSPEC_COMPARE_AND_SWAP))
++ (clobber (match_scratch:GPR 6 "=&r"))]
++ "TARGET_ATOMIC"
++ "%F5 1: lr.<amo>%A5 %0,%1; bne %0,%z2,1f; sc.<amo>%A4 %6,%z3,%1; bnez %6,1b; 1:"
++ [(set (attr "length") (const_int 20))])
++
++(define_expand "atomic_compare_and_swap<mode>"
++ [(match_operand:SI 0 "register_operand" "") ;; bool output
++ (match_operand:GPR 1 "register_operand" "") ;; val output
++ (match_operand:GPR 2 "memory_operand" "") ;; memory
++ (match_operand:GPR 3 "reg_or_0_operand" "") ;; expected value
++ (match_operand:GPR 4 "reg_or_0_operand" "") ;; desired value
++ (match_operand:SI 5 "const_int_operand" "") ;; is_weak
++ (match_operand:SI 6 "const_int_operand" "") ;; mod_s
++ (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
++ "TARGET_ATOMIC"
++{
++ emit_insn (gen_atomic_cas_value_strong<mode> (operands[1], operands[2],
++ operands[3], operands[4],
++ operands[6], operands[7]));
++
++ rtx compare = operands[1];
++ if (operands[3] != const0_rtx)
++ {
++ rtx difference = gen_rtx_MINUS (<MODE>mode, operands[1], operands[3]);
++ compare = gen_reg_rtx (<MODE>mode);
++ emit_insn (gen_rtx_SET (compare, difference));
++ }
++
++ if (word_mode != <MODE>mode)
++ {
++ rtx reg = gen_reg_rtx (word_mode);
++ emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare)));
++ compare = reg;
++ }
++
++ emit_insn (gen_rtx_SET (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx)));
++ DONE;
++})
++
++(define_expand "atomic_test_and_set"
++ [(match_operand:QI 0 "register_operand" "") ;; bool output
++ (match_operand:QI 1 "memory_operand" "+A") ;; memory
++ (match_operand:SI 2 "const_int_operand" "")] ;; model
++ "TARGET_ATOMIC"
++{
++ /* We have no QImode atomics, so use the address LSBs to form a mask,
++ then use an aligned SImode atomic. */
++ rtx result = operands[0];
++ rtx mem = operands[1];
++ rtx model = operands[2];
++ rtx addr = force_reg (Pmode, XEXP (mem, 0));
++
++ rtx aligned_addr = gen_reg_rtx (Pmode);
++ emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, GEN_INT (-4)));
++
++ rtx aligned_mem = change_address (mem, SImode, aligned_addr);
++ set_mem_alias_set (aligned_mem, 0);
++
++ rtx offset = gen_reg_rtx (SImode);
++ emit_move_insn (offset, gen_rtx_AND (SImode, gen_lowpart (SImode, addr),
++ GEN_INT (3)));
++
++ rtx tmp = gen_reg_rtx (SImode);
++ emit_move_insn (tmp, GEN_INT (1));
++
++ rtx shmt = gen_reg_rtx (SImode);
++ emit_move_insn (shmt, gen_rtx_ASHIFT (SImode, offset, GEN_INT (3)));
++
++ rtx word = gen_reg_rtx (SImode);
++ emit_move_insn (word, gen_rtx_ASHIFT (SImode, tmp, shmt));
++
++ tmp = gen_reg_rtx (SImode);
++ emit_insn (gen_atomic_fetch_orsi (tmp, aligned_mem, word, model));
++
++ emit_move_insn (gen_lowpart (SImode, result),
++ gen_rtx_LSHIFTRT (SImode, tmp,
++ gen_lowpart (SImode, shmt)));
++ DONE;
++})
+diff --git original-gcc/gcc/config/riscv/t-elf-multilib gcc-6.3.0/gcc/config/riscv/t-elf-multilib
+new file mode 100644
+index 00000000000..6a39ece03bd
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/t-elf-multilib
+@@ -0,0 +1,6 @@
++# This file was generated by multilib-generator with the command:
++# ./multilib-generator rv32i-ilp32--c rv32im-ilp32--c rv32iac-ilp32-- rv32imac-ilp32-- rv32imafc-ilp32f-rv32imafdc- rv64imac-lp64-- rv64imafdc-lp64d--
++MULTILIB_OPTIONS = march=rv32i/march=rv32ic/march=rv32im/march=rv32imc/march=rv32iac/march=rv32imac/march=rv32imafc/march=rv32imafdc/march=rv32gc/march=rv64imac/march=rv64imafdc/march=rv64gc mabi=ilp32/mabi=ilp32f/mabi=lp64/mabi=lp64d
++MULTILIB_DIRNAMES = rv32i rv32ic rv32im rv32imc rv32iac rv32imac rv32imafc rv32imafdc rv32gc rv64imac rv64imafdc rv64gc ilp32 ilp32f lp64 lp64d
++MULTILIB_REQUIRED = march=rv32i/mabi=ilp32 march=rv32im/mabi=ilp32 march=rv32iac/mabi=ilp32 march=rv32imac/mabi=ilp32 march=rv32imafc/mabi=ilp32f march=rv64imac/mabi=lp64 march=rv64imafdc/mabi=lp64d
++MULTILIB_REUSE = march.rv32i/mabi.ilp32=march.rv32ic/mabi.ilp32 march.rv32im/mabi.ilp32=march.rv32imc/mabi.ilp32 march.rv32imafc/mabi.ilp32f=march.rv32imafdc/mabi.ilp32f march.rv32imafc/mabi.ilp32f=march.rv32gc/mabi.ilp32f march.rv64imafdc/mabi.lp64d=march.rv64gc/mabi.lp64d
+diff --git original-gcc/gcc/config/riscv/t-linux gcc-6.3.0/gcc/config/riscv/t-linux
+new file mode 100644
+index 00000000000..216d2776a18
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/t-linux
+@@ -0,0 +1,3 @@
++# Only XLEN and ABI affect Linux multilib dir names, e.g. /lib32/ilp32d/
++MULTILIB_DIRNAMES := $(patsubst rv32%,lib32,$(patsubst rv64%,lib64,$(MULTILIB_DIRNAMES)))
++MULTILIB_OSDIRNAMES := $(patsubst lib%,../lib%,$(MULTILIB_DIRNAMES))
+diff --git original-gcc/gcc/config/riscv/t-linux-multilib gcc-6.3.0/gcc/config/riscv/t-linux-multilib
+new file mode 100644
+index 00000000000..e94d4da5212
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/t-linux-multilib
+@@ -0,0 +1,6 @@
++# This file was generated by multilib-generator with the command:
++# ./multilib-generator rv32imac-ilp32-rv32ima,rv32imaf,rv32imafd,rv32imafc,rv32imafdc- rv32imafdc-ilp32d-rv32imafd- rv64imac-lp64-rv64ima,rv64imaf,rv64imafd,rv64imafc,rv64imafdc- rv64imafdc-lp64d-rv64imafd-
++MULTILIB_OPTIONS = march=rv32imac/march=rv32ima/march=rv32imaf/march=rv32imafd/march=rv32imafc/march=rv32imafdc/march=rv32g/march=rv32gc/march=rv64imac/march=rv64ima/march=rv64imaf/march=rv64imafd/march=rv64imafc/march=rv64imafdc/march=rv64g/march=rv64gc mabi=ilp32/mabi=ilp32d/mabi=lp64/mabi=lp64d
++MULTILIB_DIRNAMES = rv32imac rv32ima rv32imaf rv32imafd rv32imafc rv32imafdc rv32g rv32gc rv64imac rv64ima rv64imaf rv64imafd rv64imafc rv64imafdc rv64g rv64gc ilp32 ilp32d lp64 lp64d
++MULTILIB_REQUIRED = march=rv32imac/mabi=ilp32 march=rv32imafdc/mabi=ilp32d march=rv64imac/mabi=lp64 march=rv64imafdc/mabi=lp64d
++MULTILIB_REUSE = march.rv32imac/mabi.ilp32=march.rv32ima/mabi.ilp32 march.rv32imac/mabi.ilp32=march.rv32imaf/mabi.ilp32 march.rv32imac/mabi.ilp32=march.rv32imafd/mabi.ilp32 march.rv32imac/mabi.ilp32=march.rv32imafc/mabi.ilp32 march.rv32imac/mabi.ilp32=march.rv32imafdc/mabi.ilp32 march.rv32imac/mabi.ilp32=march.rv32g/mabi.ilp32 march.rv32imac/mabi.ilp32=march.rv32gc/mabi.ilp32 march.rv32imafdc/mabi.ilp32d=march.rv32imafd/mabi.ilp32d march.rv32imafdc/mabi.ilp32d=march.rv32gc/mabi.ilp32d march.rv32imafdc/mabi.ilp32d=march.rv32g/mabi.ilp32d march.rv64imac/mabi.lp64=march.rv64ima/mabi.lp64 march.rv64imac/mabi.lp64=march.rv64imaf/mabi.lp64 march.rv64imac/mabi.lp64=march.rv64imafd/mabi.lp64 march.rv64imac/mabi.lp64=march.rv64imafc/mabi.lp64 march.rv64imac/mabi.lp64=march.rv64imafdc/mabi.lp64 march.rv64imac/mabi.lp64=march.rv64g/mabi.lp64 march.rv64imac/mabi.lp64=march.rv64gc/mabi.lp64 march.rv64imafdc/mabi.lp64d=march.rv64imafd/mabi.lp64d march.rv64imafdc/mabi.lp64d=march.rv64gc/mabi.lp64d march.rv64imafdc/mabi.lp64d=march.rv64g/mabi.lp64d
+diff --git original-gcc/gcc/config/riscv/t-riscv gcc-6.3.0/gcc/config/riscv/t-riscv
+new file mode 100644
+index 00000000000..0765b49f90f
+--- /dev/null
++++ gcc-6.3.0/gcc/config/riscv/t-riscv
+@@ -0,0 +1,11 @@
++riscv-builtins.o: $(srcdir)/config/riscv/riscv-builtins.c $(CONFIG_H) \
++ $(SYSTEM_H) coretypes.h $(TM_H) $(RTL_H) $(TREE_H) $(RECOG_H) langhooks.h \
++ $(DIAGNOSTIC_CORE_H) $(OPTABS_H) $(srcdir)/config/riscv/riscv-ftypes.def \
++ $(srcdir)/config/riscv/riscv-modes.def
++ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
++ $(srcdir)/config/riscv/riscv-builtins.c
++
++riscv-c.o: $(srcdir)/config/riscv/riscv-c.c $(CONFIG_H) $(SYSTEM_H) \
++ coretypes.h $(TM_H) $(TREE_H) output.h $(C_COMMON_H) $(TARGET_H)
++ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) \
++ $(srcdir)/config/riscv/riscv-c.c
+diff --git original-gcc/gcc/configure gcc-6.3.0/gcc/configure
+index c9e43fb80e3..5359a4e6ee5 100755
+--- original-gcc/gcc/configure
++++ gcc-6.3.0/gcc/configure
+@@ -24156,6 +24156,17 @@ x3: .space 4
+ tls_first_minor=14
+ tls_as_opt="-a32 --fatal-warnings"
+ ;;
++ riscv*-*-*)
++ conftest_s='
++ .section .tdata,"awT",@progbits
++x: .word 2
++ .text
++ la.tls.gd a0,x
++ call __tls_get_addr'
++ tls_first_major=2
++ tls_first_minor=21
++ tls_as_opt='--fatal-warnings'
++ ;;
+ s390-*-*)
+ conftest_s='
+ .section ".tdata","awT",@progbits
+@@ -27516,8 +27527,8 @@ esac
+ # version to the per-target configury.
+ case "$cpu_type" in
+ aarch64 | alpha | arm | avr | bfin | cris | i386 | m32c | m68k | microblaze \
+- | mips | nios2 | pa | rs6000 | score | sparc | spu | tilegx | tilepro \
+- | visium | xstormy16 | xtensa)
++ | mips | nios2 | pa | riscv | rs6000 | score | sparc | spu | tilegx \
++ | tilepro | visium | xstormy16 | xtensa)
+ insn="nop"
+ ;;
+ ia64 | s390)
+diff --git original-gcc/gcc/configure.ac gcc-6.3.0/gcc/configure.ac
+index 33f9a0ecdc6..673fb1bb891 100644
+--- original-gcc/gcc/configure.ac
++++ gcc-6.3.0/gcc/configure.ac
+@@ -3393,6 +3393,17 @@ x3: .space 4
+ tls_first_minor=14
+ tls_as_opt="-a32 --fatal-warnings"
+ ;;
++ riscv*-*-*)
++ conftest_s='
++ .section .tdata,"awT",@progbits
++x: .word 2
++ .text
++ la.tls.gd a0,x
++ call __tls_get_addr'
++ tls_first_major=2
++ tls_first_minor=21
++ tls_as_opt='--fatal-warnings'
++ ;;
+ s390-*-*)
+ conftest_s='
+ .section ".tdata","awT",@progbits
+@@ -4744,8 +4755,8 @@ esac
+ # version to the per-target configury.
+ case "$cpu_type" in
+ aarch64 | alpha | arm | avr | bfin | cris | i386 | m32c | m68k | microblaze \
+- | mips | nios2 | pa | rs6000 | score | sparc | spu | tilegx | tilepro \
+- | visium | xstormy16 | xtensa)
++ | mips | nios2 | pa | riscv | rs6000 | score | sparc | spu | tilegx \
++ | tilepro | visium | xstormy16 | xtensa)
+ insn="nop"
+ ;;
+ ia64 | s390)
+diff --git original-gcc/gcc/doc/contrib.texi gcc-6.3.0/gcc/doc/contrib.texi
+index 5554d5f04c8..5b14fc445b5 100644
+--- original-gcc/gcc/doc/contrib.texi
++++ gcc-6.3.0/gcc/doc/contrib.texi
+@@ -173,6 +173,10 @@ Denis Chertykov for contributing and maintaining the AVR port, the first GCC por
+ for an 8-bit architecture.
+
+ @item
++Kito Cheng for his work on the RISC-V port, including bringing up the test
++suite and maintenance.
++
++@item
+ Scott Christley for his Objective-C contributions.
+
+ @item
+@@ -217,6 +221,9 @@ Paul Dale for his work to add uClinux platform support to the
+ m68k backend.
+
+ @item
++Palmer Dabbelt for his work maintaining the RISC-V port.
++
++@item
+ Dario Dariol contributed the four varieties of sample programs
+ that print a copy of their source.
+
+@@ -1035,6 +1042,9 @@ associated configure steps.
+ Todd Vierling for contributions for NetBSD ports.
+
+ @item
++Andrew Waterman for contributing the RISC-V port, as well as maintaining it.
++
++@item
+ Jonathan Wakely for contributing libstdc++ Doxygen notes and XHTML
+ guidance.
+
+diff --git original-gcc/gcc/doc/install.texi gcc-6.3.0/gcc/doc/install.texi
+index bc4edfdb096..0c82fe9eb94 100644
+--- original-gcc/gcc/doc/install.texi
++++ gcc-6.3.0/gcc/doc/install.texi
+@@ -4297,6 +4297,36 @@ This configuration is intended for embedded systems.
+ @html
+ <hr />
+ @end html
++@anchor{riscv32-x-elf}
++@heading riscv32-*-elf
++The RISC-V RV32 instruction set.
++This configuration is intended for embedded systems.
++
++@html
++<hr />
++@end html
++@anchor{riscv64-x-elf}
++@heading riscv64-*-elf
++The RISC-V RV64 instruction set.
++This configuration is intended for embedded systems.
++
++@html
++<hr />
++@end html
++@anchor{riscv32-x-linux}
++@heading riscv32-*-linux
++The RISC-V RV32 instruction set running GNU/Linux.
++
++@html
++<hr />
++@end html
++@anchor{riscv64-x-linux}
++@heading riscv64-*-linux
++The RISC-V RV64 instruction set running GNU/Linux.
++
++@html
++<hr />
++@end html
+ @anchor{rx-x-elf}
+ @heading rx-*-elf
+ The Renesas RX processor. See
+diff --git original-gcc/gcc/doc/invoke.texi gcc-6.3.0/gcc/doc/invoke.texi
+index 4b13aeb7426..581c4effbc5 100644
+--- original-gcc/gcc/doc/invoke.texi
++++ gcc-6.3.0/gcc/doc/invoke.texi
+@@ -1046,6 +1046,20 @@ See RS/6000 and PowerPC Options.
+ -mstack-protector-guard-offset=@var{offset} @gol
+ -mlra -mno-lra}
+
++@emph{RISC-V Options}
++@gccoptlist{-mbranch-cost=@var{N} @gol
++-mmemcpy -mno-memcpy @gol
++-mplt -mno-plt @gol
++-mabi=@var{ABI-string} @gol
++-mfdiv -mno-fdiv @gol
++-mdiv -mno-div @gol
++-march=@var{ISA-string} @gol
++-mtune=@var{processor-string} @gol
++-msmall-data-limit=@var{N-bytes} @gol
++-msave-restore -mno-save-restore @gol
++-mcmodel=@var{code-model} @gol
++-mexplicit-relocs -mno-explicit-relocs}
++
+ @emph{RX Options}
+ @gccoptlist{-m64bit-doubles -m32bit-doubles -fpu -nofpu@gol
+ -mcpu=@gol
+@@ -13881,6 +13895,7 @@ platform.
+ * PowerPC Options::
+ * RL78 Options::
+ * RS/6000 and PowerPC Options::
++* RISC-V Options::
+ * RX Options::
+ * S/390 and zSeries Options::
+ * Score Options::
+@@ -22274,6 +22289,70 @@ offset from that base register. The default for those is as specified in the
+ relevant ABI.
+ @end table
+
++@node RISC-V Options
++@subsection RISC-V Options
++@cindex RISC-V Options
++
++These command-line options are defined for RISC-V targets:
++
++@table @gcctabopt
++@item -mbranch-cost=@var{N}
++@opindex mbranch-cost
++Set the cost of branches to roughly @var{N} instructions.
++
++@item -mmemcpy
++@itemx -mno-memcpy
++@opindex mmemcpy
++Don't optimize block moves.
++
++@item -mplt
++@itemx -mno-plt
++@opindex mplt
++When generating @option{-fpic} code, allow the use of PLTs. Ignored with
++@option{-fno-pic}.
++
++@item -mabi=@var{ABI-string}
++@opindex mabi
++Specify the integer and floating-point calling convention. This defaults to
++the natural calling convention: e.g. LP64 for RV64I, ILP32 for RV32I, and
++LP64D for RV64G.
++
++@item -mfdiv
++@itemx -mno-fdiv
++@opindex mfdiv
++Use hardware floating-point divide and square root instructions. This requires
++the F or D extensions for floating-point registers.
++
++@item -mdiv
++@itemx -mno-div
++@opindex mdiv
++Use hardware instructions for integer division. This requires the M extension.
++
++@item -march=@var{ISA-string}
++@opindex march
++Generate code for the given RISC-V ISA (e.g. @samp{rv64im}). ISA strings must
++be lower-case. Examples include @samp{rv64i}, @samp{rv32g}, and
++@samp{rv32imaf}.
++
++@item -mtune=@var{processor-string}
++@opindex mtune
++Optimize the output for the given processor, specified by microarchitecture
++name.
++
++@item -msmall-data-limit=@var{N}
++@opindex msmall-data-limit
++Put global and static data smaller than @var{N} bytes into a special section
++(on some targets).
++
++@item -msave-restore
++@itemx -mno-save-restore
++@opindex msave-restore
++Use smaller but slower prologue and epilogue code.
++
++@item -mcmodel=@var{code-model}
++@opindex mcmodel
++Specify the code model (@samp{medlow} or @samp{medany}).
++
++@item -mexplicit-relocs
++@itemx -mno-explicit-relocs
++@opindex mexplicit-relocs
++Use or do not use assembler relocation operators when dealing with symbolic
++addresses. The alternative is to use assembler macros instead, which may
++limit optimization.
++
++@end table
++
+ @node RX Options
+ @subsection RX Options
+ @cindex RX Options
+diff --git original-gcc/gcc/doc/md.texi gcc-6.3.0/gcc/doc/md.texi
+index 11266d7dd3f..3f710740b22 100644
+--- original-gcc/gcc/doc/md.texi
++++ gcc-6.3.0/gcc/doc/md.texi
+@@ -3362,6 +3362,26 @@ The @code{X} register.
+
+ @end table
+
++@item RISC-V---@file{config/riscv/constraints.md}
++@table @code
++
++@item f
++A floating-point register (if available).
++
++@item I
++An I-type 12-bit signed immediate.
++
++@item J
++Integer zero.
++
++@item K
++A 5-bit unsigned immediate for CSR access instructions.
++
++@item A
++An address that is held in a general-purpose register.
++
++@end table
++
+ @item RX---@file{config/rx/constraints.md}
+ @table @code
+ @item Q
+diff --git original-gcc/libatomic/configure.tgt gcc-6.3.0/libatomic/configure.tgt
+index 6d77c9482a5..b8af3ab2546 100644
+--- original-gcc/libatomic/configure.tgt
++++ gcc-6.3.0/libatomic/configure.tgt
+@@ -37,6 +37,7 @@ case "${target_cpu}" in
+ ARCH=alpha
+ ;;
+ rs6000 | powerpc*) ARCH=powerpc ;;
++ riscv*) ARCH=riscv ;;
+ sh*) ARCH=sh ;;
+
+ arm*)
+diff --git original-gcc/libgcc/config.host gcc-6.3.0/libgcc/config.host
+index 540bfa96358..9472a60886c 100644
+--- original-gcc/libgcc/config.host
++++ gcc-6.3.0/libgcc/config.host
+@@ -167,6 +167,9 @@ powerpc*-*-*)
+ ;;
+ rs6000*-*-*)
+ ;;
++riscv*-*-*)
++ cpu_type=riscv
++ ;;
+ sparc64*-*-*)
+ cpu_type=sparc
+ ;;
+@@ -1093,6 +1096,15 @@ powerpcle-*-eabi*)
+ tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
+ extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
+ ;;
++riscv*-*-linux*)
++ tmake_file="${tmake_file} riscv/t-softfp${host_address} t-softfp riscv/t-elf riscv/t-elf${host_address}"
++ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
++ md_unwind_header=riscv/linux-unwind.h
++ ;;
++riscv*-*-*)
++ tmake_file="${tmake_file} riscv/t-softfp${host_address} t-softfp riscv/t-elf riscv/t-elf${host_address}"
++ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
++ ;;
+ rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
+ md_unwind_header=rs6000/aix-unwind.h
+ tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble"
+diff --git original-gcc/libgcc/config/riscv/atomic.c gcc-6.3.0/libgcc/config/riscv/atomic.c
+new file mode 100644
+index 00000000000..448b0e55b5a
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/atomic.c
+@@ -0,0 +1,111 @@
++/* Legacy sub-word atomics for RISC-V.
++
++ Copyright (C) 2016-2017 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
++<http://www.gnu.org/licenses/>. */
++
++#ifdef __riscv_atomic
++
++#include <stdbool.h>
++
++#define INVERT "not %[tmp1], %[tmp1]\n\t"
++#define DONT_INVERT ""
++
++#define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop) \
++ type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v) \
++ { \
++ unsigned long aligned_addr = ((unsigned long) p) & ~3UL; \
++ int shift = (((unsigned long) p) & 3) * 8; \
++ unsigned mask = ((1U << ((sizeof v) * 8)) - 1) << shift; \
++ unsigned old, tmp1, tmp2; \
++ \
++ asm volatile ("1:\n\t" \
++ "lr.w.aq %[old], %[mem]\n\t" \
++ #insn " %[tmp1], %[old], %[value]\n\t" \
++ invert \
++ "and %[tmp1], %[tmp1], %[mask]\n\t" \
++ "and %[tmp2], %[old], %[not_mask]\n\t" \
++ "or %[tmp2], %[tmp2], %[tmp1]\n\t" \
++ "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t" \
++ "bnez %[tmp1], 1b" \
++ : [old] "=&r" (old), \
++ [mem] "+A" (*(volatile unsigned*) aligned_addr), \
++ [tmp1] "=&r" (tmp1), \
++ [tmp2] "=&r" (tmp2) \
++ : [value] "r" (((unsigned) v) << shift), \
++ [mask] "r" (mask), \
++ [not_mask] "r" (~mask)); \
++ \
++ return (type) (old >> shift); \
++ } \
++ \
++ type __sync_ ## opname ## _and_fetch_ ## size (type *p, type v) \
++ { \
++ type o = __sync_fetch_and_ ## opname ## _ ## size (p, v); \
++ return cop; \
++ }
++
++#define GENERATE_COMPARE_AND_SWAP(type, size) \
++ type __sync_val_compare_and_swap_ ## size (type *p, type o, type n) \
++ { \
++ unsigned long aligned_addr = ((unsigned long) p) & ~3UL; \
++ int shift = (((unsigned long) p) & 3) * 8; \
++ unsigned mask = ((1U << ((sizeof o) * 8)) - 1) << shift; \
++ unsigned old, tmp1; \
++ \
++ asm volatile ("1:\n\t" \
++ "lr.w.aq %[old], %[mem]\n\t" \
++ "and %[tmp1], %[old], %[mask]\n\t" \
++ "bne %[tmp1], %[o], 1f\n\t" \
++ "and %[tmp1], %[old], %[not_mask]\n\t" \
++ "or %[tmp1], %[tmp1], %[n]\n\t" \
++ "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t" \
++ "bnez %[tmp1], 1b\n\t" \
++ "1:" \
++ : [old] "=&r" (old), \
++ [mem] "+A" (*(volatile unsigned*) aligned_addr), \
++ [tmp1] "=&r" (tmp1) \
++ : [o] "r" ((((unsigned) o) << shift) & mask), \
++ [n] "r" ((((unsigned) n) << shift) & mask), \
++ [mask] "r" (mask), \
++ [not_mask] "r" (~mask)); \
++ \
++ return (type) (old >> shift); \
++ } \
++ bool __sync_bool_compare_and_swap_ ## size (type *p, type o, type n) \
++ { \
++ return __sync_val_compare_and_swap(p, o, n) == o; \
++ }
++
++#define GENERATE_ALL(type, size) \
++ GENERATE_FETCH_AND_OP(type, size, add, add, DONT_INVERT, o + v) \
++ GENERATE_FETCH_AND_OP(type, size, sub, sub, DONT_INVERT, o - v) \
++ GENERATE_FETCH_AND_OP(type, size, and, and, DONT_INVERT, o & v) \
++ GENERATE_FETCH_AND_OP(type, size, xor, xor, DONT_INVERT, o ^ v) \
++ GENERATE_FETCH_AND_OP(type, size, or, or, DONT_INVERT, o | v) \
++ GENERATE_FETCH_AND_OP(type, size, nand, and, INVERT, ~(o & v)) \
++ GENERATE_COMPARE_AND_SWAP(type, size)
++
++GENERATE_ALL(unsigned char, 1)
++GENERATE_ALL(unsigned short, 2)
++
++#endif
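As an aside for readers of the LR/SC loop above: the same mask-and-shift technique can be written in portable C with a word-sized compare-and-swap. The sketch below is illustrative only and is not part of the patch; the helper name is hypothetical, and it assumes a little-endian target (as RISC-V is) and GCC's __atomic builtins.

#include <stdint.h>

/* Hypothetical helper: a 1-byte atomic fetch-and-add built from a
   compare-and-swap on the aligned 32-bit word containing the byte,
   mirroring the mask/shift scheme above.  The word cast is a sketch
   and glosses over strict-aliasing concerns.  */
static uint8_t
fetch_and_add_u8 (uint8_t *p, uint8_t v)
{
  uintptr_t addr = (uintptr_t) p;
  uint32_t *word = (uint32_t *) (addr & ~(uintptr_t) 3);
  int shift = (addr & 3) * 8;          /* Bit offset of the byte.  */
  uint32_t mask = 0xffu << shift;
  uint32_t old = __atomic_load_n (word, __ATOMIC_RELAXED);
  uint32_t desired;

  do
    {
      /* Add within the byte field only; the mask discards any carry
         out of the field, so the byte wraps modulo 256 as required.  */
      uint32_t byte = (old + ((uint32_t) v << shift)) & mask;
      desired = (old & ~mask) | byte;
    }
  while (!__atomic_compare_exchange_n (word, &old, desired, 0 /* strong */,
                                       __ATOMIC_ACQ_REL, __ATOMIC_RELAXED));

  return (uint8_t) (old >> shift);
}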
+diff --git original-gcc/libgcc/config/riscv/crti.S gcc-6.3.0/libgcc/config/riscv/crti.S
+new file mode 100644
+index 00000000000..89bac706c63
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/crti.S
+@@ -0,0 +1 @@
++/* crti.S is empty because .init_array/.fini_array are used exclusively. */
+diff --git original-gcc/libgcc/config/riscv/crtn.S gcc-6.3.0/libgcc/config/riscv/crtn.S
+new file mode 100644
+index 00000000000..ca6ee7b6fba
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/crtn.S
+@@ -0,0 +1 @@
++/* crtn.S is empty because .init_array/.fini_array are used exclusively. */
+diff --git original-gcc/libgcc/config/riscv/div.S gcc-6.3.0/libgcc/config/riscv/div.S
+new file mode 100644
+index 00000000000..63d542e846c
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/div.S
+@@ -0,0 +1,146 @@
++/* Integer division routines for RISC-V.
++
++ Copyright (C) 2016-2017 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
++<http://www.gnu.org/licenses/>. */
++
++ .text
++ .align 2
++
++#if __riscv_xlen == 32
++/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
++# define __udivdi3 __udivsi3
++# define __umoddi3 __umodsi3
++# define __divdi3 __divsi3
++# define __moddi3 __modsi3
++#else
++ .globl __udivsi3
++__udivsi3:
++ /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */
++ sll a0, a0, 32
++ sll a1, a1, 32
++ move t0, ra
++ jal __udivdi3
++ sext.w a0, a0
++ jr t0
++
++ .globl __umodsi3
++__umodsi3:
++ /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); the remainder is in a1. */
++ sll a0, a0, 32
++ sll a1, a1, 32
++ srl a0, a0, 32
++ srl a1, a1, 32
++ move t0, ra
++ jal __udivdi3
++ sext.w a0, a1
++ jr t0
++
++ .globl __modsi3
++ __modsi3 = __moddi3
++
++ .globl __divsi3
++__divsi3:
++ /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */
++ li t0, -1
++ beq a1, t0, .L20
++#endif
++
++ .globl __divdi3
++__divdi3:
++ bltz a0, .L10
++ bltz a1, .L11
++ /* Since the quotient is positive, fall into __udivdi3. */
++
++ .globl __udivdi3
++__udivdi3:
++ mv a2, a1
++ mv a1, a0
++ li a0, -1
++ beqz a2, .L5
++ li a3, 1
++ bgeu a2, a1, .L2
++.L1:
++ blez a2, .L2
++ slli a2, a2, 1
++ slli a3, a3, 1
++ bgtu a1, a2, .L1
++.L2:
++ li a0, 0
++.L3:
++ bltu a1, a2, .L4
++ sub a1, a1, a2
++ or a0, a0, a3
++.L4:
++ srli a3, a3, 1
++ srli a2, a2, 1
++ bnez a3, .L3
++.L5:
++ ret
++
++ .globl __umoddi3
++__umoddi3:
++ /* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */
++ move t0, ra
++ jal __udivdi3
++ move a0, a1
++ jr t0
++
++ /* Handle negative arguments to __divdi3. */
++.L10:
++ neg a0, a0
++ bgez a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */
++ neg a1, a1
++ j __udivdi3 /* Compute __udivdi3(-a0, -a1). */
++.L11: /* Compute __udivdi3(a0, -a1), then negate the result. */
++ neg a1, a1
++.L12:
++ move t0, ra
++ jal __udivdi3
++ neg a0, a0
++ jr t0
++
++ .globl __moddi3
++__moddi3:
++ move t0, ra
++ bltz a1, .L31
++ bltz a0, .L32
++.L30:
++ jal __udivdi3 /* The dividend is not negative. */
++ move a0, a1
++ jr t0
++.L31:
++ neg a1, a1
++ bgez a0, .L30
++.L32:
++ neg a0, a0
++ jal __udivdi3 /* The dividend is negative. */
++ neg a0, a1
++ jr t0
++
++#if __riscv_xlen == 64
++ /* continuation of __divsi3 */
++.L20:
++ sll t0, t0, 31
++ bne a0, t0, __divdi3
++ ret
++#endif
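The quotient loop in __udivdi3 above is classic shift-and-subtract division: align the divisor just below the dividend, then walk back down, subtracting and setting quotient bits. A hedged C sketch of the same algorithm follows (hypothetical name, not part of the patch), including the divide-by-zero convention of an all-ones quotient with the dividend left as the remainder.

#include <stdint.h>

/* Hypothetical helper mirroring __udivdi3.  Returns the quotient and
   stores the remainder through *rem.  */
static uint64_t
udiv64_sketch (uint64_t num, uint64_t den, uint64_t *rem)
{
  uint64_t quo = 0, bit = 1;

  if (den == 0)
    {
      *rem = num;      /* Division by zero: quotient -1, remainder num.  */
      return ~0ULL;
    }

  /* Shift den up while it is still below num and its sign bit is
     clear (the blez test in the assembly above).  */
  while (den < num && (int64_t) den > 0)
    {
      den <<= 1;
      bit <<= 1;
    }

  for (; bit != 0; bit >>= 1, den >>= 1)
    if (num >= den)
      {
        num -= den;
        quo |= bit;
      }

  *rem = num;
  return quo;
}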
+diff --git original-gcc/libgcc/config/riscv/linux-unwind.h gcc-6.3.0/libgcc/config/riscv/linux-unwind.h
+new file mode 100644
+index 00000000000..a051a2869d4
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/linux-unwind.h
+@@ -0,0 +1,89 @@
++/* Copyright (C) 2016-2017 Free Software Foundation, Inc.
++
++ This file is free software; you can redistribute it and/or modify it
++ under the terms of the GNU General Public License as published by the
++ Free Software Foundation; either version 3, or (at your option) any
++ later version.
++
++ This file is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ Under Section 7 of GPL version 3, you are granted additional
++ permissions described in the GCC Runtime Library Exception, version
++ 3.1, as published by the Free Software Foundation.
++
++ You should have received a copy of the GNU General Public License and
++ a copy of the GCC Runtime Library Exception along with this program;
++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
++ <http://www.gnu.org/licenses/>. */
++
++#ifndef inhibit_libc
++
++#include <signal.h>
++#include <stdint.h>
++#include <sys/ucontext.h>
++
++#define LI_A7_8B 0x08b00893
++#define ECALL 0x00000073
++
++#define MD_FALLBACK_FRAME_STATE_FOR riscv_fallback_frame_state
++
++static _Unwind_Reason_Code
++riscv_fallback_frame_state (struct _Unwind_Context *context,
++ _Unwind_FrameState * fs)
++{
++ /* The kernel creates an rt_sigframe on the stack immediately prior
++ to delivering a signal.
++
++ This structure must have the same shape as the linux kernel
++ equivalent. */
++ struct rt_sigframe
++ {
++ siginfo_t info;
++ struct ucontext uc;
++ };
++
++ struct rt_sigframe *rt_;
++ _Unwind_Ptr new_cfa;
++ uint16_t *pc = context->ra;
++ struct sigcontext *sc;
++ int i;
++
++ /* A signal frame will have a return address pointing to
++ __default_sa_restorer. This code is hardwired as:
++
++ 0x08b00893 li a7,0x8b
++ 0x00000073 ecall
++
++ Note, the PC might only have 2-byte alignment.
++ */
++ if (pc[0] != (uint16_t)LI_A7_8B || pc[1] != (uint16_t)(LI_A7_8B >> 16)
++ || pc[2] != (uint16_t)ECALL || pc[3] != (uint16_t)(ECALL >> 16))
++ return _URC_END_OF_STACK;
++
++ rt_ = context->cfa;
++ sc = &rt_->uc.uc_mcontext;
++
++ new_cfa = (_Unwind_Ptr) sc;
++ fs->regs.cfa_how = CFA_REG_OFFSET;
++ fs->regs.cfa_reg = __LIBGCC_STACK_POINTER_REGNUM__;
++ fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa;
++
++ for (i = 0; i < 32; i++)
++ {
++ fs->regs.reg[i].how = REG_SAVED_OFFSET;
++ fs->regs.reg[i].loc.offset = (_Unwind_Ptr) &sc->gregs[i] - new_cfa;
++ }
++
++ fs->signal_frame = 1;
++ fs->retaddr_column = __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__;
++ fs->regs.reg[fs->retaddr_column].how = REG_SAVED_VAL_OFFSET;
++ fs->regs.reg[fs->retaddr_column].loc.offset =
++ (_Unwind_Ptr) sc->gregs[0] - new_cfa;
++
++ return _URC_NO_REASON;
++}
++
++#endif
+diff --git original-gcc/libgcc/config/riscv/muldi3.S gcc-6.3.0/libgcc/config/riscv/muldi3.S
+new file mode 100644
+index 00000000000..eb3d9b0df3d
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/muldi3.S
+@@ -0,0 +1,46 @@
++/* Integer multiplication routines for RISC-V.
++
++ Copyright (C) 2016-2017 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
++<http://www.gnu.org/licenses/>. */
++
++ .text
++ .align 2
++
++#if __riscv_xlen == 32
++/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine. */
++# define __muldi3 __mulsi3
++#endif
++
++ .globl __muldi3
++__muldi3:
++ mv a2, a0
++ li a0, 0
++.L1:
++ andi a3, a1, 1
++ beqz a3, .L2
++ add a0, a0, a2
++.L2:
++ srli a1, a1, 1
++ slli a2, a2, 1
++ bnez a1, .L1
++ ret
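__muldi3 above is plain shift-and-add multiplication for targets without the M extension: add the progressively shifted multiplicand wherever the multiplier has a set bit. A minimal C sketch of the same loop (hypothetical name, not part of the patch):

#include <stdint.h>

/* Hypothetical helper mirroring __muldi3's register roles: a2 is the
   shifted multiplicand, a1 the multiplier, a0 the accumulator.  */
static uint64_t
mul64_sketch (uint64_t a, uint64_t b)
{
  uint64_t prod = 0;

  while (b != 0)
    {
      if (b & 1)
        prod += a;       /* This bit of b contributes a << position.  */
      b >>= 1;
      a <<= 1;
    }
  return prod;
}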
+diff --git original-gcc/libgcc/config/riscv/multi3.S gcc-6.3.0/libgcc/config/riscv/multi3.S
+new file mode 100644
+index 00000000000..4d454e65013
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/multi3.S
+@@ -0,0 +1,81 @@
++/* Integer multiplication routines for RISC-V.
++
++ Copyright (C) 2016-2017 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
++<http://www.gnu.org/licenses/>. */
++
++ .text
++ .align 2
++
++#if __riscv_xlen == 32
++/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
++# define __multi3 __muldi3
++#endif
++
++ .globl __multi3
++__multi3:
++
++#if __riscv_xlen == 32
++/* On RV32, the word-sized multiply helper called below is named __mulsi3. */
++# define __muldi3 __mulsi3
++#endif
++
++/* We rely on the fact that __muldi3 doesn't clobber the t-registers. */
++
++ mv t0, ra
++ mv t5, a0
++ mv a0, a1
++ mv t6, a3
++ mv a1, t5
++ mv a4, a2
++ li a5, 0
++ li t2, 0
++ li t4, 0
++.L1:
++ add a6, t2, a1
++ andi t3, a4, 1
++ slli a7, a5, 1
++ slti t1, a1, 0
++ srli a4, a4, 1
++ add a5, t4, a5
++ beqz t3, .L2
++ sltu t3, a6, t2
++ mv t2, a6
++ add t4, t3, a5
++.L2:
++ slli a1, a1, 1
++ or a5, t1, a7
++ bnez a4, .L1
++ beqz a0, .L3
++ mv a1, a2
++ call __muldi3
++ add t4, t4, a0
++.L3:
++ beqz t6, .L4
++ mv a1, t6
++ mv a0, t5
++ call __muldi3
++ add t4, t4, a0
++.L4:
++ mv a0, t2
++ mv a1, t4
++ jr t0
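__multi3 above assembles a double-word product from word-sized pieces: the low word of low x low, a carry word tracked through the shift-and-add loop, and the two cross products added into the high word. The hedged C sketch below shows the same decomposition, with a portable high-half multiply standing in for the loop's carry tracking (names hypothetical, not part of the patch).

#include <stdint.h>

typedef struct { uint64_t lo, hi; } u128_sketch;

/* Upper 64 bits of a 64x64 -> 128-bit product, via 32-bit pieces.  */
static uint64_t
umulh64 (uint64_t a, uint64_t b)
{
  uint64_t a0 = (uint32_t) a, a1 = a >> 32;
  uint64_t b0 = (uint32_t) b, b1 = b >> 32;
  uint64_t mid  = a1 * b0 + ((a0 * b0) >> 32);
  uint64_t mid2 = a0 * b1 + (uint32_t) mid;
  return a1 * b1 + (mid >> 32) + (mid2 >> 32);
}

/* Hypothetical helper mirroring __multi3: the low product word, plus
   the carry word and both cross products folded into the high word.  */
static u128_sketch
mul128_sketch (uint64_t a_lo, uint64_t a_hi, uint64_t b_lo, uint64_t b_hi)
{
  u128_sketch r;
  r.lo = a_lo * b_lo;
  r.hi = umulh64 (a_lo, b_lo) + a_lo * b_hi + a_hi * b_lo;
  return r;
}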
+diff --git original-gcc/libgcc/config/riscv/save-restore.S gcc-6.3.0/libgcc/config/riscv/save-restore.S
+new file mode 100644
+index 00000000000..2073a73089b
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/save-restore.S
+@@ -0,0 +1,463 @@
++/* Callee-saved register spill and fill routines for RISC-V.
++
++ Copyright (C) 2016-2017 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
++<http://www.gnu.org/licenses/>. */
++
++ .text
++
++ .globl __riscv_save_12
++ .globl __riscv_save_11
++ .globl __riscv_save_10
++ .globl __riscv_save_9
++ .globl __riscv_save_8
++ .globl __riscv_save_7
++ .globl __riscv_save_6
++ .globl __riscv_save_5
++ .globl __riscv_save_4
++ .globl __riscv_save_3
++ .globl __riscv_save_2
++ .globl __riscv_save_1
++ .globl __riscv_save_0
++
++ .globl __riscv_restore_12
++ .globl __riscv_restore_11
++ .globl __riscv_restore_10
++ .globl __riscv_restore_9
++ .globl __riscv_restore_8
++ .globl __riscv_restore_7
++ .globl __riscv_restore_6
++ .globl __riscv_restore_5
++ .globl __riscv_restore_4
++ .globl __riscv_restore_3
++ .globl __riscv_restore_2
++ .globl __riscv_restore_1
++ .globl __riscv_restore_0
++
++#if __riscv_xlen == 64
++
++__riscv_save_12:
++ .cfi_startproc
++ # The __riscv_save_* routines use t0/x5 as the return address
++ .cfi_return_column 5
++ addi sp, sp, -112
++ .cfi_def_cfa_offset 112
++ li t1, 0
++ sd s11, 8(sp)
++ .cfi_offset 27, -104
++ j .Ls10
++
++__riscv_save_11:
++__riscv_save_10:
++ .cfi_restore 27
++ addi sp, sp, -112
++ .cfi_def_cfa_offset 112
++ li t1, -16
++.Ls10:
++ sd s10, 16(sp)
++ .cfi_offset 26, -96
++ sd s9, 24(sp)
++ .cfi_offset 25, -88
++ j .Ls8
++
++__riscv_save_9:
++__riscv_save_8:
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ addi sp, sp, -112
++ .cfi_def_cfa_offset 112
++ li t1, -32
++.Ls8:
++ sd s8, 32(sp)
++ .cfi_offset 24, -80
++ sd s7, 40(sp)
++ .cfi_offset 23, -72
++ j .Ls6
++
++__riscv_save_7:
++__riscv_save_6:
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ addi sp, sp, -112
++ .cfi_def_cfa_offset 112
++ li t1, -48
++.Ls6:
++ sd s6, 48(sp)
++ .cfi_offset 22, -64
++ sd s5, 56(sp)
++ .cfi_offset 21, -56
++ j .Ls4
++
++__riscv_save_5:
++__riscv_save_4:
++ .cfi_restore 21
++ .cfi_restore 22
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ addi sp, sp, -112
++ .cfi_def_cfa_offset 112
++ li t1, -64
++.Ls4:
++ sd s4, 64(sp)
++ .cfi_offset 20, -48
++ sd s3, 72(sp)
++ .cfi_offset 19, -40
++ j .Ls2
++
++__riscv_save_3:
++__riscv_save_2:
++ .cfi_restore 19
++ .cfi_restore 20
++ .cfi_restore 21
++ .cfi_restore 22
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ addi sp, sp, -112
++ .cfi_def_cfa_offset 112
++ li t1, -80
++.Ls2:
++ sd s2, 80(sp)
++ .cfi_offset 18, -32
++ sd s1, 88(sp)
++ .cfi_offset 9, -24
++ sd s0, 96(sp)
++ .cfi_offset 8, -16
++ sd ra, 104(sp)
++ .cfi_offset 1, -8
++ # The CFA info is not correct for the next 2 instructions, since
++ # t1's value depends on how many registers were actually saved.
++ sub sp, sp, t1
++ jr t0
++ .cfi_endproc
++
++__riscv_save_1:
++__riscv_save_0:
++ .cfi_startproc
++ # The __riscv_save_* routines use t0/x5 as the return address
++ .cfi_return_column 5
++ addi sp, sp, -16
++ .cfi_def_cfa_offset 16
++ sd s0, 0(sp)
++ .cfi_offset 8, -16
++ sd ra, 8(sp)
++ .cfi_offset 1, -8
++ jr t0
++ .cfi_endproc
++
++__riscv_restore_12:
++ .cfi_startproc
++ .cfi_def_cfa_offset 112
++ .cfi_offset 27, -104
++ .cfi_offset 26, -96
++ .cfi_offset 25, -88
++ .cfi_offset 24, -80
++ .cfi_offset 23, -72
++ .cfi_offset 22, -64
++ .cfi_offset 21, -56
++ .cfi_offset 20, -48
++ .cfi_offset 19, -40
++ .cfi_offset 18, -32
++ .cfi_offset 9, -24
++ .cfi_offset 8, -16
++ .cfi_offset 1, -8
++ ld s11, 8(sp)
++ .cfi_restore 27
++ addi sp, sp, 16
++
++__riscv_restore_11:
++__riscv_restore_10:
++ .cfi_restore 27
++ .cfi_def_cfa_offset 96
++ ld s10, 0(sp)
++ .cfi_restore 26
++ ld s9, 8(sp)
++ .cfi_restore 25
++ addi sp, sp, 16
++
++__riscv_restore_9:
++__riscv_restore_8:
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ .cfi_def_cfa_offset 80
++ ld s8, 0(sp)
++ .cfi_restore 24
++ ld s7, 8(sp)
++ .cfi_restore 23
++ addi sp, sp, 16
++
++__riscv_restore_7:
++__riscv_restore_6:
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ .cfi_def_cfa_offset 64
++ ld s6, 0(sp)
++ .cfi_restore 22
++ ld s5, 8(sp)
++ .cfi_restore 21
++ addi sp, sp, 16
++
++__riscv_restore_5:
++__riscv_restore_4:
++ .cfi_restore 21
++ .cfi_restore 22
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ .cfi_def_cfa_offset 48
++ ld s4, 0(sp)
++ .cfi_restore 20
++ ld s3, 8(sp)
++ .cfi_restore 19
++ addi sp, sp, 16
++
++__riscv_restore_3:
++__riscv_restore_2:
++ .cfi_restore 19
++ .cfi_restore 20
++ .cfi_restore 21
++ .cfi_restore 22
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ .cfi_def_cfa_offset 32
++ ld s2, 0(sp)
++ .cfi_restore 18
++ ld s1, 8(sp)
++ .cfi_restore 9
++ addi sp, sp, 16
++
++__riscv_restore_1:
++__riscv_restore_0:
++ .cfi_restore 9
++ .cfi_restore 18
++ .cfi_restore 19
++ .cfi_restore 20
++ .cfi_restore 21
++ .cfi_restore 22
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ .cfi_def_cfa_offset 16
++ ld s0, 0(sp)
++ .cfi_restore 8
++ ld ra, 8(sp)
++ .cfi_restore 1
++ addi sp, sp, 16
++ .cfi_def_cfa_offset 0
++ ret
++ .cfi_endproc
++
++#else
++
++__riscv_save_12:
++ .cfi_startproc
++ # The __riscv_save_* routines use t0/x5 as the return address
++ .cfi_return_column 5
++ addi sp, sp, -64
++ .cfi_def_cfa_offset 64
++ li t1, 0
++ sw s11, 12(sp)
++ .cfi_offset 27, -52
++ j .Ls10
++
++__riscv_save_11:
++__riscv_save_10:
++__riscv_save_9:
++__riscv_save_8:
++ .cfi_restore 27
++ addi sp, sp, -64
++ .cfi_def_cfa_offset 64
++ li t1, -16
++.Ls10:
++ sw s10, 16(sp)
++ .cfi_offset 26, -48
++ sw s9, 20(sp)
++ .cfi_offset 25, -44
++ sw s8, 24(sp)
++ .cfi_offset 24, -40
++ sw s7, 28(sp)
++ .cfi_offset 23, -36
++ j .Ls6
++
++__riscv_save_7:
++__riscv_save_6:
++__riscv_save_5:
++__riscv_save_4:
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ addi sp, sp, -64
++ .cfi_def_cfa_offset 64
++ li t1, -32
++.Ls6:
++ sw s6, 32(sp)
++ .cfi_offset 22, -32
++ sw s5, 36(sp)
++ .cfi_offset 21, -28
++ sw s4, 40(sp)
++ .cfi_offset 20, -24
++ sw s3, 44(sp)
++ .cfi_offset 19, -20
++ sw s2, 48(sp)
++ .cfi_offset 18, -16
++ sw s1, 52(sp)
++ .cfi_offset 9, -12
++ sw s0, 56(sp)
++ .cfi_offset 8, -8
++ sw ra, 60(sp)
++ .cfi_offset 1, -4
++ # The CFA info is not correct for the next 2 instructions, since
++ # t1's value depends on how many registers were actually saved.
++ sub sp, sp, t1
++ jr t0
++ .cfi_endproc
++
++__riscv_save_3:
++__riscv_save_2:
++__riscv_save_1:
++__riscv_save_0:
++ .cfi_startproc
++ # The __riscv_save_* routines use t0/x5 as the return address
++ .cfi_return_column 5
++ addi sp, sp, -16
++ .cfi_def_cfa_offset 16
++ sw s2, 0(sp)
++ .cfi_offset 18, -16
++ sw s1, 4(sp)
++ .cfi_offset 9, -12
++ sw s0, 8(sp)
++ .cfi_offset 8, -8
++ sw ra, 12(sp)
++ .cfi_offset 1, -4
++ jr t0
++ .cfi_endproc
++
++__riscv_restore_12:
++ .cfi_startproc
++ .cfi_def_cfa_offset 64
++ .cfi_offset 27, -52
++ .cfi_offset 26, -48
++ .cfi_offset 25, -44
++ .cfi_offset 24, -40
++ .cfi_offset 23, -36
++ .cfi_offset 22, -32
++ .cfi_offset 21, -28
++ .cfi_offset 20, -24
++ .cfi_offset 19, -20
++ .cfi_offset 18, -16
++ .cfi_offset 9, -12
++ .cfi_offset 8, -8
++ .cfi_offset 1, -4
++ lw s11, 12(sp)
++ .cfi_restore 27
++ addi sp, sp, 16
++
++__riscv_restore_11:
++__riscv_restore_10:
++__riscv_restore_9:
++__riscv_restore_8:
++ .cfi_restore 27
++ .cfi_def_cfa_offset 48
++ lw s10, 0(sp)
++ .cfi_restore 26
++ lw s9, 4(sp)
++ .cfi_restore 25
++ lw s8, 8(sp)
++ .cfi_restore 24
++ lw s7, 12(sp)
++ .cfi_restore 23
++ addi sp, sp, 16
++
++__riscv_restore_7:
++__riscv_restore_6:
++__riscv_restore_5:
++__riscv_restore_4:
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ .cfi_def_cfa_offset 32
++ lw s6, 0(sp)
++ .cfi_restore 22
++ lw s5, 4(sp)
++ .cfi_restore 21
++ lw s4, 8(sp)
++ .cfi_restore 20
++ lw s3, 12(sp)
++ .cfi_restore 19
++ addi sp, sp, 16
++
++__riscv_restore_3:
++__riscv_restore_2:
++__riscv_restore_1:
++__riscv_restore_0:
++ .cfi_restore 19
++ .cfi_restore 20
++ .cfi_restore 21
++ .cfi_restore 22
++ .cfi_restore 23
++ .cfi_restore 24
++ .cfi_restore 25
++ .cfi_restore 26
++ .cfi_restore 27
++ .cfi_def_cfa_offset 16
++ lw s2, 0(sp)
++ .cfi_restore 18
++ lw s1, 4(sp)
++ .cfi_restore 9
++ lw s0, 8(sp)
++ .cfi_restore 8
++ lw ra, 12(sp)
++ .cfi_restore 1
++ addi sp, sp, 16
++ .cfi_def_cfa_offset 0
++ ret
++ .cfi_endproc
++
++#endif
+diff --git original-gcc/libgcc/config/riscv/sfp-machine.h gcc-6.3.0/libgcc/config/riscv/sfp-machine.h
+new file mode 100644
+index 00000000000..b1a27e7ed44
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/sfp-machine.h
+@@ -0,0 +1,137 @@
++/* Software floating-point machine description for RISC-V.
++
++ Copyright (C) 2016-2017 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
++<http://www.gnu.org/licenses/>. */
++
++#if __riscv_xlen == 32
++
++#define _FP_W_TYPE_SIZE 32
++#define _FP_W_TYPE unsigned long
++#define _FP_WS_TYPE signed long
++#define _FP_I_TYPE long
++
++#define _FP_MUL_MEAT_S(R,X,Y) \
++ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
++#define _FP_MUL_MEAT_D(R,X,Y) \
++ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
++#define _FP_MUL_MEAT_Q(R,X,Y) \
++ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
++
++#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
++#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
++#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
++
++#define _FP_NANFRAC_S _FP_QNANBIT_S
++#define _FP_NANFRAC_D _FP_QNANBIT_D, 0
++#define _FP_NANFRAC_Q _FP_QNANBIT_Q, 0, 0, 0
++
++#else
++
++#define _FP_W_TYPE_SIZE 64
++#define _FP_W_TYPE unsigned long long
++#define _FP_WS_TYPE signed long long
++#define _FP_I_TYPE long long
++
++#define _FP_MUL_MEAT_S(R,X,Y) \
++ _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
++#define _FP_MUL_MEAT_D(R,X,Y) \
++ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
++#define _FP_MUL_MEAT_Q(R,X,Y) \
++ _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
++
++#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
++#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
++#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y)
++
++#define _FP_NANFRAC_S _FP_QNANBIT_S
++#define _FP_NANFRAC_D _FP_QNANBIT_D
++#define _FP_NANFRAC_Q _FP_QNANBIT_Q, 0
++
++#endif
++
++#if __riscv_xlen == 64
++typedef int TItype __attribute__ ((mode (TI)));
++typedef unsigned int UTItype __attribute__ ((mode (TI)));
++#define TI_BITS (__CHAR_BIT__ * (int)sizeof(TItype))
++#endif
++
++/* The type of the result of a floating point comparison. This must
++ match __libgcc_cmp_return__ in GCC for the target. */
++typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
++#define CMPtype __gcc_CMPtype
++
++#define _FP_NANSIGN_S 0
++#define _FP_NANSIGN_D 0
++#define _FP_NANSIGN_Q 0
++
++#define _FP_KEEPNANFRACP 0
++#define _FP_QNANNEGATEDP 0
++
++#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
++ do { \
++ R##_s = _FP_NANSIGN_##fs; \
++ _FP_FRAC_SET_##wc(R,_FP_NANFRAC_##fs); \
++ R##_c = FP_CLS_NAN; \
++ } while (0)
++
++#define _FP_DECL_EX int _frm __attribute__ ((unused));
++#define FP_ROUNDMODE _frm
++
++#define FP_RND_NEAREST 0x0
++#define FP_RND_ZERO 0x1
++#define FP_RND_PINF 0x3
++#define FP_RND_MINF 0x2
++
++#define FP_EX_INVALID 0x10
++#define FP_EX_OVERFLOW 0x04
++#define FP_EX_UNDERFLOW 0x02
++#define FP_EX_DIVZERO 0x08
++#define FP_EX_INEXACT 0x01
++
++#define _FP_TININESS_AFTER_ROUNDING 1
++
++#ifdef __riscv_flen
++#define FP_INIT_ROUNDMODE \
++do { \
++ __asm__ volatile ("frrm %0" : "=r" (_frm)); \
++} while (0)
++
++#define FP_HANDLE_EXCEPTIONS \
++do { \
++ if (__builtin_expect (_fex, 0)) \
++ __asm__ volatile ("csrs fflags, %0" : : "rK" (_fex)); \
++} while (0)
++#else
++#define FP_INIT_ROUNDMODE _frm = FP_RND_NEAREST
++#endif
++
++#define __LITTLE_ENDIAN 1234
++#define __BIG_ENDIAN 4321
++
++#define __BYTE_ORDER __LITTLE_ENDIAN
++
++
++/* Define ALIASNAME as a strong alias for NAME. */
++# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
++# define _strong_alias(name, aliasname) \
++ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
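FP_INIT_ROUNDMODE and FP_HANDLE_EXCEPTIONS above are the only points where the soft-fp code touches hardware state, and only when __riscv_flen is defined. A standalone sketch of those two CSR accesses (hypothetical helper names, not part of the patch): frrm reads the dynamic rounding mode, encoded as in the FP_RND_* constants above, and csrs fflags ORs new accrued-exception bits into the flags register.

/* Hypothetical helpers, meaningful only on hard-float RISC-V targets
   (__riscv_flen defined).  */
static inline int
read_rounding_mode (void)
{
  int frm;
  __asm__ volatile ("frrm %0" : "=r" (frm));   /* 0 = nearest, 1 = zero,
                                                  2 = down, 3 = up.  */
  return frm;
}

static inline void
raise_fp_flags (int flags)
{
  /* Set accrued exception bits without clearing existing ones.  */
  __asm__ volatile ("csrs fflags, %0" : : "rK" (flags));
}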
+diff --git original-gcc/libgcc/config/riscv/t-elf gcc-6.3.0/libgcc/config/riscv/t-elf
+new file mode 100644
+index 00000000000..01d5ebaa417
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/t-elf
+@@ -0,0 +1,6 @@
++LIB2ADD += $(srcdir)/config/riscv/save-restore.S \
++ $(srcdir)/config/riscv/muldi3.S \
++ $(srcdir)/config/riscv/multi3.S \
++ $(srcdir)/config/riscv/div.S \
++ $(srcdir)/config/riscv/atomic.c
++
+diff --git original-gcc/libgcc/config/riscv/t-elf32 gcc-6.3.0/libgcc/config/riscv/t-elf32
+new file mode 100644
+index 00000000000..f3751234d55
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/t-elf32
+@@ -0,0 +1 @@
++LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
+diff --git original-gcc/libgcc/config/riscv/t-elf64 gcc-6.3.0/libgcc/config/riscv/t-elf64
+new file mode 100644
+index 00000000000..f3751234d55
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/t-elf64
+@@ -0,0 +1 @@
++LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
+diff --git original-gcc/libgcc/config/riscv/t-softfp32 gcc-6.3.0/libgcc/config/riscv/t-softfp32
+new file mode 100644
+index 00000000000..1bd51e803d1
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/t-softfp32
+@@ -0,0 +1,26 @@
++ABI_SINGLE:=$(findstring __riscv_float_abi_single,$(shell $(gcc_compile_bare) -dM -E - </dev/null))
++ABI_DOUBLE:=$(findstring __riscv_float_abi_double,$(shell $(gcc_compile_bare) -dM -E - </dev/null))
++ABI_QUAD:=$(findstring __riscv_float_abi_quad,$(shell $(gcc_compile_bare) -dM -E - </dev/null))
++
++softfp_int_modes := si di
++softfp_exclude_libgcc2 := n
++
++ifndef ABI_QUAD
++ifdef ABI_DOUBLE
++
++softfp_float_modes := tf
++softfp_extensions := sftf dftf
++softfp_truncations := tfsf tfdf
++
++else
++
++softfp_float_modes := df tf
++softfp_extensions := sfdf sftf dftf
++softfp_truncations := dfsf tfsf tfdf
++
++ifndef ABI_SINGLE
++softfp_float_modes += sf
++endif
++
++endif
++endif
+diff --git original-gcc/libgcc/config/riscv/t-softfp64 gcc-6.3.0/libgcc/config/riscv/t-softfp64
+new file mode 100644
+index 00000000000..75870951202
+--- /dev/null
++++ gcc-6.3.0/libgcc/config/riscv/t-softfp64
+@@ -0,0 +1,3 @@
++include $(srcdir)/config/riscv/t-softfp32
++
++softfp_int_modes += ti