Diffstat (limited to 'src/arch/arm/isa/formats/util.isa')
-rw-r--r-- | src/arch/arm/isa/formats/util.isa | 217
1 file changed, 217 insertions, 0 deletions
diff --git a/src/arch/arm/isa/formats/util.isa b/src/arch/arm/isa/formats/util.isa
new file mode 100644
index 000000000..ee6339c02
--- /dev/null
+++ b/src/arch/arm/isa/formats/util.isa
@@ -0,0 +1,217 @@
+// -*- mode:c++ -*-
+
+// Copyright (c) 2007-2008 The Florida State University
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met: redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer;
+// redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution;
+// neither the name of the copyright holders nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Stephen Hines
+
+let {{
+
+# Generic substitutions for Arm instructions
+def ArmGenericCodeSubs(code):
+    # Substitute in the shifted portion of operations
+    new_code = re.sub(r'Rm_Imm', 'shift_rm_imm(Rm, shift_size, shift, Cpsr<29:>)', code)
+    new_code = re.sub(r'Rm_Rs', 'shift_rm_rs(Rm, Rs, shift, Cpsr<29:>)', new_code)
+    # r'^' (without re.MULTILINE) matches only the start of the string,
+    # so this prepends a single 'Cpsr = Cpsr;' to the code.
+    new_code = re.sub(r'^', 'Cpsr = Cpsr;', new_code)
+    return new_code
+
+def LoadStoreBase(name, Name, ea_code, memacc_code, mem_flags, inst_flags,
+                  postacc_code = '', base_class = 'Memory',
+                  decode_template = BasicDecode, exec_template_base = ''):
+    # Make sure flags are in lists (convert to lists if not).
+    mem_flags = makeList(mem_flags)
+    inst_flags = makeList(inst_flags)
+
+    # Add a hook to get effective addresses into the execution trace output.
+    ea_code += '\nif (traceData) { traceData->setAddr(EA); }\n'
+
+    # Some CPU models execute the memory operation as an atomic unit,
+    # while others want to separate them into an effective address
+    # computation and a memory access operation. As a result, we need
+    # to generate three StaticInst objects. Note that the latter two
+    # are nested inside the larger "atomic" one.
+
+    # Generate InstObjParams for each of the three objects. Note that
+    # they differ only in the set of code objects contained (which in
+    # turn affects the object's overall operand list).
+    iop = InstObjParams(name, Name, base_class,
+                        { 'ea_code':ea_code, 'memacc_code':memacc_code, 'postacc_code':postacc_code },
+                        inst_flags)
+    ea_iop = InstObjParams(name, Name, base_class,
+                           { 'ea_code':ea_code },
+                           inst_flags)
+    memacc_iop = InstObjParams(name, Name, base_class,
+                               { 'memacc_code':memacc_code, 'postacc_code':postacc_code },
+                               inst_flags)
+
+    if mem_flags:
+        s = '\n\tmemAccessFlags = ' + string.join(mem_flags, '|') + ';'
+        iop.constructor += s
+        memacc_iop.constructor += s
+
+    # Select templates.
+
+    # The InitiateAcc template for a StoreCond is the same as the one for
+    # the corresponding Store.
+    StoreCondInitiateAcc = StoreInitiateAcc
+
+    memAccExecTemplate = eval(exec_template_base + 'MemAccExecute')
+    fullExecTemplate = eval(exec_template_base + 'Execute')
+    initiateAccTemplate = eval(exec_template_base + 'InitiateAcc')
+    completeAccTemplate = eval(exec_template_base + 'CompleteAcc')
+
+    # (header_output, decoder_output, decode_block, exec_output)
+    return (LoadStoreDeclare.subst(iop),
+            EACompConstructor.subst(ea_iop)
+            + MemAccConstructor.subst(memacc_iop)
+            + LoadStoreConstructor.subst(iop),
+            decode_template.subst(iop),
+            EACompExecute.subst(ea_iop)
+            + memAccExecTemplate.subst(memacc_iop)
+            + fullExecTemplate.subst(iop)
+            + initiateAccTemplate.subst(iop)
+            + completeAccTemplate.subst(iop))
+}};
+
+
+output header {{
+    std::string inst2string(MachInst machInst);
+    StaticInstPtr gen_ldrstr_uop(uint32_t baseinst, int loadop, uint32_t rd, int32_t disp);
+    int emit_ldfstf_uops(StaticInstPtr* microOps, int index, uint32_t baseinst, int loadop, int up, int32_t disp);
+}};
+
+output decoder {{
+
+    // Render a machine instruction as a 32-character binary string, MSB first.
+    std::string inst2string(MachInst machInst)
+    {
+        std::string str = "";
+        uint32_t mask = 0x80000000;
+
+        for (int i = 0; i < 32; i++) {
+            if ((machInst & mask) == 0) {
+                str += "0";
+            } else {
+                str += "1";
+            }
+
+            mask = mask >> 1;
+        }
+
+        return str;
+    }
+
+    // Generate the bit pattern for an Ldr_uop or Str_uop.
+    StaticInstPtr
+    gen_ldrstr_uop(uint32_t baseinst, int loadop, uint32_t rd, int32_t disp)
+    {
+        StaticInstPtr newInst;
+        uint32_t newMachInst = baseinst & 0xffff0000;
+        newMachInst |= (rd << 12);
+        newMachInst |= disp;
+        if (loadop)
+            newInst = new Ldr_uop(newMachInst);
+        else
+            newInst = new Str_uop(newMachInst);
+        return newInst;
+    }
+
+    // Emit the uops for a double-precision FP move.
+    int
+    emit_ldfstf_uops(StaticInstPtr* microOps, int index, uint32_t baseinst, int loadop, int up, int32_t disp)
+    {
+        StaticInstPtr newInst;
+        uint32_t newMachInst;
+
+        if (loadop)
+        {
+            newMachInst = baseinst & 0xfffff000;
+            newMachInst |= (disp & 0x0fff);
+            newInst = new Ldlo_uop(newMachInst);
+            microOps[index++] = newInst;
+
+            newMachInst = baseinst & 0xfffff000;
+            if (up)
+                newMachInst |= ((disp + 4) & 0x0fff);
+            else
+                newMachInst |= ((disp - 4) & 0x0fff);
+            newInst = new Ldhi_uop(newMachInst);
+            microOps[index++] = newInst;
+
+            newMachInst = baseinst & 0xf000f000;
+            newInst = new Mvtd_uop(newMachInst);
+            microOps[index++] = newInst;
+        }
+        else
+        {
+            newMachInst = baseinst & 0xf000f000;
+            newInst = new Mvfd_uop(newMachInst);
+            microOps[index++] = newInst;
+
+            newMachInst = baseinst & 0xfffff000;
+            newMachInst |= (disp & 0x0fff);
+            newInst = new Stlo_uop(newMachInst);
+            microOps[index++] = newInst;
+
+            newMachInst = baseinst & 0xfffff000;
+            if (up)
+                newMachInst |= ((disp + 4) & 0x0fff);
+            else
+                newMachInst |= ((disp - 4) & 0x0fff);
+            newInst = new Sthi_uop(newMachInst);
+            microOps[index++] = newInst;
+        }
+        return 3;
+    }
+
+}};
+
+output exec {{
+
+    using namespace ArmISA;
+
+    /// Clear all CPU inst/exe hazards.
+    inline void
+    clear_exe_inst_hazards()
+    {
+        //CODE HERE
+    }
+
+#if FULL_SYSTEM
+    inline Fault checkFpEnableFault(%(CPU_exec_context)s *xc)
+    {
+        return NoFault;
+    }
+#else
+    inline Fault checkFpEnableFault(%(CPU_exec_context)s *xc)
+    {
+        return NoFault;
+    }
+#endif
+
+
+}};
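
Note on ArmGenericCodeSubs: the rewriting it performs is ordinary Python regex substitution and can be tried outside the ISA parser. The sketch below is standalone and illustrative only; the sample semantics string 'Rd = Rn + Rm_Imm;' is made up, and shift_rm_imm() and the Cpsr operand only have meaning once the gem5 ISA parser compiles the result.

    import re

    # Mirror of the substitutions in ArmGenericCodeSubs above.
    def arm_generic_code_subs(code):
        new_code = re.sub(r'Rm_Imm',
                          'shift_rm_imm(Rm, shift_size, shift, Cpsr<29:>)', code)
        new_code = re.sub(r'Rm_Rs',
                          'shift_rm_rs(Rm, Rs, shift, Cpsr<29:>)', new_code)
        # '^' without re.MULTILINE matches only the start of the string, so
        # this prepends a single 'Cpsr = Cpsr;' (presumably so the parser
        # always treats Cpsr as an operand of the instruction).
        new_code = re.sub(r'^', 'Cpsr = Cpsr;', new_code)
        return new_code

    print(arm_generic_code_subs('Rd = Rn + Rm_Imm;'))
    # -> Cpsr = Cpsr;Rd = Rn + shift_rm_imm(Rm, shift_size, shift, Cpsr<29:>);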
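
Note on LoadStoreBase: the four exec-related templates are chosen by composing a name from exec_template_base and looking it up with eval(), so a caller passing exec_template_base = 'Load' would pick up templates named LoadMemAccExecute, LoadExecute, and so on. The following standalone sketch shows only that lookup-by-composed-name convention; the template names and string bodies are placeholders, not objects defined in this patch.

    # Dummy stand-ins for code templates the ISA parser would define elsewhere.
    LoadMemAccExecute = '<LoadMemAccExecute body>'
    LoadExecute = '<LoadExecute body>'
    LoadInitiateAcc = '<LoadInitiateAcc body>'
    LoadCompleteAcc = '<LoadCompleteAcc body>'

    def pick_templates(exec_template_base):
        # Same name-composition trick as in LoadStoreBase above.
        memAccExecTemplate = eval(exec_template_base + 'MemAccExecute')
        fullExecTemplate = eval(exec_template_base + 'Execute')
        initiateAccTemplate = eval(exec_template_base + 'InitiateAcc')
        completeAccTemplate = eval(exec_template_base + 'CompleteAcc')
        return (memAccExecTemplate, fullExecTemplate,
                initiateAccTemplate, completeAccTemplate)

    print(pick_templates('Load'))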
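
Note on the micro-op encodings: gen_ldrstr_uop() keeps the upper half of the parent instruction's encoding, splices the destination register into bits 15..12, and ORs the displacement into the low bits, while emit_ldfstf_uops() masks disp +/- 4 back into the 12-bit immediate field for the second word of the double. The arithmetic can be checked with a small standalone sketch; the base encoding 0xe5900000 and the operand values below are illustrative, not taken from the patch.

    def inst2bits(mach_inst):
        # Same output as the C++ inst2string(): 32 bits, MSB first.
        return format(mach_inst & 0xffffffff, '032b')

    def gen_ldrstr_bits(baseinst, rd, disp):
        # Mirrors the masking in gen_ldrstr_uop().
        new_mach_inst = baseinst & 0xffff0000
        new_mach_inst |= (rd << 12)
        new_mach_inst |= disp
        return new_mach_inst

    print(inst2bits(gen_ldrstr_bits(0xe5900000, rd=2, disp=0x8)))
    # -> 11100101100100000010000000001000

    # Displacement of the second word in emit_ldfstf_uops(), masked back
    # into the 12-bit immediate field:
    disp = 0
    print(hex((disp - 4) & 0x0fff))   # -> 0xffc (wraps within 12 bits)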