From ae51f41d14f548d494ac41e0d21137c5a4c3f59c Mon Sep 17 00:00:00 2001 From: Iru Cai Date: Wed, 30 Oct 2019 14:21:52 +0800 Subject: import the U-Boot code and make it compile --- include/linux/apm_bios.h | 211 ++++ include/linux/arm-smccc.h | 125 +++ include/linux/bch.h | 67 ++ include/linux/bitfield.h | 106 ++ include/linux/bitmap.h | 23 + include/linux/bitops.h | 207 ++++ include/linux/bitrev.h | 105 ++ include/linux/bug.h | 52 + include/linux/build_bug.h | 84 ++ include/linux/byteorder/big_endian.h | 110 ++ include/linux/byteorder/generic.h | 207 ++++ include/linux/byteorder/little_endian.h | 110 ++ include/linux/byteorder/swab.h | 156 +++ include/linux/clk-provider.h | 190 ++++ include/linux/clk/analogbits-wrpll-cln28hpc.h | 79 ++ include/linux/compat.h | 378 +++++++ include/linux/compiler-clang.h | 12 + include/linux/compiler-gcc.h | 285 ++++++ include/linux/compiler-intel.h | 45 + include/linux/compiler.h | 559 +++++++++++ include/linux/completion.h | 173 ++++ include/linux/const.h | 34 + include/linux/crc32.h | 27 + include/linux/crc7.h | 14 + include/linux/crc8.h | 23 + include/linux/ctype.h | 60 ++ include/linux/delay.h | 21 + include/linux/dma-direction.h | 13 + include/linux/drm_dp_helper.h | 406 ++++++++ include/linux/edd.h | 185 ++++ include/linux/err.h | 59 ++ include/linux/errno.h | 168 ++++ include/linux/ethtool.h | 725 ++++++++++++++ include/linux/fb.h | 619 ++++++++++++ include/linux/if_ether.h | 178 ++++ include/linux/immap_qe.h | 593 +++++++++++ include/linux/input.h | 152 +++ include/linux/io.h | 72 ++ include/linux/ioctl.h | 6 + include/linux/iopoll.h | 67 ++ include/linux/ioport.h | 192 ++++ include/linux/kbuild.h | 20 + include/linux/kconfig.h | 109 ++ include/linux/kernel.h | 269 +++++ include/linux/libfdt.h | 312 ++++++ include/linux/libfdt_env.h | 31 + include/linux/linkage.h | 74 ++ include/linux/linux_string.h | 8 + include/linux/list.h | 685 +++++++++++++ include/linux/list_sort.h | 11 + include/linux/log2.h | 215 ++++ include/linux/lzo.h | 51 + include/linux/math64.h | 257 +++++ include/linux/mbus.h | 73 ++ include/linux/mc146818rtc.h | 86 ++ include/linux/mdio.h | 310 ++++++ include/linux/mii.h | 228 +++++ include/linux/mtd/bbm.h | 161 +++ include/linux/mtd/cfi.h | 32 + include/linux/mtd/concat.h | 23 + include/linux/mtd/doc2000.h | 207 ++++ include/linux/mtd/flashchip.h | 103 ++ include/linux/mtd/fsl_upm.h | 44 + include/linux/mtd/fsmc_nand.h | 84 ++ include/linux/mtd/mtd.h | 608 +++++++++++ include/linux/mtd/nand.h | 734 ++++++++++++++ include/linux/mtd/nand_bch.h | 68 ++ include/linux/mtd/nand_ecc.h | 27 + include/linux/mtd/ndfc.h | 67 ++ include/linux/mtd/omap_elm.h | 79 ++ include/linux/mtd/omap_gpmc.h | 97 ++ include/linux/mtd/onenand.h | 180 ++++ include/linux/mtd/onenand_regs.h | 208 ++++ include/linux/mtd/partitions.h | 110 ++ include/linux/mtd/rawnand.h | 1332 +++++++++++++++++++++++++ include/linux/mtd/samsung_onenand.h | 116 +++ include/linux/mtd/spi-nor.h | 429 ++++++++ include/linux/mtd/spinand.h | 433 ++++++++ include/linux/mtd/st_smi.h | 100 ++ include/linux/mtd/ubi.h | 289 ++++++ include/linux/netdevice.h | 61 ++ include/linux/poison.h | 11 + include/linux/posix_types.h | 48 + include/linux/printk.h | 79 ++ include/linux/psci.h | 101 ++ include/linux/rbtree.h | 97 ++ include/linux/rbtree_augmented.h | 219 ++++ include/linux/screen_info.h | 84 ++ include/linux/serial_reg.h | 387 +++++++ include/linux/sizes.h | 51 + include/linux/soc/ti/cppi5.h | 995 ++++++++++++++++++ include/linux/soc/ti/k3-navss-ringacc.h | 236 +++++ 
include/linux/soc/ti/k3-sec-proxy.h | 25 + include/linux/soc/ti/ti-udma.h | 24 + include/linux/soc/ti/ti_sci_protocol.h | 693 +++++++++++++ include/linux/stat.h | 158 +++ include/linux/stddef.h | 20 + include/linux/string.h | 131 +++ include/linux/stringify.h | 12 + include/linux/time.h | 153 +++ include/linux/typecheck.h | 24 + include/linux/types.h | 164 +++ include/linux/unaligned/access_ok.h | 66 ++ include/linux/unaligned/be_byteshift.h | 70 ++ include/linux/unaligned/generic.h | 68 ++ include/linux/unaligned/le_byteshift.h | 70 ++ include/linux/usb/at91_udc.h | 19 + include/linux/usb/atmel_usba_udc.h | 25 + include/linux/usb/cdc.h | 224 +++++ include/linux/usb/ch9.h | 1063 ++++++++++++++++++++ include/linux/usb/composite.h | 346 +++++++ include/linux/usb/dwc3-omap.h | 18 + include/linux/usb/dwc3.h | 223 +++++ include/linux/usb/gadget.h | 947 ++++++++++++++++++ include/linux/usb/musb.h | 156 +++ include/linux/usb/otg.h | 37 + include/linux/usb/xhci-fsl.h | 73 ++ include/linux/usb/xhci-omap.h | 145 +++ include/linux/xxhash.h | 229 +++++ include/linux/zstd.h | 1147 +++++++++++++++++++++ 120 files changed, 23867 insertions(+) create mode 100644 include/linux/apm_bios.h create mode 100644 include/linux/arm-smccc.h create mode 100644 include/linux/bch.h create mode 100644 include/linux/bitfield.h create mode 100644 include/linux/bitmap.h create mode 100644 include/linux/bitops.h create mode 100644 include/linux/bitrev.h create mode 100644 include/linux/bug.h create mode 100644 include/linux/build_bug.h create mode 100644 include/linux/byteorder/big_endian.h create mode 100644 include/linux/byteorder/generic.h create mode 100644 include/linux/byteorder/little_endian.h create mode 100644 include/linux/byteorder/swab.h create mode 100644 include/linux/clk-provider.h create mode 100644 include/linux/clk/analogbits-wrpll-cln28hpc.h create mode 100644 include/linux/compat.h create mode 100644 include/linux/compiler-clang.h create mode 100644 include/linux/compiler-gcc.h create mode 100644 include/linux/compiler-intel.h create mode 100644 include/linux/compiler.h create mode 100644 include/linux/completion.h create mode 100644 include/linux/const.h create mode 100644 include/linux/crc32.h create mode 100644 include/linux/crc7.h create mode 100644 include/linux/crc8.h create mode 100644 include/linux/ctype.h create mode 100644 include/linux/delay.h create mode 100644 include/linux/dma-direction.h create mode 100644 include/linux/drm_dp_helper.h create mode 100644 include/linux/edd.h create mode 100644 include/linux/err.h create mode 100644 include/linux/errno.h create mode 100644 include/linux/ethtool.h create mode 100644 include/linux/fb.h create mode 100644 include/linux/if_ether.h create mode 100644 include/linux/immap_qe.h create mode 100644 include/linux/input.h create mode 100644 include/linux/io.h create mode 100644 include/linux/ioctl.h create mode 100644 include/linux/iopoll.h create mode 100644 include/linux/ioport.h create mode 100644 include/linux/kbuild.h create mode 100644 include/linux/kconfig.h create mode 100644 include/linux/kernel.h create mode 100644 include/linux/libfdt.h create mode 100644 include/linux/libfdt_env.h create mode 100644 include/linux/linkage.h create mode 100644 include/linux/linux_string.h create mode 100644 include/linux/list.h create mode 100644 include/linux/list_sort.h create mode 100644 include/linux/log2.h create mode 100644 include/linux/lzo.h create mode 100644 include/linux/math64.h create mode 100644 include/linux/mbus.h create mode 100644 
include/linux/mc146818rtc.h create mode 100644 include/linux/mdio.h create mode 100644 include/linux/mii.h create mode 100644 include/linux/mtd/bbm.h create mode 100644 include/linux/mtd/cfi.h create mode 100644 include/linux/mtd/concat.h create mode 100644 include/linux/mtd/doc2000.h create mode 100644 include/linux/mtd/flashchip.h create mode 100644 include/linux/mtd/fsl_upm.h create mode 100644 include/linux/mtd/fsmc_nand.h create mode 100644 include/linux/mtd/mtd.h create mode 100644 include/linux/mtd/nand.h create mode 100644 include/linux/mtd/nand_bch.h create mode 100644 include/linux/mtd/nand_ecc.h create mode 100644 include/linux/mtd/ndfc.h create mode 100644 include/linux/mtd/omap_elm.h create mode 100644 include/linux/mtd/omap_gpmc.h create mode 100644 include/linux/mtd/onenand.h create mode 100644 include/linux/mtd/onenand_regs.h create mode 100644 include/linux/mtd/partitions.h create mode 100644 include/linux/mtd/rawnand.h create mode 100644 include/linux/mtd/samsung_onenand.h create mode 100644 include/linux/mtd/spi-nor.h create mode 100644 include/linux/mtd/spinand.h create mode 100644 include/linux/mtd/st_smi.h create mode 100644 include/linux/mtd/ubi.h create mode 100644 include/linux/netdevice.h create mode 100644 include/linux/poison.h create mode 100644 include/linux/posix_types.h create mode 100644 include/linux/printk.h create mode 100644 include/linux/psci.h create mode 100644 include/linux/rbtree.h create mode 100644 include/linux/rbtree_augmented.h create mode 100644 include/linux/screen_info.h create mode 100644 include/linux/serial_reg.h create mode 100644 include/linux/sizes.h create mode 100644 include/linux/soc/ti/cppi5.h create mode 100644 include/linux/soc/ti/k3-navss-ringacc.h create mode 100644 include/linux/soc/ti/k3-sec-proxy.h create mode 100644 include/linux/soc/ti/ti-udma.h create mode 100644 include/linux/soc/ti/ti_sci_protocol.h create mode 100644 include/linux/stat.h create mode 100644 include/linux/stddef.h create mode 100644 include/linux/string.h create mode 100644 include/linux/stringify.h create mode 100644 include/linux/time.h create mode 100644 include/linux/typecheck.h create mode 100644 include/linux/types.h create mode 100644 include/linux/unaligned/access_ok.h create mode 100644 include/linux/unaligned/be_byteshift.h create mode 100644 include/linux/unaligned/generic.h create mode 100644 include/linux/unaligned/le_byteshift.h create mode 100644 include/linux/usb/at91_udc.h create mode 100644 include/linux/usb/atmel_usba_udc.h create mode 100644 include/linux/usb/cdc.h create mode 100644 include/linux/usb/ch9.h create mode 100644 include/linux/usb/composite.h create mode 100644 include/linux/usb/dwc3-omap.h create mode 100644 include/linux/usb/dwc3.h create mode 100644 include/linux/usb/gadget.h create mode 100644 include/linux/usb/musb.h create mode 100644 include/linux/usb/otg.h create mode 100644 include/linux/usb/xhci-fsl.h create mode 100644 include/linux/usb/xhci-omap.h create mode 100644 include/linux/xxhash.h create mode 100644 include/linux/zstd.h (limited to 'include/linux') diff --git a/include/linux/apm_bios.h b/include/linux/apm_bios.h new file mode 100644 index 0000000..3dabc3f --- /dev/null +++ b/include/linux/apm_bios.h @@ -0,0 +1,211 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +#ifndef _LINUX_APM_H +#define _LINUX_APM_H + +/* + * Include file for the interface to an APM BIOS + * Copyright 1994-2001 Stephen Rothwell (sfr@canb.auug.org.au) + */ + +#include + +typedef unsigned short apm_event_t; +typedef unsigned short 
apm_eventinfo_t; + +struct apm_bios_info { + __u16 version; + __u16 cseg; + __u32 offset; + __u16 cseg_16; + __u16 dseg; + __u16 flags; + __u16 cseg_len; + __u16 cseg_16_len; + __u16 dseg_len; +}; + +#ifdef __KERNEL__ + +#define APM_CS (GDT_ENTRY_APMBIOS_BASE * 8) +#define APM_CS_16 (APM_CS + 8) +#define APM_DS (APM_CS_16 + 8) + +/* Results of APM Installation Check */ +#define APM_16_BIT_SUPPORT 0x0001 +#define APM_32_BIT_SUPPORT 0x0002 +#define APM_IDLE_SLOWS_CLOCK 0x0004 +#define APM_BIOS_DISABLED 0x0008 +#define APM_BIOS_DISENGAGED 0x0010 + +/* + * Data for APM that is persistent across module unload/load + */ +struct apm_info { + struct apm_bios_info bios; + unsigned short connection_version; + int get_power_status_broken; + int get_power_status_swabinminutes; + int allow_ints; + int forbid_idle; + int realmode_power_off; + int disabled; +}; + +/* + * The APM function codes + */ +#define APM_FUNC_INST_CHECK 0x5300 +#define APM_FUNC_REAL_CONN 0x5301 +#define APM_FUNC_16BIT_CONN 0x5302 +#define APM_FUNC_32BIT_CONN 0x5303 +#define APM_FUNC_DISCONN 0x5304 +#define APM_FUNC_IDLE 0x5305 +#define APM_FUNC_BUSY 0x5306 +#define APM_FUNC_SET_STATE 0x5307 +#define APM_FUNC_ENABLE_PM 0x5308 +#define APM_FUNC_RESTORE_BIOS 0x5309 +#define APM_FUNC_GET_STATUS 0x530a +#define APM_FUNC_GET_EVENT 0x530b +#define APM_FUNC_GET_STATE 0x530c +#define APM_FUNC_ENABLE_DEV_PM 0x530d +#define APM_FUNC_VERSION 0x530e +#define APM_FUNC_ENGAGE_PM 0x530f +#define APM_FUNC_GET_CAP 0x5310 +#define APM_FUNC_RESUME_TIMER 0x5311 +#define APM_FUNC_RESUME_ON_RING 0x5312 +#define APM_FUNC_TIMER 0x5313 + +/* + * Function code for APM_FUNC_RESUME_TIMER + */ +#define APM_FUNC_DISABLE_TIMER 0 +#define APM_FUNC_GET_TIMER 1 +#define APM_FUNC_SET_TIMER 2 + +/* + * Function code for APM_FUNC_RESUME_ON_RING + */ +#define APM_FUNC_DISABLE_RING 0 +#define APM_FUNC_ENABLE_RING 1 +#define APM_FUNC_GET_RING 2 + +/* + * Function code for APM_FUNC_TIMER_STATUS + */ +#define APM_FUNC_TIMER_DISABLE 0 +#define APM_FUNC_TIMER_ENABLE 1 +#define APM_FUNC_TIMER_GET 2 + +/* + * in arch/i386/kernel/setup.c + */ +extern struct apm_info apm_info; + +#endif /* __KERNEL__ */ + +/* + * Power states + */ +#define APM_STATE_READY 0x0000 +#define APM_STATE_STANDBY 0x0001 +#define APM_STATE_SUSPEND 0x0002 +#define APM_STATE_OFF 0x0003 +#define APM_STATE_BUSY 0x0004 +#define APM_STATE_REJECT 0x0005 +#define APM_STATE_OEM_SYS 0x0020 +#define APM_STATE_OEM_DEV 0x0040 + +#define APM_STATE_DISABLE 0x0000 +#define APM_STATE_ENABLE 0x0001 + +#define APM_STATE_DISENGAGE 0x0000 +#define APM_STATE_ENGAGE 0x0001 + +/* + * Events (results of Get PM Event) + */ +#define APM_SYS_STANDBY 0x0001 +#define APM_SYS_SUSPEND 0x0002 +#define APM_NORMAL_RESUME 0x0003 +#define APM_CRITICAL_RESUME 0x0004 +#define APM_LOW_BATTERY 0x0005 +#define APM_POWER_STATUS_CHANGE 0x0006 +#define APM_UPDATE_TIME 0x0007 +#define APM_CRITICAL_SUSPEND 0x0008 +#define APM_USER_STANDBY 0x0009 +#define APM_USER_SUSPEND 0x000a +#define APM_STANDBY_RESUME 0x000b +#define APM_CAPABILITY_CHANGE 0x000c + +/* + * Error codes + */ +#define APM_SUCCESS 0x00 +#define APM_DISABLED 0x01 +#define APM_CONNECTED 0x02 +#define APM_NOT_CONNECTED 0x03 +#define APM_16_CONNECTED 0x05 +#define APM_16_UNSUPPORTED 0x06 +#define APM_32_CONNECTED 0x07 +#define APM_32_UNSUPPORTED 0x08 +#define APM_BAD_DEVICE 0x09 +#define APM_BAD_PARAM 0x0a +#define APM_NOT_ENGAGED 0x0b +#define APM_BAD_FUNCTION 0x0c +#define APM_RESUME_DISABLED 0x0d +#define APM_NO_ERROR 0x53 +#define APM_BAD_STATE 0x60 +#define APM_NO_EVENTS 0x80 
+#define APM_NOT_PRESENT 0x86 + +/* + * APM Device IDs + */ +#define APM_DEVICE_BIOS 0x0000 +#define APM_DEVICE_ALL 0x0001 +#define APM_DEVICE_DISPLAY 0x0100 +#define APM_DEVICE_STORAGE 0x0200 +#define APM_DEVICE_PARALLEL 0x0300 +#define APM_DEVICE_SERIAL 0x0400 +#define APM_DEVICE_NETWORK 0x0500 +#define APM_DEVICE_PCMCIA 0x0600 +#define APM_DEVICE_BATTERY 0x8000 +#define APM_DEVICE_OEM 0xe000 +#define APM_DEVICE_OLD_ALL 0xffff +#define APM_DEVICE_CLASS 0x00ff +#define APM_DEVICE_MASK 0xff00 + +#ifdef __KERNEL__ +/* + * This is the "All Devices" ID communicated to the BIOS + */ +#define APM_DEVICE_BALL ((apm_info.connection_version > 0x0100) ? \ + APM_DEVICE_ALL : APM_DEVICE_OLD_ALL) +#endif + +/* + * Battery status + */ +#define APM_MAX_BATTERIES 2 + +/* + * APM defined capability bit flags + */ +#define APM_CAP_GLOBAL_STANDBY 0x0001 +#define APM_CAP_GLOBAL_SUSPEND 0x0002 +#define APM_CAP_RESUME_STANDBY_TIMER 0x0004 /* Timer resume from standby */ +#define APM_CAP_RESUME_SUSPEND_TIMER 0x0008 /* Timer resume from suspend */ +#define APM_CAP_RESUME_STANDBY_RING 0x0010 /* Resume on Ring fr standby */ +#define APM_CAP_RESUME_SUSPEND_RING 0x0020 /* Resume on Ring fr suspend */ +#define APM_CAP_RESUME_STANDBY_PCMCIA 0x0040 /* Resume on PCMCIA Ring */ +#define APM_CAP_RESUME_SUSPEND_PCMCIA 0x0080 /* Resume on PCMCIA Ring */ + +/* + * ioctl operations + */ +#include + +#define APM_IOC_STANDBY _IO('A', 1) +#define APM_IOC_SUSPEND _IO('A', 2) + +#endif /* LINUX_APM_H */ diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h new file mode 100644 index 0000000..2d1e6cc --- /dev/null +++ b/include/linux/arm-smccc.h @@ -0,0 +1,125 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2015, Linaro Limited + */ +#ifndef __LINUX_ARM_SMCCC_H +#define __LINUX_ARM_SMCCC_H + +/* + * This file provides common defines for ARM SMC Calling Convention as + * specified in + * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html + */ + +#define ARM_SMCCC_STD_CALL 0 +#define ARM_SMCCC_FAST_CALL 1 +#define ARM_SMCCC_TYPE_SHIFT 31 + +#define ARM_SMCCC_SMC_32 0 +#define ARM_SMCCC_SMC_64 1 +#define ARM_SMCCC_CALL_CONV_SHIFT 30 + +#define ARM_SMCCC_OWNER_MASK 0x3F +#define ARM_SMCCC_OWNER_SHIFT 24 + +#define ARM_SMCCC_FUNC_MASK 0xFFFF + +#define ARM_SMCCC_IS_FAST_CALL(smc_val) \ + ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT)) +#define ARM_SMCCC_IS_64(smc_val) \ + ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT)) +#define ARM_SMCCC_FUNC_NUM(smc_val) ((smc_val) & ARM_SMCCC_FUNC_MASK) +#define ARM_SMCCC_OWNER_NUM(smc_val) \ + (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK) + +#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \ + (((type) << ARM_SMCCC_TYPE_SHIFT) | \ + ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \ + (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \ + ((func_num) & ARM_SMCCC_FUNC_MASK)) + +#define ARM_SMCCC_OWNER_ARCH 0 +#define ARM_SMCCC_OWNER_CPU 1 +#define ARM_SMCCC_OWNER_SIP 2 +#define ARM_SMCCC_OWNER_OEM 3 +#define ARM_SMCCC_OWNER_STANDARD 4 +#define ARM_SMCCC_OWNER_TRUSTED_APP 48 +#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49 +#define ARM_SMCCC_OWNER_TRUSTED_OS 50 +#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63 + +#define ARM_SMCCC_QUIRK_NONE 0 +#define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ + +#ifndef __ASSEMBLY__ + +#include +#include +/** + * struct arm_smccc_res - Result from SMC/HVC call + * @a0-a3 result values from registers 0 to 3 + */ +struct arm_smccc_res { + 
unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; +}; + +/** + * struct arm_smccc_quirk - Contains quirk information + * @id: quirk identification + * @state: quirk specific information + * @a6: Qualcomm quirk entry for returning post-smc call contents of a6 + */ +struct arm_smccc_quirk { + int id; + union { + unsigned long a6; + } state; +}; + +/** + * __arm_smccc_smc() - make SMC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required. + * + * This function is used to make SMC calls following SMC Calling Convention. + * The content of the supplied param are copied to registers 0 to 7 prior + * to the SMC instruction. The return values are updated with the content + * from register 0 to 3 on return from the SMC instruction. An optional + * quirk structure provides vendor specific behavior. + */ +asmlinkage void __arm_smccc_smc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res, struct arm_smccc_quirk *quirk); + +/** + * __arm_smccc_hvc() - make HVC calls + * @a0-a7: arguments passed in registers 0 to 7 + * @res: result values from registers 0 to 3 + * @quirk: points to an arm_smccc_quirk, or NULL when no quirks are required. + * + * This function is used to make HVC calls following SMC Calling + * Convention. The content of the supplied param are copied to registers 0 + * to 7 prior to the HVC instruction. The return values are updated with + * the content from register 0 to 3 on return from the HVC instruction. An + * optional quirk structure provides vendor specific behavior. + */ +asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3, unsigned long a4, + unsigned long a5, unsigned long a6, unsigned long a7, + struct arm_smccc_res *res, struct arm_smccc_quirk *quirk); + +#define arm_smccc_smc(...) __arm_smccc_smc(__VA_ARGS__, NULL) + +#define arm_smccc_smc_quirk(...) __arm_smccc_smc(__VA_ARGS__) + +#define arm_smccc_hvc(...) __arm_smccc_hvc(__VA_ARGS__, NULL) + +#define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) + +#endif /*__ASSEMBLY__*/ +#endif /*__LINUX_ARM_SMCCC_H*/ diff --git a/include/linux/bch.h b/include/linux/bch.h new file mode 100644 index 0000000..1305d2c --- /dev/null +++ b/include/linux/bch.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Generic binary BCH encoding/decoding library + * + * Copyright © 2011 Parrot S.A. + * + * Author: Ivan Djelic + * + * Description: + * + * This library provides runtime configurable encoding/decoding of binary + * Bose-Chaudhuri-Hocquenghem (BCH) codes. +*/ +#ifndef _BCH_H +#define _BCH_H + +#include + +/** + * struct bch_control - BCH control structure + * @m: Galois field order + * @n: maximum codeword size in bits (= 2^m-1) + * @t: error correction capability in bits + * @ecc_bits: ecc exact size in bits, i.e. 
generator polynomial degree (<=m*t) + * @ecc_bytes: ecc max size (m*t bits) in bytes + * @a_pow_tab: Galois field GF(2^m) exponentiation lookup table + * @a_log_tab: Galois field GF(2^m) log lookup table + * @mod8_tab: remainder generator polynomial lookup tables + * @ecc_buf: ecc parity words buffer + * @ecc_buf2: ecc parity words buffer + * @xi_tab: GF(2^m) base for solving degree 2 polynomial roots + * @syn: syndrome buffer + * @cache: log-based polynomial representation buffer + * @elp: error locator polynomial + * @poly_2t: temporary polynomials of degree 2t + */ +struct bch_control { + unsigned int m; + unsigned int n; + unsigned int t; + unsigned int ecc_bits; + unsigned int ecc_bytes; +/* private: */ + uint16_t *a_pow_tab; + uint16_t *a_log_tab; + uint32_t *mod8_tab; + uint32_t *ecc_buf; + uint32_t *ecc_buf2; + unsigned int *xi_tab; + unsigned int *syn; + int *cache; + struct gf_poly *elp; + struct gf_poly *poly_2t[4]; +}; + +struct bch_control *init_bch(int m, int t, unsigned int prim_poly); + +void free_bch(struct bch_control *bch); + +void encode_bch(struct bch_control *bch, const uint8_t *data, + unsigned int len, uint8_t *ecc); + +int decode_bch(struct bch_control *bch, const uint8_t *data, unsigned int len, + const uint8_t *recv_ecc, const uint8_t *calc_ecc, + const unsigned int *syn, unsigned int *errloc); + +#endif /* _BCH_H */ diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h new file mode 100644 index 0000000..8b9d6ff --- /dev/null +++ b/include/linux/bitfield.h @@ -0,0 +1,106 @@ +/* + * Copyright (C) 2014 Felix Fietkau + * Copyright (C) 2004 - 2009 Ivo van Doorn + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _LINUX_BITFIELD_H +#define _LINUX_BITFIELD_H + +#include + +/* + * Bitfield access macros + * + * FIELD_{GET,PREP} macros take as first parameter shifted mask + * from which they extract the base mask and shift amount. + * Mask must be a compilation time constant. + * + * Example: + * + * #define REG_FIELD_A GENMASK(6, 0) + * #define REG_FIELD_B BIT(7) + * #define REG_FIELD_C GENMASK(15, 8) + * #define REG_FIELD_D GENMASK(31, 16) + * + * Get: + * a = FIELD_GET(REG_FIELD_A, reg); + * b = FIELD_GET(REG_FIELD_B, reg); + * + * Set: + * reg = FIELD_PREP(REG_FIELD_A, 1) | + * FIELD_PREP(REG_FIELD_B, 0) | + * FIELD_PREP(REG_FIELD_C, c) | + * FIELD_PREP(REG_FIELD_D, 0x40); + * + * Modify: + * reg &= ~REG_FIELD_C; + * reg |= FIELD_PREP(REG_FIELD_C, c); + */ + +#define __bf_shf(x) (__builtin_ffsll(x) - 1) + +#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \ + ({ \ + BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \ + _pfx "mask is not constant"); \ + BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \ + BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? 
\ + ~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \ + _pfx "value too large for the field"); \ + BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \ + _pfx "type of reg too small for mask"); \ + __BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \ + (1ULL << __bf_shf(_mask))); \ + }) + +/** + * FIELD_FIT() - check if value fits in the field + * @_mask: shifted mask defining the field's length and position + * @_val: value to test against the field + * + * Return: true if @_val can fit inside @_mask, false if @_val is too big. + */ +#define FIELD_FIT(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_FIT: "); \ + !((((typeof(_mask))_val) << __bf_shf(_mask)) & ~(_mask)); \ + }) + +/** + * FIELD_PREP() - prepare a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_val: value to put in the field + * + * FIELD_PREP() masks and shifts up the value. The result should + * be combined with other fields of the bitfield using logical OR. + */ +#define FIELD_PREP(_mask, _val) \ + ({ \ + __BF_FIELD_CHECK(_mask, 0ULL, _val, "FIELD_PREP: "); \ + ((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask); \ + }) + +/** + * FIELD_GET() - extract a bitfield element + * @_mask: shifted mask defining the field's length and position + * @_reg: 32bit value of entire bitfield + * + * FIELD_GET() extracts the field specified by @_mask from the + * bitfield passed in as @_reg by masking and shifting it down. + */ +#define FIELD_GET(_mask, _reg) \ + ({ \ + __BF_FIELD_CHECK(_mask, _reg, 0U, "FIELD_GET: "); \ + (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ + }) + +#endif diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h new file mode 100644 index 0000000..4a54ae0 --- /dev/null +++ b/include/linux/bitmap.h @@ -0,0 +1,23 @@ +// SPDX-License-Identifier: GPL-2.0+ +#ifndef __LINUX_BITMAP_H +#define __LINUX_BITMAP_H + +#include +#include +#include + +#define small_const_nbits(nbits) \ + (__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG) + +static inline void bitmap_zero(unsigned long *dst, int nbits) +{ + if (small_const_nbits(nbits)) { + *dst = 0UL; + } else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + + memset(dst, 0, len); + } +} + +#endif /* __LINUX_BITMAP_H */ diff --git a/include/linux/bitops.h b/include/linux/bitops.h new file mode 100644 index 0000000..259df43 --- /dev/null +++ b/include/linux/bitops.h @@ -0,0 +1,207 @@ +#ifndef _LINUX_BITOPS_H +#define _LINUX_BITOPS_H + +#include +#include +#include + +#ifdef __KERNEL__ +#define BIT(nr) (1UL << (nr)) +#define BIT_ULL(nr) (1ULL << (nr)) +#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) +#define BIT_WORD(nr) ((nr) / BITS_PER_LONG) +#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG)) +#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG) +#define BITS_PER_BYTE 8 +#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) +#endif + +/* + * Create a contiguous bitmask starting at bit position @l and ending at + * position @h. For example + * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. + */ +#ifdef CONFIG_SANDBOX +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (CONFIG_SANDBOX_BITS_PER_LONG - 1 - (h)))) +#else +#define GENMASK(h, l) \ + (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) +#endif + +#define GENMASK_ULL(h, l) \ + (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) + +/* + * ffs: find first bit set. 
This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above ffz (man ffs). + */ + +static inline int generic_ffs(int x) +{ + int r = 1; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} + +/** + * fls - find last (most-significant) bit set + * @x: the word to search + * + * This is defined the same way as ffs. + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. + */ +static inline int generic_fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + + +/* + * hweightN: returns the hamming weight (i.e. the number + * of bits set) of a N-bit word + */ + +static inline unsigned int generic_hweight32(unsigned int w) +{ + unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); + res = (res & 0x33333333) + ((res >> 2) & 0x33333333); + res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); + res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); + return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); +} + +static inline unsigned int generic_hweight16(unsigned int w) +{ + unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555); + res = (res & 0x3333) + ((res >> 2) & 0x3333); + res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); + return (res & 0x00FF) + ((res >> 8) & 0x00FF); +} + +static inline unsigned int generic_hweight8(unsigned int w) +{ + unsigned int res = (w & 0x55) + ((w >> 1) & 0x55); + res = (res & 0x33) + ((res >> 2) & 0x33); + return (res & 0x0F) + ((res >> 4) & 0x0F); +} + +#include + +/* linux/include/asm-generic/bitops/non-atomic.h */ + +#ifndef PLATFORM__SET_BIT +# define __set_bit generic_set_bit +#endif + +#ifndef PLATFORM__CLEAR_BIT +# define __clear_bit generic_clear_bit +#endif + +#ifndef PLATFORM_FFS +# define ffs generic_ffs +#endif + +#ifndef PLATFORM_FLS +# define fls generic_fls +#endif + +static inline unsigned fls_long(unsigned long l) +{ + if (sizeof(l) == 4) + return fls(l); + return fls64(l); +} + +/** + * __ffs64 - find first set bit in a 64 bit word + * @word: The 64 bit word + * + * On 64 bit arches this is a synomyn for __ffs + * The result is not defined if no bits are set, so check that @word + * is non-zero before calling this. + */ +static inline unsigned long __ffs64(u64 word) +{ +#if BITS_PER_LONG == 32 + if (((u32)word) == 0UL) + return __ffs((u32)(word >> 32)) + 32; +#elif BITS_PER_LONG != 64 +#error BITS_PER_LONG not 32 or 64 +#endif + return __ffs((unsigned long)word); +} + +/** + * __set_bit - Set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Unlike set_bit(), this function is non-atomic and may be reordered. + * If it's called on the same region of memory simultaneously, the effect + * may be that only one operation succeeds. 
+ */ +static inline void generic_set_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p |= mask; +} + +static inline void generic_clear_bit(int nr, volatile unsigned long *addr) +{ + unsigned long mask = BIT_MASK(nr); + unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); + + *p &= ~mask; +} + +#endif diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h new file mode 100644 index 0000000..8ec9411 --- /dev/null +++ b/include/linux/bitrev.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BITREV_H +#define _LINUX_BITREV_H + +#include + +#ifdef CONFIG_HAVE_ARCH_BITREVERSE +#include + +#define __bitrev32 __arch_bitrev32 +#define __bitrev16 __arch_bitrev16 +#define __bitrev8 __arch_bitrev8 + +#else +extern u8 const byte_rev_table[256]; +static inline u8 __bitrev8(u8 byte) +{ + return byte_rev_table[byte]; +} + +static inline u16 __bitrev16(u16 x) +{ + return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8); +} + +static inline u32 __bitrev32(u32 x) +{ + return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16); +} + +#endif /* CONFIG_HAVE_ARCH_BITREVERSE */ + +#define __bitrev8x4(x) (__bitrev32(swab32(x))) + +#define __constant_bitrev32(x) \ +({ \ + u32 __x = x; \ + __x = (__x >> 16) | (__x << 16);\ + __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \ + __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ + __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ + __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ + __x; \ +}) + +#define __constant_bitrev16(x) \ +({ \ + u16 __x = x; \ + __x = (__x >> 8) | (__x << 8); \ + __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \ + __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \ + __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \ + __x; \ +}) + +#define __constant_bitrev8x4(x) \ +({ \ + u32 __x = x; \ + __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ + __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ + __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ + __x; \ +}) + +#define __constant_bitrev8(x) \ +({ \ + u8 __x = x; \ + __x = (__x >> 4) | (__x << 4); \ + __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \ + __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \ + __x; \ +}) + +#define bitrev32(x) \ +({ \ + u32 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev32(__x) : \ + __bitrev32(__x); \ +}) + +#define bitrev16(x) \ +({ \ + u16 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev16(__x) : \ + __bitrev16(__x); \ +}) + +#define bitrev8x4(x) \ +({ \ + u32 __x = x; \ + __builtin_constant_p(__x) ? \ + __constant_bitrev8x4(__x) : \ + __bitrev8x4(__x); \ +}) + +#define bitrev8(x) \ +({ \ + u8 __x = x; \ + __builtin_constant_p(__x) ? 
\ + __constant_bitrev8(__x) : \ + __bitrev8(__x) ; \ +}) +#endif /* _LINUX_BITREV_H */ diff --git a/include/linux/bug.h b/include/linux/bug.h new file mode 100644 index 0000000..29f8416 --- /dev/null +++ b/include/linux/bug.h @@ -0,0 +1,52 @@ +#ifndef _LINUX_BUG_H +#define _LINUX_BUG_H + +#include /* for panic() */ +#include +#include +#include + +#define BUG() do { \ + printk("BUG at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + panic("BUG!"); \ +} while (0) + +#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) + +#define WARN_ON(condition) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + printk("WARNING at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + unlikely(__ret_warn_on); \ +}) + +#define WARN(condition, format...) ({ \ + int __ret_warn_on = !!(condition); \ + if (unlikely(__ret_warn_on)) \ + printf(format); \ + unlikely(__ret_warn_on); \ +}) + +#define WARN_ON_ONCE(condition) ({ \ + static bool __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) + +#define WARN_ONCE(condition, format...) ({ \ + static bool __warned; \ + int __ret_warn_once = !!(condition); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + __warned = true; \ + WARN(1, format); \ + } \ + unlikely(__ret_warn_once); \ +}) + +#endif /* _LINUX_BUG_H */ diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h new file mode 100644 index 0000000..b7d22d6 --- /dev/null +++ b/include/linux/build_bug.h @@ -0,0 +1,84 @@ +#ifndef _LINUX_BUILD_BUG_H +#define _LINUX_BUILD_BUG_H + +#include + +#ifdef __CHECKER__ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) +#define BUILD_BUG_ON_ZERO(e) (0) +#define BUILD_BUG_ON_NULL(e) ((void *)0) +#define BUILD_BUG_ON_INVALID(e) (0) +#define BUILD_BUG_ON_MSG(cond, msg) (0) +#define BUILD_BUG_ON(condition) (0) +#define BUILD_BUG() (0) +#else /* __CHECKER__ */ + +/* Force a compilation error if a constant expression is not a power of 2 */ +#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON(((n) & ((n) - 1)) != 0) +#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ + BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) + +/* + * Force a compilation error if condition is true, but also produce a + * result (of value 0 and type size_t), so the expression can be used + * e.g. in a structure initializer (or where-ever else comma expressions + * aren't permitted). + */ +#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) +#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:(-!!(e)); })) + +/* + * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the + * expression but avoids the generation of any code, even if that expression + * has side-effects. + */ +#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e)))) + +/** + * BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied + * error message. + * @condition: the condition which the compiler should know is false. + * + * See BUILD_BUG_ON for description. + */ +#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) + +/** + * BUILD_BUG_ON - break compile if a condition is true. + * @condition: the condition which the compiler should know is false. 
+ * + * If you have some code which relies on certain constants being equal, or + * some other compile-time-evaluated condition, you should use BUILD_BUG_ON to + * detect if someone changes it. + * + * The implementation uses gcc's reluctance to create a negative array, but gcc + * (as of 4.4) only emits that error for obvious cases (e.g. not arguments to + * inline functions). Luckily, in 4.3 they added the "error" function + * attribute just for this type of case. Thus, we use a negative sized array + * (should always create an error on gcc versions older than 4.4) and then call + * an undefined function with the error attribute (should always create an + * error on gcc 4.3 and later). If for some reason, neither creates a + * compile-time error, we'll still have a link-time error, which is harder to + * track down. + */ +#ifndef __OPTIMIZE__ +#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) +#else +#define BUILD_BUG_ON(condition) \ + BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) +#endif + +/** + * BUILD_BUG - break compile if used. + * + * If you have some code that you expect the compiler to eliminate at + * build time, you should use BUILD_BUG to detect if it is + * unexpectedly used. + */ +#define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") + +#endif /* __CHECKER__ */ + +#endif /* _LINUX_BUILD_BUG_H */ diff --git a/include/linux/byteorder/big_endian.h b/include/linux/byteorder/big_endian.h new file mode 100644 index 0000000..aaf7757 --- /dev/null +++ b/include/linux/byteorder/big_endian.h @@ -0,0 +1,110 @@ +#ifndef _LINUX_BYTEORDER_BIG_ENDIAN_H +#define _LINUX_BYTEORDER_BIG_ENDIAN_H + +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN 4321 +#endif +#ifndef __BIG_ENDIAN_BITFIELD +#define __BIG_ENDIAN_BITFIELD +#endif +#define __BYTE_ORDER __BIG_ENDIAN + +#include +#include +#include + +#define __constant_htonl(x) ((__force __be32)(__u32)(x)) +#define __constant_ntohl(x) ((__force __u32)(__be32)(x)) +#define __constant_htons(x) ((__force __be16)(__u16)(x)) +#define __constant_ntohs(x) ((__force __u16)(__be16)(x)) +#define __constant_cpu_to_le64(x) ((__force __le64)___constant_swab64((x))) +#define __constant_le64_to_cpu(x) ___constant_swab64((__force __u64)(__le64)(x)) +#define __constant_cpu_to_le32(x) ((__force __le32)___constant_swab32((x))) +#define __constant_le32_to_cpu(x) ___constant_swab32((__force __u32)(__le32)(x)) +#define __constant_cpu_to_le16(x) ((__force __le16)___constant_swab16((x))) +#define __constant_le16_to_cpu(x) ___constant_swab16((__force __u16)(__le16)(x)) +#define __constant_cpu_to_be64(x) ((__force __be64)(__u64)(x)) +#define __constant_be64_to_cpu(x) ((__force __u64)(__be64)(x)) +#define __constant_cpu_to_be32(x) ((__force __be32)(__u32)(x)) +#define __constant_be32_to_cpu(x) ((__force __u32)(__be32)(x)) +#define __constant_cpu_to_be16(x) ((__force __be16)(__u16)(x)) +#define __constant_be16_to_cpu(x) ((__force __u16)(__be16)(x)) +#define __cpu_to_le64(x) ((__force __le64)__swab64((x))) +#define __le64_to_cpu(x) __swab64((__force __u64)(__le64)(x)) +#define __cpu_to_le32(x) ((__force __le32)__swab32((x))) +#define __le32_to_cpu(x) __swab32((__force __u32)(__le32)(x)) +#define __cpu_to_le16(x) ((__force __le16)__swab16((x))) +#define __le16_to_cpu(x) __swab16((__force __u16)(__le16)(x)) +#define __cpu_to_be64(x) ((__force __be64)(__u64)(x)) +#define __be64_to_cpu(x) ((__force __u64)(__be64)(x)) +#define __cpu_to_be32(x) ((__force __be32)(__u32)(x)) +#define __be32_to_cpu(x) ((__force __u32)(__be32)(x)) +#define 
__cpu_to_be16(x) ((__force __be16)(__u16)(x)) +#define __be16_to_cpu(x) ((__force __u16)(__be16)(x)) + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ + return (__force __le64)__swab64p(p); +} +static inline __u64 __le64_to_cpup(const __le64 *p) +{ + return __swab64p((__u64 *)p); +} +static inline __le32 __cpu_to_le32p(const __u32 *p) +{ + return (__force __le32)__swab32p(p); +} +static inline __u32 __le32_to_cpup(const __le32 *p) +{ + return __swab32p((__u32 *)p); +} +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ + return (__force __le16)__swab16p(p); +} +static inline __u16 __le16_to_cpup(const __le16 *p) +{ + return __swab16p((__u16 *)p); +} +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ + return (__force __be64)*p; +} +static inline __u64 __be64_to_cpup(const __be64 *p) +{ + return (__force __u64)*p; +} +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ + return (__force __be32)*p; +} +static inline __u32 __be32_to_cpup(const __be32 *p) +{ + return (__force __u32)*p; +} +static inline __be16 __cpu_to_be16p(const __u16 *p) +{ + return (__force __be16)*p; +} +static inline __u16 __be16_to_cpup(const __be16 *p) +{ + return (__force __u16)*p; +} +#define __cpu_to_le64s(x) __swab64s((x)) +#define __le64_to_cpus(x) __swab64s((x)) +#define __cpu_to_le32s(x) __swab32s((x)) +#define __le32_to_cpus(x) __swab32s((x)) +#define __cpu_to_le16s(x) __swab16s((x)) +#define __le16_to_cpus(x) __swab16s((x)) +#define __cpu_to_be64s(x) do { (void)(x); } while (0) +#define __be64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be32s(x) do { (void)(x); } while (0) +#define __be32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be16s(x) do { (void)(x); } while (0) +#define __be16_to_cpus(x) do { (void)(x); } while (0) + +#ifdef __KERNEL__ +#include +#endif + +#endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */ diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h new file mode 100644 index 0000000..8fae186 --- /dev/null +++ b/include/linux/byteorder/generic.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_BYTEORDER_GENERIC_H +#define _LINUX_BYTEORDER_GENERIC_H + +/* + * linux/byteorder/generic.h + * Generic Byte-reordering support + * + * The "... p" macros, like le64_to_cpup, can be used with pointers + * to unaligned data, but there will be a performance penalty on + * some architectures. Use get_unaligned for unaligned data. + * + * Francois-Rene Rideau 19970707 + * gathered all the good ideas from all asm-foo/byteorder.h into one file, + * cleaned them up. + * I hope it is compliant with non-GCC compilers. + * I decided to put __BYTEORDER_HAS_U64__ in byteorder.h, + * because I wasn't sure it would be ok to put it in types.h + * Upgraded it to 2.1.43 + * Francois-Rene Rideau 19971012 + * Upgraded it to 2.1.57 + * to please Linus T., replaced huge #ifdef's between little/big endian + * by nestedly #include'd files. + * Francois-Rene Rideau 19971205 + * Made it to 2.1.71; now a facelift: + * Put files under include/linux/byteorder/ + * Split swab from generic support. + * + * TODO: + * = Regular kernel maintainers could also replace all these manual + * byteswap macros that remain, disseminated among drivers, + * after some grep or the sources... + * = Linus might want to rename all these macros and files to fit his taste, + * to fit his personal naming scheme. + * = it seems that a few drivers would also appreciate + * nybble swapping support... 
+ * = every architecture could add their byteswap macro in asm/byteorder.h + * see how some architectures already do (i386, alpha, ppc, etc) + * = cpu_to_beXX and beXX_to_cpu might some day need to be well + * distinguished throughout the kernel. This is not the case currently, + * since little endian, big endian, and pdp endian machines needn't it. + * But this might be the case for, say, a port of Linux to 20/21 bit + * architectures (and F21 Linux addict around?). + */ + +/* + * The following macros are to be defined by : + * + * Conversion of long and short int between network and host format + * ntohl(__u32 x) + * ntohs(__u16 x) + * htonl(__u32 x) + * htons(__u16 x) + * It seems that some programs (which? where? or perhaps a standard? POSIX?) + * might like the above to be functions, not macros (why?). + * if that's true, then detect them, and take measures. + * Anyway, the measure is: define only ___ntohl as a macro instead, + * and in a separate file, have + * unsigned long inline ntohl(x){return ___ntohl(x);} + * + * The same for constant arguments + * __constant_ntohl(__u32 x) + * __constant_ntohs(__u16 x) + * __constant_htonl(__u32 x) + * __constant_htons(__u16 x) + * + * Conversion of XX-bit integers (16- 32- or 64-) + * between native CPU format and little/big endian format + * 64-bit stuff only defined for proper architectures + * cpu_to_[bl]eXX(__uXX x) + * [bl]eXX_to_cpu(__uXX x) + * + * The same, but takes a pointer to the value to convert + * cpu_to_[bl]eXXp(__uXX x) + * [bl]eXX_to_cpup(__uXX x) + * + * The same, but change in situ + * cpu_to_[bl]eXXs(__uXX x) + * [bl]eXX_to_cpus(__uXX x) + * + * See asm-foo/byteorder.h for examples of how to provide + * architecture-optimized versions + * + */ + +#define cpu_to_le64 __cpu_to_le64 +#define le64_to_cpu __le64_to_cpu +#define cpu_to_le32 __cpu_to_le32 +#define le32_to_cpu __le32_to_cpu +#define cpu_to_le16 __cpu_to_le16 +#define le16_to_cpu __le16_to_cpu +#define cpu_to_be64 __cpu_to_be64 +#define be64_to_cpu __be64_to_cpu +#define cpu_to_be32 __cpu_to_be32 +#define be32_to_cpu __be32_to_cpu +#define cpu_to_be16 __cpu_to_be16 +#define be16_to_cpu __be16_to_cpu +#define cpu_to_le64p __cpu_to_le64p +#define le64_to_cpup __le64_to_cpup +#define cpu_to_le32p __cpu_to_le32p +#define le32_to_cpup __le32_to_cpup +#define cpu_to_le16p __cpu_to_le16p +#define le16_to_cpup __le16_to_cpup +#define cpu_to_be64p __cpu_to_be64p +#define be64_to_cpup __be64_to_cpup +#define cpu_to_be32p __cpu_to_be32p +#define be32_to_cpup __be32_to_cpup +#define cpu_to_be16p __cpu_to_be16p +#define be16_to_cpup __be16_to_cpup +#define cpu_to_le64s __cpu_to_le64s +#define le64_to_cpus __le64_to_cpus +#define cpu_to_le32s __cpu_to_le32s +#define le32_to_cpus __le32_to_cpus +#define cpu_to_le16s __cpu_to_le16s +#define le16_to_cpus __le16_to_cpus +#define cpu_to_be64s __cpu_to_be64s +#define be64_to_cpus __be64_to_cpus +#define cpu_to_be32s __cpu_to_be32s +#define be32_to_cpus __be32_to_cpus +#define cpu_to_be16s __cpu_to_be16s +#define be16_to_cpus __be16_to_cpus + +/* + * They have to be macros in order to do the constant folding + * correctly - if the argument passed into a inline function + * it is no longer constant according to gcc.. 
+ */ + +#undef ntohl +#undef ntohs +#undef htonl +#undef htons + +#define ___htonl(x) __cpu_to_be32(x) +#define ___htons(x) __cpu_to_be16(x) +#define ___ntohl(x) __be32_to_cpu(x) +#define ___ntohs(x) __be16_to_cpu(x) + +#define htonl(x) ___htonl(x) +#define ntohl(x) ___ntohl(x) +#define htons(x) ___htons(x) +#define ntohs(x) ___ntohs(x) + +static inline void le16_add_cpu(__le16 *var, u16 val) +{ + *var = cpu_to_le16(le16_to_cpu(*var) + val); +} + +static inline void le32_add_cpu(__le32 *var, u32 val) +{ + *var = cpu_to_le32(le32_to_cpu(*var) + val); +} + +static inline void le64_add_cpu(__le64 *var, u64 val) +{ + *var = cpu_to_le64(le64_to_cpu(*var) + val); +} + +/* XXX: this stuff can be optimized */ +static inline void le32_to_cpu_array(u32 *buf, unsigned int words) +{ + while (words--) { + __le32_to_cpus(buf); + buf++; + } +} + +static inline void cpu_to_le32_array(u32 *buf, unsigned int words) +{ + while (words--) { + __cpu_to_le32s(buf); + buf++; + } +} + +static inline void be16_add_cpu(__be16 *var, u16 val) +{ + *var = cpu_to_be16(be16_to_cpu(*var) + val); +} + +static inline void be32_add_cpu(__be32 *var, u32 val) +{ + *var = cpu_to_be32(be32_to_cpu(*var) + val); +} + +static inline void be64_add_cpu(__be64 *var, u64 val) +{ + *var = cpu_to_be64(be64_to_cpu(*var) + val); +} + +static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = cpu_to_be32(src[i]); +} + +static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len) +{ + int i; + + for (i = 0; i < len; i++) + dst[i] = be32_to_cpu(src[i]); +} + +#endif /* _LINUX_BYTEORDER_GENERIC_H */ diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h new file mode 100644 index 0000000..a4cb3bf --- /dev/null +++ b/include/linux/byteorder/little_endian.h @@ -0,0 +1,110 @@ +#ifndef _LINUX_BYTEORDER_LITTLE_ENDIAN_H +#define _LINUX_BYTEORDER_LITTLE_ENDIAN_H + +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN 1234 +#endif +#ifndef __LITTLE_ENDIAN_BITFIELD +#define __LITTLE_ENDIAN_BITFIELD +#endif +#define __BYTE_ORDER __LITTLE_ENDIAN + +#include +#include +#include + +#define __constant_htonl(x) ((__force __be32)___constant_swab32((x))) +#define __constant_ntohl(x) ___constant_swab32((__force __be32)(x)) +#define __constant_htons(x) ((__force __be16)___constant_swab16((x))) +#define __constant_ntohs(x) ___constant_swab16((__force __be16)(x)) +#define __constant_cpu_to_le64(x) ((__force __le64)(__u64)(x)) +#define __constant_le64_to_cpu(x) ((__force __u64)(__le64)(x)) +#define __constant_cpu_to_le32(x) ((__force __le32)(__u32)(x)) +#define __constant_le32_to_cpu(x) ((__force __u32)(__le32)(x)) +#define __constant_cpu_to_le16(x) ((__force __le16)(__u16)(x)) +#define __constant_le16_to_cpu(x) ((__force __u16)(__le16)(x)) +#define __constant_cpu_to_be64(x) ((__force __be64)___constant_swab64((x))) +#define __constant_be64_to_cpu(x) ___constant_swab64((__force __u64)(__be64)(x)) +#define __constant_cpu_to_be32(x) ((__force __be32)___constant_swab32((x))) +#define __constant_be32_to_cpu(x) ___constant_swab32((__force __u32)(__be32)(x)) +#define __constant_cpu_to_be16(x) ((__force __be16)___constant_swab16((x))) +#define __constant_be16_to_cpu(x) ___constant_swab16((__force __u16)(__be16)(x)) +#define __cpu_to_le64(x) ((__force __le64)(__u64)(x)) +#define __le64_to_cpu(x) ((__force __u64)(__le64)(x)) +#define __cpu_to_le32(x) ((__force __le32)(__u32)(x)) +#define __le32_to_cpu(x) ((__force __u32)(__le32)(x)) +#define 
__cpu_to_le16(x) ((__force __le16)(__u16)(x)) +#define __le16_to_cpu(x) ((__force __u16)(__le16)(x)) +#define __cpu_to_be64(x) ((__force __be64)__swab64((x))) +#define __be64_to_cpu(x) __swab64((__force __u64)(__be64)(x)) +#define __cpu_to_be32(x) ((__force __be32)__swab32((x))) +#define __be32_to_cpu(x) __swab32((__force __u32)(__be32)(x)) +#define __cpu_to_be16(x) ((__force __be16)__swab16((x))) +#define __be16_to_cpu(x) __swab16((__force __u16)(__be16)(x)) + +static inline __le64 __cpu_to_le64p(const __u64 *p) +{ + return (__force __le64)*p; +} +static inline __u64 __le64_to_cpup(const __le64 *p) +{ + return (__force __u64)*p; +} +static inline __le32 __cpu_to_le32p(const __u32 *p) +{ + return (__force __le32)*p; +} +static inline __u32 __le32_to_cpup(const __le32 *p) +{ + return (__force __u32)*p; +} +static inline __le16 __cpu_to_le16p(const __u16 *p) +{ + return (__force __le16)*p; +} +static inline __u16 __le16_to_cpup(const __le16 *p) +{ + return (__force __u16)*p; +} +static inline __be64 __cpu_to_be64p(const __u64 *p) +{ + return (__force __be64)__swab64p(p); +} +static inline __u64 __be64_to_cpup(const __be64 *p) +{ + return __swab64p((__u64 *)p); +} +static inline __be32 __cpu_to_be32p(const __u32 *p) +{ + return (__force __be32)__swab32p(p); +} +static inline __u32 __be32_to_cpup(const __be32 *p) +{ + return __swab32p((__u32 *)p); +} +static inline __be16 __cpu_to_be16p(const __u16 *p) +{ + return (__force __be16)__swab16p(p); +} +static inline __u16 __be16_to_cpup(const __be16 *p) +{ + return __swab16p((__u16 *)p); +} +#define __cpu_to_le64s(x) do { (void)(x); } while (0) +#define __le64_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le32s(x) do { (void)(x); } while (0) +#define __le32_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_le16s(x) do { (void)(x); } while (0) +#define __le16_to_cpus(x) do { (void)(x); } while (0) +#define __cpu_to_be64s(x) __swab64s((x)) +#define __be64_to_cpus(x) __swab64s((x)) +#define __cpu_to_be32s(x) __swab32s((x)) +#define __be32_to_cpus(x) __swab32s((x)) +#define __cpu_to_be16s(x) __swab16s((x)) +#define __be16_to_cpus(x) __swab16s((x)) + +#ifdef __KERNEL__ +#include +#endif + +#endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */ diff --git a/include/linux/byteorder/swab.h b/include/linux/byteorder/swab.h new file mode 100644 index 0000000..4334fa7 --- /dev/null +++ b/include/linux/byteorder/swab.h @@ -0,0 +1,156 @@ +#ifndef _LINUX_BYTEORDER_SWAB_H +#define _LINUX_BYTEORDER_SWAB_H + +/* + * linux/byteorder/swab.h + * Byte-swapping, independently from CPU endianness + * swabXX[ps]?(foo) + * + * Francois-Rene Rideau 19971205 + * separated swab functions from cpu_to_XX, + * to clean up support for bizarre-endian architectures. + * + * See asm-i386/byteorder.h and suches for examples of how to provide + * architecture-dependent optimized versions + * + */ + +/* casts are necessary for constants, because we never know how for sure + * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way. 
+ */ +#define ___swab16(x) \ + ((__u16)( \ + (((__u16)(x) & (__u16)0x00ffU) << 8) | \ + (((__u16)(x) & (__u16)0xff00U) >> 8) )) +#define ___swab32(x) \ + ((__u32)( \ + (((__u32)(x) & (__u32)0x000000ffUL) << 24) | \ + (((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \ + (((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \ + (((__u32)(x) & (__u32)0xff000000UL) >> 24) )) +#define ___swab64(x) \ + ((__u64)( \ + (__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \ + (__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \ + (__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \ + (__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \ + (__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \ + (__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \ + (__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \ + (__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) )) + +/* + * provide defaults when no architecture-specific optimization is detected + */ +#ifndef __arch__swab16 +# define __arch__swab16(x) ___swab16(x) +#endif +#ifndef __arch__swab32 +# define __arch__swab32(x) ___swab32(x) +#endif +#ifndef __arch__swab64 +# define __arch__swab64(x) ___swab64(x) +#endif + +#ifndef __arch__swab16p +# define __arch__swab16p(x) __swab16(*(x)) +#endif +#ifndef __arch__swab32p +# define __arch__swab32p(x) __swab32(*(x)) +#endif +#ifndef __arch__swab64p +# define __arch__swab64p(x) __swab64(*(x)) +#endif + +#ifndef __arch__swab16s +# define __arch__swab16s(x) do { *(x) = __swab16p((x)); } while (0) +#endif +#ifndef __arch__swab32s +# define __arch__swab32s(x) do { *(x) = __swab32p((x)); } while (0) +#endif +#ifndef __arch__swab64s +# define __arch__swab64s(x) do { *(x) = __swab64p((x)); } while (0) +#endif + + +/* + * Allow constant folding + */ +#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__) +# define __swab16(x) \ +(__builtin_constant_p((__u16)(x)) ? \ + ___swab16((x)) : \ + __fswab16((x))) +# define __swab32(x) \ +(__builtin_constant_p((__u32)(x)) ? \ + ___swab32((x)) : \ + __fswab32((x))) +# define __swab64(x) \ +(__builtin_constant_p((__u64)(x)) ? 
\ + ___swab64((x)) : \ + __fswab64((x))) +#else +# define __swab16(x) __fswab16(x) +# define __swab32(x) __fswab32(x) +# define __swab64(x) __fswab64(x) +#endif /* OPTIMIZE */ + + +static __inline__ __attribute__((const)) __u16 __fswab16(__u16 x) +{ + return __arch__swab16(x); +} +static __inline__ __u16 __swab16p(const __u16 *x) +{ + return __arch__swab16p(x); +} +static __inline__ void __swab16s(__u16 *addr) +{ + __arch__swab16s(addr); +} + +static __inline__ __attribute__((const)) __u32 __fswab32(__u32 x) +{ + return __arch__swab32(x); +} +static __inline__ __u32 __swab32p(const __u32 *x) +{ + return __arch__swab32p(x); +} +static __inline__ void __swab32s(__u32 *addr) +{ + __arch__swab32s(addr); +} + +static __inline__ __attribute__((const)) __u64 __fswab64(__u64 x) +{ +# ifdef __SWAB_64_THRU_32__ + __u32 h = x >> 32; + __u32 l = x & ((1ULL<<32)-1); + return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h))); +# else + return __arch__swab64(x); +# endif +} +static __inline__ __u64 __swab64p(const __u64 *x) +{ + return __arch__swab64p(x); +} +static __inline__ void __swab64s(__u64 *addr) +{ + __arch__swab64s(addr); +} + +#if defined(__KERNEL__) +#define swab16 __swab16 +#define swab32 __swab32 +#define swab64 __swab64 +#define swab16p __swab16p +#define swab32p __swab32p +#define swab64p __swab64p +#define swab16s __swab16s +#define swab32s __swab32s +#define swab64s __swab64s +#endif + +#endif /* _LINUX_BYTEORDER_SWAB_H */ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h new file mode 100644 index 0000000..02ff1a3 --- /dev/null +++ b/include/linux/clk-provider.h @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2019 DENX Software Engineering + * Lukasz Majewski, DENX Software Engineering, lukma@denx.de + * + * Copyright (c) 2010-2011 Jeremy Kerr + * Copyright (C) 2011-2012 Linaro Ltd + */ +#ifndef __LINUX_CLK_PROVIDER_H +#define __LINUX_CLK_PROVIDER_H +#include + +static inline void clk_dm(ulong id, struct clk *clk) +{ + if (!IS_ERR(clk)) + clk->id = id; +} + +/* + * flags used across common struct clk. these flags should only affect the + * top-level framework. custom flags for dealing with hardware specifics + * belong in struct clk_foo + * + * Please update clk_flags[] in drivers/clk/clk.c when making changes here! 
+ */ +#define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ +#define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ +#define CLK_SET_RATE_PARENT BIT(2) /* propagate rate change up one level */ +#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ + /* unused */ +#define CLK_IS_BASIC BIT(5) /* Basic clk, can't do a to_clk_foo() */ +#define CLK_GET_RATE_NOCACHE BIT(6) /* do not use the cached clk rate */ +#define CLK_SET_RATE_NO_REPARENT BIT(7) /* don't re-parent on rate change */ +#define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ +#define CLK_RECALC_NEW_RATES BIT(9) /* recalc rates after notifications */ +#define CLK_SET_RATE_UNGATE BIT(10) /* clock needs to run to set rate */ +#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */ +/* parents need enable during gate/ungate, set rate and re-parent */ +#define CLK_OPS_PARENT_ENABLE BIT(12) +/* duty cycle call may be forwarded to the parent clock */ +#define CLK_DUTY_CYCLE_PARENT BIT(13) + +#define CLK_MUX_INDEX_ONE BIT(0) +#define CLK_MUX_INDEX_BIT BIT(1) +#define CLK_MUX_HIWORD_MASK BIT(2) +#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */ +#define CLK_MUX_ROUND_CLOSEST BIT(4) + +struct clk_mux { + struct clk clk; + void __iomem *reg; + u32 *table; + u32 mask; + u8 shift; + u8 flags; + + /* + * Fields from struct clk_init_data - this struct has been + * omitted to avoid too deep level of CCF for bootloader + */ + const char * const *parent_names; + u8 num_parents; +#if CONFIG_IS_ENABLED(SANDBOX_CLK_CCF) + u32 io_mux_val; +#endif + +}; + +#define to_clk_mux(_clk) container_of(_clk, struct clk_mux, clk) +extern const struct clk_ops clk_mux_ops; +u8 clk_mux_get_parent(struct clk *clk); + +struct clk_gate { + struct clk clk; + void __iomem *reg; + u8 bit_idx; + u8 flags; +#if CONFIG_IS_ENABLED(SANDBOX_CLK_CCF) + u32 io_gate_val; +#endif +}; + +#define to_clk_gate(_clk) container_of(_clk, struct clk_gate, clk) + +#define CLK_GATE_SET_TO_DISABLE BIT(0) +#define CLK_GATE_HIWORD_MASK BIT(1) + +extern const struct clk_ops clk_gate_ops; +struct clk *clk_register_gate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, + u8 clk_gate_flags, spinlock_t *lock); + +struct clk_div_table { + unsigned int val; + unsigned int div; +}; + +struct clk_divider { + struct clk clk; + void __iomem *reg; + u8 shift; + u8 width; + u8 flags; + const struct clk_div_table *table; +#if CONFIG_IS_ENABLED(SANDBOX_CLK_CCF) + u32 io_divider_val; +#endif +}; + +#define clk_div_mask(width) ((1 << (width)) - 1) +#define to_clk_divider(_clk) container_of(_clk, struct clk_divider, clk) + +#define CLK_DIVIDER_ONE_BASED BIT(0) +#define CLK_DIVIDER_POWER_OF_TWO BIT(1) +#define CLK_DIVIDER_ALLOW_ZERO BIT(2) +#define CLK_DIVIDER_HIWORD_MASK BIT(3) +#define CLK_DIVIDER_ROUND_CLOSEST BIT(4) +#define CLK_DIVIDER_READ_ONLY BIT(5) +#define CLK_DIVIDER_MAX_AT_ZERO BIT(6) +extern const struct clk_ops clk_divider_ops; +unsigned long divider_recalc_rate(struct clk *hw, unsigned long parent_rate, + unsigned int val, + const struct clk_div_table *table, + unsigned long flags, unsigned long width); + +struct clk_fixed_factor { + struct clk clk; + unsigned int mult; + unsigned int div; +}; + +#define to_clk_fixed_factor(_clk) container_of(_clk, struct clk_fixed_factor,\ + clk) + +struct clk_fixed_rate { + struct clk clk; + unsigned long fixed_rate; +}; + +#define to_clk_fixed_rate(dev) ((struct clk_fixed_rate *)dev_get_platdata(dev)) + +struct 
clk_composite { + struct clk clk; + struct clk_ops ops; + + struct clk *mux; + struct clk *rate; + struct clk *gate; + + const struct clk_ops *mux_ops; + const struct clk_ops *rate_ops; + const struct clk_ops *gate_ops; +}; + +#define to_clk_composite(_clk) container_of(_clk, struct clk_composite, clk) + +struct clk *clk_register_composite(struct device *dev, const char *name, + const char * const *parent_names, int num_parents, + struct clk *mux_clk, const struct clk_ops *mux_ops, + struct clk *rate_clk, const struct clk_ops *rate_ops, + struct clk *gate_clk, const struct clk_ops *gate_ops, + unsigned long flags); + +int clk_register(struct clk *clk, const char *drv_name, const char *name, + const char *parent_name); + +struct clk *clk_register_fixed_factor(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + unsigned int mult, unsigned int div); + +struct clk *clk_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_divider_flags); + +struct clk *clk_register_mux(struct device *dev, const char *name, + const char * const *parent_names, u8 num_parents, + unsigned long flags, + void __iomem *reg, u8 shift, u8 width, + u8 clk_mux_flags); + +const char *clk_hw_get_name(const struct clk *hw); +ulong clk_generic_get_rate(struct clk *clk); + +static inline struct clk *dev_get_clk_ptr(struct udevice *dev) +{ + return (struct clk *)dev_get_uclass_priv(dev); +} +#endif /* __LINUX_CLK_PROVIDER_H */ diff --git a/include/linux/clk/analogbits-wrpll-cln28hpc.h b/include/linux/clk/analogbits-wrpll-cln28hpc.h new file mode 100644 index 0000000..0327909 --- /dev/null +++ b/include/linux/clk/analogbits-wrpll-cln28hpc.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018-2019 SiFive, Inc. + * Wesley Terpstra + * Paul Walmsley + */ + +#ifndef __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H +#define __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H + +#include + +/* DIVQ_VALUES: number of valid DIVQ values */ +#define DIVQ_VALUES 6 + +/* + * Bit definitions for struct wrpll_cfg.flags + * + * WRPLL_FLAGS_BYPASS_FLAG: if set, the PLL is either in bypass, or should be + * programmed to enter bypass + * WRPLL_FLAGS_RESET_FLAG: if set, the PLL is in reset + * WRPLL_FLAGS_INT_FEEDBACK_FLAG: if set, the PLL is configured for internal + * feedback mode + * WRPLL_FLAGS_EXT_FEEDBACK_FLAG: if set, the PLL is configured for external + * feedback mode (not yet supported by this driver) + */ +#define WRPLL_FLAGS_BYPASS_SHIFT 0 +#define WRPLL_FLAGS_BYPASS_MASK BIT(WRPLL_FLAGS_BYPASS_SHIFT) +#define WRPLL_FLAGS_RESET_SHIFT 1 +#define WRPLL_FLAGS_RESET_MASK BIT(WRPLL_FLAGS_RESET_SHIFT) +#define WRPLL_FLAGS_INT_FEEDBACK_SHIFT 2 +#define WRPLL_FLAGS_INT_FEEDBACK_MASK BIT(WRPLL_FLAGS_INT_FEEDBACK_SHIFT) +#define WRPLL_FLAGS_EXT_FEEDBACK_SHIFT 3 +#define WRPLL_FLAGS_EXT_FEEDBACK_MASK BIT(WRPLL_FLAGS_EXT_FEEDBACK_SHIFT) + +/** + * struct wrpll_cfg - WRPLL configuration values + * @divr: reference divider value (6 bits), as presented to the PLL signals + * @divf: feedback divider value (9 bits), as presented to the PLL signals + * @divq: output divider value (3 bits), as presented to the PLL signals + * @flags: PLL configuration flags. See above for more information + * @range: PLL loop filter range. 
See below for more information + * @output_rate_cache: cached output rates, swept across DIVQ + * @parent_rate: PLL refclk rate for which values are valid + * @max_r: maximum possible R divider value, given @parent_rate + * @init_r: initial R divider value to start the search from + * + * @divr, @divq, @divq, @range represent what the PLL expects to see + * on its input signals. Thus @divr and @divf are the actual divisors + * minus one. @divq is a power-of-two divider; for example, 1 = + * divide-by-2 and 6 = divide-by-64. 0 is an invalid @divq value. + * + * When initially passing a struct wrpll_cfg record, the + * record should be zero-initialized with the exception of the @flags + * field. The only flag bits that need to be set are either + * WRPLL_FLAGS_INT_FEEDBACK or WRPLL_FLAGS_EXT_FEEDBACK. + */ +struct wrpll_cfg { + u8 divr; + u8 divq; + u8 range; + u8 flags; + u16 divf; +/* private: */ + u32 output_rate_cache[DIVQ_VALUES]; + unsigned long parent_rate; + u8 max_r; + u8 init_r; +}; + +int wrpll_configure_for_rate(struct wrpll_cfg *c, u32 target_rate, + unsigned long parent_rate); + +unsigned int wrpll_calc_max_lock_us(const struct wrpll_cfg *c); + +unsigned long wrpll_calc_output_rate(const struct wrpll_cfg *c, + unsigned long parent_rate); + +#endif /* __LINUX_CLK_ANALOGBITS_WRPLL_CLN28HPC_H */ diff --git a/include/linux/compat.h b/include/linux/compat.h new file mode 100644 index 0000000..d0f51ba --- /dev/null +++ b/include/linux/compat.h @@ -0,0 +1,378 @@ +#ifndef _LINUX_COMPAT_H_ +#define _LINUX_COMPAT_H_ + +#include +#include +#include +#include + +struct unused {}; +typedef struct unused unused_t; + +struct p_current{ + int pid; +}; + +extern struct p_current *current; + +/* avoid conflict with */ +#ifdef dev_dbg +#undef dev_dbg +#endif +#ifdef dev_vdbg +#undef dev_vdbg +#endif +#ifdef dev_info +#undef dev_info +#endif +#ifdef dev_err +#undef dev_err +#endif +#ifdef dev_warn +#undef dev_warn +#endif + +#define dev_dbg(dev, fmt, args...) \ + debug(fmt, ##args) +#define dev_vdbg(dev, fmt, args...) \ + debug(fmt, ##args) +#define dev_info(dev, fmt, args...) \ + printf(fmt, ##args) +#define dev_err(dev, fmt, args...) \ + printf(fmt, ##args) +#define dev_warn(dev, fmt, args...) \ + printf(fmt, ##args) + +#define netdev_emerg(dev, fmt, args...) \ + printf(fmt, ##args) +#define netdev_alert(dev, fmt, args...) \ + printf(fmt, ##args) +#define netdev_crit(dev, fmt, args...) \ + printf(fmt, ##args) +#define netdev_err(dev, fmt, args...) \ + printf(fmt, ##args) +#define netdev_warn(dev, fmt, args...) \ + printf(fmt, ##args) +#define netdev_notice(dev, fmt, args...) \ + printf(fmt, ##args) +#define netdev_info(dev, fmt, args...) \ + printf(fmt, ##args) +#define netdev_dbg(dev, fmt, args...) \ + debug(fmt, ##args) +#define netdev_vdbg(dev, fmt, args...) 
\ + debug(fmt, ##args) + +#define GFP_ATOMIC ((gfp_t) 0) +#define GFP_KERNEL ((gfp_t) 0) +#define GFP_NOFS ((gfp_t) 0) +#define GFP_USER ((gfp_t) 0) +#define __GFP_NOWARN ((gfp_t) 0) +#define __GFP_ZERO ((__force gfp_t)0x8000u) /* Return zeroed page on success */ + +void *kmalloc(size_t size, int flags); + +static inline void *kzalloc(size_t size, gfp_t flags) +{ + return kmalloc(size, flags | __GFP_ZERO); +} + +static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) +{ + if (size != 0 && n > SIZE_MAX / size) + return NULL; + return kmalloc(n * size, flags | __GFP_ZERO); +} + +static inline void *kcalloc(size_t n, size_t size, gfp_t flags) +{ + return kmalloc_array(n, size, flags | __GFP_ZERO); +} + +#define vmalloc(size) kmalloc(size, 0) +#define __vmalloc(size, flags, pgsz) kmalloc(size, flags) +static inline void *vzalloc(unsigned long size) +{ + return kzalloc(size, 0); +} +static inline void kfree(const void *block) +{ + free((void *)block); +} +static inline void vfree(const void *addr) +{ + free((void *)addr); +} + +struct kmem_cache { int sz; }; + +struct kmem_cache *get_mem(int element_sz); +#define kmem_cache_create(a, sz, c, d, e) get_mem(sz) +void *kmem_cache_alloc(struct kmem_cache *obj, int flag); +static inline void kmem_cache_free(struct kmem_cache *cachep, void *obj) +{ + free(obj); +} +static inline void kmem_cache_destroy(struct kmem_cache *cachep) +{ + free(cachep); +} + +#define DECLARE_WAITQUEUE(...) do { } while (0) +#define add_wait_queue(...) do { } while (0) +#define remove_wait_queue(...) do { } while (0) + +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) + +#define PAGE_SIZE 4096 + +/* drivers/char/random.c */ +#define get_random_bytes(...) + +/* include/linux/leds.h */ +struct led_trigger {}; + +#define DEFINE_LED_TRIGGER(x) static struct led_trigger *x; +enum led_brightness { + LED_OFF = 0, + LED_HALF = 127, + LED_FULL = 255, +}; + +static inline void led_trigger_register_simple(const char *name, + struct led_trigger **trigger) {} +static inline void led_trigger_unregister_simple(struct led_trigger *trigger) {} +static inline void led_trigger_event(struct led_trigger *trigger, + enum led_brightness event) {} + +/* uapi/linux/limits.h */ +#define XATTR_LIST_MAX 65536 /* size of extended attribute namelist (64k) */ + +/** + * The type used for indexing onto a disc or disc partition. + * + * Linux always considers sectors to be 512 bytes long independently + * of the devices real block size. + * + * blkcnt_t is the type of the inode's block count. + */ +#ifdef CONFIG_LBDAF +typedef u64 sector_t; +typedef u64 blkcnt_t; +#else +typedef unsigned long sector_t; +typedef unsigned long blkcnt_t; +#endif + +/* module */ +#define THIS_MODULE 0 +#define try_module_get(...) 1 +#define module_put(...) do { } while (0) +#define module_init(...) +#define module_exit(...) +#define EXPORT_SYMBOL(...) +#define EXPORT_SYMBOL_GPL(...) +#define module_param(...) +#define module_param_call(...) +#define MODULE_PARM_DESC(...) +#define MODULE_VERSION(...) +#define MODULE_DESCRIPTION(...) +#define MODULE_AUTHOR(...) +#define MODULE_LICENSE(...) +#define MODULE_ALIAS(...) +#define __module_get(...) + +/* character device */ +#define MKDEV(...) 0 +#define MAJOR(dev) 0 +#define MINOR(dev) 0 + +#define alloc_chrdev_region(...) 0 +#define unregister_chrdev_region(...) + +#define class_create(...) __builtin_return_address(0) +#define class_create_file(...) 0 +#define class_register(...) 0 +#define class_unregister(...) +#define class_remove_file(...) 
+#define class_destroy(...) +#define misc_register(...) 0 +#define misc_deregister(...) + +#define blocking_notifier_call_chain(...) 0 + +#define __initdata +#define late_initcall(...) + +#define dev_set_name(...) do { } while (0) +#define device_register(...) 0 +#define device_unregister(...) +#define volume_sysfs_init(...) 0 +#define volume_sysfs_close(...) do { } while (0) + +#define init_waitqueue_head(...) do { } while (0) +#define wait_event_interruptible(...) 0 +#define wake_up_interruptible(...) do { } while (0) +#define dump_stack(...) do { } while (0) + +#define task_pid_nr(x) 0 +#define set_freezable(...) do { } while (0) +#define try_to_freeze(...) 0 +#define set_current_state(...) do { } while (0) +#define kthread_should_stop(...) 0 +#define schedule() do { } while (0) + +#define setup_timer(timer, func, data) do {} while (0) +#define del_timer_sync(timer) do {} while (0) +#define schedule_work(work) do {} while (0) +#define INIT_WORK(work, fun) do {} while (0) + +struct work_struct {}; + +unsigned long copy_from_user(void *dest, const void *src, + unsigned long count); + +typedef unused_t spinlock_t; +typedef int wait_queue_head_t; + +#define spin_lock_init(lock) do {} while (0) +#define spin_lock(lock) do {} while (0) +#define spin_unlock(lock) do {} while (0) +#define spin_lock_irqsave(lock, flags) do { debug("%lu\n", flags); } while (0) +#define spin_unlock_irqrestore(lock, flags) do { flags = 0; } while (0) + +#define DEFINE_MUTEX(...) +#define mutex_init(...) +#define mutex_lock(...) +#define mutex_unlock(...) + +#define init_rwsem(...) do { } while (0) +#define down_read(...) do { } while (0) +#define down_write(...) do { } while (0) +#define down_write_trylock(...) 1 +#define up_read(...) do { } while (0) +#define up_write(...) do { } while (0) + +#define cond_resched() do { } while (0) +#define yield() do { } while (0) + +#define __init +#define __exit +#define __devinit +#define __devinitdata +#define __devinitconst + +#define kthread_create(...) __builtin_return_address(0) +#define kthread_stop(...) do { } while (0) +#define wake_up_process(...) do { } while (0) + +struct rw_semaphore { int i; }; +#define down_write(...) do { } while (0) +#define up_write(...) do { } while (0) +#define down_read(...) do { } while (0) +#define up_read(...) do { } while (0) +struct device { + struct device *parent; + struct class *class; + dev_t devt; /* dev_t, creates the sysfs "dev" */ + void (*release)(struct device *dev); + /* This is used from drivers/usb/musb-new subsystem only */ + void *driver_data; /* data private to the driver */ + void *device_data; /* data private to the device */ +}; +struct mutex { int i; }; +struct kernel_param { int i; }; + +struct cdev { + int owner; + dev_t dev; +}; +#define cdev_init(...) do { } while (0) +#define cdev_add(...) 0 +#define cdev_del(...) do { } while (0) + +#define prandom_u32(...) 0 + +typedef struct { + uid_t val; +} kuid_t; + +typedef struct { + gid_t val; +} kgid_t; + +/* from include/linux/types.h */ + +/** + * struct callback_head - callback structure for use with RCU and task_work + * @next: next update requests in a list + * @func: actual update function to call after the grace period. 
+ */ +struct callback_head { + struct callback_head *next; + void (*func)(struct callback_head *head); +}; +#define rcu_head callback_head +enum writeback_sync_modes { + WB_SYNC_NONE, /* Don't wait on anything */ + WB_SYNC_ALL, /* Wait on every mapping */ +}; + +/* from include/linux/writeback.h */ +/* + * A control structure which tells the writeback code what to do. These are + * always on the stack, and hence need no locking. They are always initialised + * in a manner such that unspecified fields are set to zero. + */ +struct writeback_control { + long nr_to_write; /* Write this many pages, and decrement + this for each page written */ + long pages_skipped; /* Pages which were not written */ + + /* + * For a_ops->writepages(): if start or end are non-zero then this is + * a hint that the filesystem need only write out the pages inside that + * byterange. The byte at `end' is included in the writeout request. + */ + loff_t range_start; + loff_t range_end; + + enum writeback_sync_modes sync_mode; + + unsigned for_kupdate:1; /* A kupdate writeback */ + unsigned for_background:1; /* A background writeback */ + unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */ + unsigned for_reclaim:1; /* Invoked from the page allocator */ + unsigned range_cyclic:1; /* range_start is cyclic */ + unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ +}; + +void *kmemdup(const void *src, size_t len, gfp_t gfp); + +typedef int irqreturn_t; + +struct timer_list {}; +struct notifier_block {}; + +typedef unsigned long dmaaddr_t; + +#define pm_runtime_get_sync(dev) do {} while (0) +#define pm_runtime_put(dev) do {} while (0) +#define pm_runtime_put_sync(dev) do {} while (0) +#define pm_runtime_use_autosuspend(dev) do {} while (0) +#define pm_runtime_set_autosuspend_delay(dev, delay) do {} while (0) +#define pm_runtime_enable(dev) do {} while (0) + +#define IRQ_NONE 0 +#define IRQ_HANDLED 1 +#define IRQ_WAKE_THREAD 2 + +#define dev_set_drvdata(dev, data) do {} while (0) + +#define enable_irq(...) +#define disable_irq(...) +#define disable_irq_wake(irq) do {} while (0) +#define enable_irq_wake(irq) -EINVAL +#define free_irq(irq, data) do {} while (0) +#define request_irq(nr, f, flags, nm, data) 0 + +#endif diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h new file mode 100644 index 0000000..d1e49d5 --- /dev/null +++ b/include/linux/compiler-clang.h @@ -0,0 +1,12 @@ +#ifndef __LINUX_COMPILER_H +#error "Please don't include directly, include instead." +#endif + +/* Some compiler specific definitions are overwritten here + * for Clang compiler + */ + +#ifdef uninitialized_var +#undef uninitialized_var +#define uninitialized_var(x) x = *(&(x)) +#endif diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h new file mode 100644 index 0000000..8d9e079 --- /dev/null +++ b/include/linux/compiler-gcc.h @@ -0,0 +1,285 @@ +#ifndef __LINUX_COMPILER_H +#error "Please don't include directly, include instead." +#endif + +/* + * Common definitions for all gcc versions go here. + */ +#define GCC_VERSION (__GNUC__ * 10000 \ + + __GNUC_MINOR__ * 100 \ + + __GNUC_PATCHLEVEL__) + +/* Optimization barrier */ + +/* The "volatile" is due to gcc bugs */ +#define barrier() \ + __asm__ __volatile__("": : :"memory") +/* + * This version is i.e. 
to prevent dead stores elimination on @ptr + * where gcc and llvm may behave differently when otherwise using + * normal barrier(): while gcc behavior gets along with a normal + * barrier(), llvm needs an explicit input variable to be assumed + * clobbered. The issue is as follows: while the inline asm might + * access any memory it wants, the compiler could have fit all of + * @ptr into memory registers instead, and since @ptr never escaped + * from that, it proofed that the inline asm wasn't touching any of + * it. This version works well with both compilers, i.e. we're telling + * the compiler that the inline asm absolutely may see the contents + * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495 + */ +#define barrier_data(ptr) \ + __asm__ __volatile__("": :"r"(ptr) :"memory") + +/* + * This macro obfuscates arithmetic on a variable address so that gcc + * shouldn't recognize the original var, and make assumptions about it. + * + * This is needed because the C standard makes it undefined to do + * pointer arithmetic on "objects" outside their boundaries and the + * gcc optimizers assume this is the case. In particular they + * assume such arithmetic does not wrap. + * + * A miscompilation has been observed because of this on PPC. + * To work around it we hide the relationship of the pointer and the object + * using this macro. + * + * Versions of the ppc64 compiler before 4.1 had a bug where use of + * RELOC_HIDE could trash r30. The bug can be worked around by changing + * the inline assembly constraint from =g to =r, in this particular + * case either is valid. + */ +#define RELOC_HIDE(ptr, off) \ +({ \ + unsigned long __ptr; \ + __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ + (typeof(ptr)) (__ptr + (off)); \ +}) + +/* Make the optimizer believe the variable can be manipulated arbitrarily. */ +#define OPTIMIZER_HIDE_VAR(var) \ + __asm__ ("" : "=r" (var) : "0" (var)) + +#ifdef __CHECKER__ +#define __must_be_array(a) 0 +#else +/* &a[0] degrades to a pointer: a different type from an array */ +#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +#endif + +/* + * Force always-inline if the user requests it so via the .config, + * or if gcc is too old: + */ +#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ + !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) +#define inline inline __attribute__((always_inline)) notrace +#define __inline__ __inline__ __attribute__((always_inline)) notrace +#define __inline __inline __attribute__((always_inline)) notrace +#else +/* A lot of inline functions can cause havoc with function tracing */ +#define inline inline notrace +#define __inline__ __inline__ notrace +#define __inline __inline notrace +#endif + +#define __always_inline inline __attribute__((always_inline)) +#define noinline __attribute__((noinline)) + +#define __deprecated __attribute__((deprecated)) +#define __packed __attribute__((packed)) +#define __weak __attribute__((weak)) +#define __alias(symbol) __attribute__((alias(#symbol))) + +/* + * it doesn't make sense on ARM (currently the only user of __naked) + * to trace naked functions because then mcount is called without + * stack and frame pointer being set up and there is no chance to + * restore the lr register to the value before mcount was called. + * + * The asm() bodies of naked functions often depend on standard calling + * conventions, therefore they must be noinline and noclone. + * + * GCC 4.[56] currently fail to enforce this, so we must do so ourselves. + * See GCC PR44290. 
+ */ +#define __naked __attribute__((naked)) noinline __noclone notrace + +#define __noreturn __attribute__((noreturn)) + +/* + * From the GCC manual: + * + * Many functions have no effects except the return value and their + * return value depends only on the parameters and/or global + * variables. Such a function can be subject to common subexpression + * elimination and loop optimization just as an arithmetic operator + * would be. + * [...] + */ +#define __pure __attribute__((pure)) +#define __aligned(x) __attribute__((aligned(x))) +#define __printf(a, b) __attribute__((format(printf, a, b))) +#define __scanf(a, b) __attribute__((format(scanf, a, b))) +#define __attribute_const__ __attribute__((__const__)) +#define __maybe_unused __attribute__((unused)) +#define __always_unused __attribute__((unused)) + +/* gcc version specific checks */ + +#if GCC_VERSION < 30200 +# error Sorry, your compiler is too old - please upgrade it. +#endif + +#if GCC_VERSION < 30300 +# define __used __attribute__((__unused__)) +#else +# define __used __attribute__((__used__)) +#endif + +#ifdef CONFIG_GCOV_KERNEL +# if GCC_VERSION < 30400 +# error "GCOV profiling support for gcc versions below 3.4 not included" +# endif /* __GNUC_MINOR__ */ +#endif /* CONFIG_GCOV_KERNEL */ + +#if GCC_VERSION >= 30400 +#define __must_check __attribute__((warn_unused_result)) +#endif + +#if GCC_VERSION >= 40000 + +/* GCC 4.1.[01] miscompiles __weak */ +#ifdef __KERNEL__ +# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101 +# error Your version of gcc miscompiles the __weak directive +# endif +#endif + +#define __used __attribute__((__used__)) +#define __compiler_offsetof(a, b) \ + __builtin_offsetof(a, b) + +#if GCC_VERSION >= 40100 && GCC_VERSION < 40600 +# define __compiletime_object_size(obj) __builtin_object_size(obj, 0) +#endif + +#if GCC_VERSION >= 40300 +/* Mark functions as cold. gcc will assume any path leading to a call + * to them will be unlikely. This means a lot of manual unlikely()s + * are unnecessary now for any paths leading to the usual suspects + * like BUG(), printk(), panic() etc. [but let's keep them for now for + * older compilers] + * + * Early snapshots of gcc 4.3 don't support this and we can't detect this + * in the preprocessor, but we can live with this because they're unreleased. + * Maketime probing would be overkill here. + * + * gcc also has a __attribute__((__hot__)) to move hot functions into + * a special section, but I don't see any sense in this right now in + * the kernel context + */ +#define __cold __attribute__((__cold__)) + +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +#ifndef __CHECKER__ +# define __compiletime_warning(message) __attribute__((warning(message))) +# define __compiletime_error(message) __attribute__((error(message))) +#endif /* __CHECKER__ */ +#endif /* GCC_VERSION >= 40300 */ + +#if GCC_VERSION >= 40500 +/* + * Mark a position in code as unreachable. This can be used to + * suppress control flow warnings after asm blocks that transfer + * control elsewhere. + * + * Early snapshots of gcc 4.5 don't support this and we can't detect + * this in the preprocessor, but we can live with this because they're + * unreleased. Really, we need to have autoconf for the kernel. + */ +#define unreachable() __builtin_unreachable() + +/* Mark a function definition as prohibited from being cloned. 
*/ +#define __noclone __attribute__((__noclone__)) + +#endif /* GCC_VERSION >= 40500 */ + +#if GCC_VERSION >= 40600 +/* + * When used with Link Time Optimization, gcc can optimize away C functions or + * variables which are referenced only from assembly code. __visible tells the + * optimizer that something else uses this function or variable, thus preventing + * this. + */ +#define __visible __attribute__((externally_visible)) +#endif + + +#if GCC_VERSION >= 40900 && !defined(__CHECKER__) +/* + * __assume_aligned(n, k): Tell the optimizer that the returned + * pointer can be assumed to be k modulo n. The second argument is + * optional (default 0), so we use a variadic macro to make the + * shorthand. + * + * Beware: Do not apply this to functions which may return + * ERR_PTRs. Also, it is probably unwise to apply it to functions + * returning extra information in the low bits (but in that case the + * compiler should see some alignment anyway, when the return value is + * massaged by 'flags = ptr & 3; ptr &= ~3;'). + */ +#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) +#endif + +/* + * GCC 'asm goto' miscompiles certain code sequences: + * + * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 + * + * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. + * + * (asm goto is automatically volatile - the naming reflects this.) + */ +#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) + +#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP +#if GCC_VERSION >= 40400 +#define __HAVE_BUILTIN_BSWAP32__ +#define __HAVE_BUILTIN_BSWAP64__ +#endif +#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) +#define __HAVE_BUILTIN_BSWAP16__ +#endif +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ + +#if GCC_VERSION >= 50000 +#define KASAN_ABI_VERSION 4 +#elif GCC_VERSION >= 40902 +#define KASAN_ABI_VERSION 3 +#endif + +#if GCC_VERSION >= 40902 +/* + * Tell the compiler that address safety instrumentation (KASAN) + * should not be applied to that function. + * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + */ +#define __no_sanitize_address __attribute__((no_sanitize_address)) +#endif + +#endif /* gcc version >= 40000 specific checks */ + +#if !defined(__noclone) +#define __noclone /* not needed */ +#endif + +#if !defined(__no_sanitize_address) +#define __no_sanitize_address +#endif + +/* + * A trick to suppress uninitialized variable warning without generating any + * code + */ +#define uninitialized_var(x) x = x diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h new file mode 100644 index 0000000..d4c7113 --- /dev/null +++ b/include/linux/compiler-intel.h @@ -0,0 +1,45 @@ +#ifndef __LINUX_COMPILER_H +#error "Please don't include directly, include instead." +#endif + +#ifdef __ECC + +/* Some compiler specific definitions are overwritten here + * for Intel ECC compiler + */ + +#include + +/* Intel ECC compiler doesn't support gcc specific asm stmts. + * It uses intrinsics to do the equivalent things. + */ +#undef barrier +#undef barrier_data +#undef RELOC_HIDE +#undef OPTIMIZER_HIDE_VAR + +#define barrier() __memory_barrier() +#define barrier_data(ptr) barrier() + +#define RELOC_HIDE(ptr, off) \ + ({ unsigned long __ptr; \ + __ptr = (unsigned long) (ptr); \ + (typeof(ptr)) (__ptr + (off)); }) + +/* This should act as an optimization barrier on var. + * Given that this compiler does not have inline assembly, a compiler barrier + * is the best we can do. 
+ */ +#define OPTIMIZER_HIDE_VAR(var) barrier() + +/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */ +#define __must_be_array(a) 0 + +#endif + +#ifndef __HAVE_BUILTIN_BSWAP16__ +/* icc has this, but it's called _bswap16 */ +#define __HAVE_BUILTIN_BSWAP16__ +#define __builtin_bswap16 _bswap16 +#endif + diff --git a/include/linux/compiler.h b/include/linux/compiler.h new file mode 100644 index 0000000..0ea6c8f --- /dev/null +++ b/include/linux/compiler.h @@ -0,0 +1,559 @@ +#ifndef __LINUX_COMPILER_H +#define __LINUX_COMPILER_H + +#ifndef __ASSEMBLY__ + +#ifdef __CHECKER__ +# define __user __attribute__((noderef, address_space(1))) +# define __kernel __attribute__((address_space(0))) +# define __safe __attribute__((safe)) +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __iomem __attribute__((noderef, address_space(2))) +# define __must_hold(x) __attribute__((context(x,1,1))) +# define __acquires(x) __attribute__((context(x,0,1))) +# define __releases(x) __attribute__((context(x,1,0))) +# define __acquire(x) __context__(x,1) +# define __release(x) __context__(x,-1) +# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) +# define __percpu __attribute__((noderef, address_space(3))) +# define __pmem __attribute__((noderef, address_space(5))) +#ifdef CONFIG_SPARSE_RCU_POINTER +# define __rcu __attribute__((noderef, address_space(4))) +#else +# define __rcu +#endif +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +#else +# define __user +# define __kernel +# define __safe +# define __force +# define __nocast +# define __iomem +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +# define __builtin_warning(x, y...) (1) +# define __must_hold(x) +# define __acquires(x) +# define __releases(x) +# define __acquire(x) (void)0 +# define __release(x) (void)0 +# define __cond_lock(x,c) (c) +# define __percpu +# define __rcu +# define __pmem +#endif + +/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ +#define ___PASTE(a,b) a##b +#define __PASTE(a,b) ___PASTE(a,b) + +#ifdef __KERNEL__ + +#ifdef __GNUC__ +#include +#endif + +#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) +#define notrace __attribute__((hotpatch(0,0))) +#else +#define notrace __attribute__((no_instrument_function)) +#endif + +/* Intel compiler defines __GNUC__. So we will overwrite implementations + * coming from above header files here + */ +#ifdef __INTEL_COMPILER +# include +#endif + +/* Clang compiler defines __GNUC__. So we will overwrite implementations + * coming from above header files here + */ +#ifdef __clang__ +#include +#endif + +/* + * Generic compiler-dependent macros required for kernel + * build go below this comment. Actual compiler/compiler version + * specific implementations come from the above header files + */ + +struct ftrace_branch_data { + const char *func; + const char *file; + unsigned line; + union { + struct { + unsigned long correct; + unsigned long incorrect; + }; + struct { + unsigned long miss; + unsigned long hit; + }; + unsigned long miss_hit[2]; + }; +}; + +/* + * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code + * to disable branch tracing on a per file basis. 
+ */ +#if defined(CONFIG_TRACE_BRANCH_PROFILING) \ + && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) +void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + +#define likely_notrace(x) __builtin_expect(!!(x), 1) +#define unlikely_notrace(x) __builtin_expect(!!(x), 0) + +#define __branch_check__(x, expect) ({ \ + int ______r; \ + static struct ftrace_branch_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_annotated_branch"))) \ + ______f = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + ______r = likely_notrace(x); \ + ftrace_likely_update(&______f, ______r, expect); \ + ______r; \ + }) + +/* + * Using __builtin_constant_p(x) to ignore cases where the return + * value is always the same. This idea is taken from a similar patch + * written by Daniel Walker. + */ +# ifndef likely +# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1)) +# endif +# ifndef unlikely +# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) +# endif + +#ifdef CONFIG_PROFILE_ALL_BRANCHES +/* + * "Define 'is'", Bill Clinton + * "Define 'if'", Steven Rostedt + */ +#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) +#define __trace_if(cond) \ + if (__builtin_constant_p(!!(cond)) ? !!(cond) : \ + ({ \ + int ______r; \ + static struct ftrace_branch_data \ + __attribute__((__aligned__(4))) \ + __attribute__((section("_ftrace_branch"))) \ + ______f = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + ______r = !!(cond); \ + ______f.miss_hit[______r]++; \ + ______r; \ + })) +#endif /* CONFIG_PROFILE_ALL_BRANCHES */ + +#else +# define likely(x) __builtin_expect(!!(x), 1) +# define unlikely(x) __builtin_expect(!!(x), 0) +#endif + +/* Optimization barrier */ +#ifndef barrier +# define barrier() __memory_barrier() +#endif + +#ifndef barrier_data +# define barrier_data(ptr) barrier() +#endif + +/* Unreachable code */ +#ifndef unreachable +# define unreachable() do { } while (1) +#endif + +#ifndef RELOC_HIDE +# define RELOC_HIDE(ptr, off) \ + ({ unsigned long __ptr; \ + __ptr = (unsigned long) (ptr); \ + (typeof(ptr)) (__ptr + (off)); }) +#endif + +#ifndef OPTIMIZER_HIDE_VAR +#define OPTIMIZER_HIDE_VAR(var) barrier() +#endif + +/* Not-quite-unique ID. */ +#ifndef __UNIQUE_ID +# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) +#endif + +#include + +#define __READ_ONCE_SIZE \ +({ \ + switch (size) { \ + case 1: *(__u8 *)res = *(volatile __u8 *)p; break; \ + case 2: *(__u16 *)res = *(volatile __u16 *)p; break; \ + case 4: *(__u32 *)res = *(volatile __u32 *)p; break; \ + case 8: *(__u64 *)res = *(volatile __u64 *)p; break; \ + default: \ + barrier(); \ + __builtin_memcpy((void *)res, (const void *)p, size); \ + barrier(); \ + } \ +}) + +static __always_inline +void __read_once_size(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} + +#ifdef CONFIG_KASAN +/* + * This function is not 'inline' because __no_sanitize_address confilcts + * with inlining. Attempt to inline it may cause a build failure. + * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 + * '__maybe_unused' allows us to avoid defined-but-not-used warnings. 
+ */ +static __no_sanitize_address __maybe_unused +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} +#else +static __always_inline +void __read_once_size_nocheck(const volatile void *p, void *res, int size) +{ + __READ_ONCE_SIZE; +} +#endif + +static __always_inline void __write_once_size(volatile void *p, void *res, int size) +{ + switch (size) { + case 1: *(volatile __u8 *)p = *(__u8 *)res; break; + case 2: *(volatile __u16 *)p = *(__u16 *)res; break; + case 4: *(volatile __u32 *)p = *(__u32 *)res; break; + case 8: *(volatile __u64 *)p = *(__u64 *)res; break; + default: + barrier(); + __builtin_memcpy((void *)p, (const void *)res, size); + barrier(); + } +} + +/* + * Prevent the compiler from merging or refetching reads or writes. The + * compiler is also forbidden from reordering successive instances of + * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the + * compiler is aware of some particular ordering. One way to make the + * compiler aware of ordering is to put the two invocations of READ_ONCE, + * WRITE_ONCE or ACCESS_ONCE() in different C statements. + * + * In contrast to ACCESS_ONCE these two macros will also work on aggregate + * data types like structs or unions. If the size of the accessed data + * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a + * compile-time warning. + * + * Their two major use cases are: (1) Mediating communication between + * process-level code and irq/NMI handlers, all running on the same CPU, + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * mutilate accesses that either do not require ordering or that interact + * with an explicit memory barrier or atomic instruction that provides the + * required ordering. + */ + +#define __READ_ONCE(x, check) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u; \ + if (check) \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + else \ + __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) +#define READ_ONCE(x) __READ_ONCE(x, 1) + +/* + * Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need + * to hide memory access from KASAN. + */ +#define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) + +#define WRITE_ONCE(x, val) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__val = (__force typeof(x)) (val) }; \ + __write_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) + +/** + * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering + * @cond: boolean expression to wait for + * + * Equivalent to using smp_load_acquire() on the condition variable but employs + * the control dependency of the wait to reduce the barrier on many platforms. + * + * The control dependency provides a LOAD->STORE order, the additional RMB + * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, + * aka. ACQUIRE. + */ +#define smp_cond_acquire(cond) do { \ + while (!(cond)) \ + cpu_relax(); \ + smp_rmb(); /* ctrl + rmb := acquire */ \ +} while (0) + +#endif /* __KERNEL__ */ + +#endif /* __ASSEMBLY__ */ + +#ifdef __KERNEL__ +/* + * Allow us to mark functions as 'deprecated' and have gcc emit a nice + * warning for each use, in hopes of speeding the functions removal. 
+ * Usage is: + * int __deprecated foo(void) + */ +#ifndef __deprecated +# define __deprecated /* unimplemented */ +#endif + +#ifdef MODULE +#define __deprecated_for_modules __deprecated +#else +#define __deprecated_for_modules +#endif + +#ifndef __must_check +#define __must_check +#endif + +#ifndef CONFIG_ENABLE_MUST_CHECK +#undef __must_check +#define __must_check +#endif +#ifndef CONFIG_ENABLE_WARN_DEPRECATED +#undef __deprecated +#undef __deprecated_for_modules +#define __deprecated +#define __deprecated_for_modules +#endif + +/* + * Allow us to avoid 'defined but not used' warnings on functions and data, + * as well as force them to be emitted to the assembly file. + * + * As of gcc 3.4, static functions that are not marked with attribute((used)) + * may be elided from the assembly file. As of gcc 3.4, static data not so + * marked will not be elided, but this may change in a future gcc version. + * + * NOTE: Because distributions shipped with a backported unit-at-a-time + * compiler in gcc 3.3, we must define __used to be __attribute__((used)) + * for gcc >=3.3 instead of 3.4. + * + * In prior versions of gcc, such functions and data would be emitted, but + * would be warned about except with attribute((unused)). + * + * Mark functions that are referenced only in inline assembly as __used so + * the code is emitted even though it appears to be unreferenced. + */ +#ifndef __used +# define __used /* unimplemented */ +#endif + +#ifndef __maybe_unused +# define __maybe_unused /* unimplemented */ +#endif + +#ifndef __always_unused +# define __always_unused /* unimplemented */ +#endif + +#ifndef noinline +#define noinline +#endif + +/* + * Rather then using noinline to prevent stack consumption, use + * noinline_for_stack instead. For documentation reasons. + */ +#define noinline_for_stack noinline + +#ifndef __always_inline +#define __always_inline inline +#endif + +#endif /* __KERNEL__ */ + +/* + * From the GCC manual: + * + * Many functions do not examine any values except their arguments, + * and have no effects except the return value. Basically this is + * just slightly more strict class than the `pure' attribute above, + * since function is not allowed to read global memory. + * + * Note that a function that has pointer arguments and examines the + * data pointed to must _not_ be declared `const'. Likewise, a + * function that calls a non-`const' function usually must not be + * `const'. It does not make sense for a `const' function to return + * `void'. + */ +#ifndef __attribute_const__ +# define __attribute_const__ /* unimplemented */ +#endif + +/* + * Tell gcc if a function is cold. The compiler will assume any path + * directly leading to the call is unlikely. + */ + +#ifndef __cold +#define __cold +#endif + +/* Simple shorthand for a section definition */ +#ifndef __section +# define __section(S) __attribute__ ((__section__(#S))) +#endif + +#ifndef __visible +#define __visible +#endif + +/* + * Assume alignment of return value. + */ +#ifndef __assume_aligned +#define __assume_aligned(a, ...) +#endif + + +/* Are two types/vars the same type (ignoring qualifiers)? 
*/ +#ifndef __same_type +# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) +#endif + +/* Is this type a native word size -- useful for atomic operations */ +#ifndef __native_word +# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) +#endif + +/* Compile time object size, -1 for unknown */ +#ifndef __compiletime_object_size +# define __compiletime_object_size(obj) -1 +#endif +#ifndef __compiletime_warning +# define __compiletime_warning(message) +#endif +#ifndef __compiletime_error +# define __compiletime_error(message) +/* + * Sparse complains of variable sized arrays due to the temporary variable in + * __compiletime_assert. Unfortunately we can't just expand it out to make + * sparse see a constant array size without breaking compiletime_assert on old + * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether. + */ +# ifndef __CHECKER__ +# define __compiletime_error_fallback(condition) \ + do { ((void)sizeof(char[1 - 2 * condition])); } while (0) +# endif +#endif +#ifndef __compiletime_error_fallback +# define __compiletime_error_fallback(condition) do { } while (0) +#endif + +#ifdef __OPTIMIZE__ +# define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + bool __cond = !(condition); \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (__cond) \ + prefix ## suffix(); \ + __compiletime_error_fallback(__cond); \ + } while (0) +#else +# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) +#endif + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) + +/** + * compiletime_assert - break build and emit msg if condition is false + * @condition: a compile-time constant condition to check + * @msg: a message to emit if condition is false + * + * In tradition of POSIX assert, this macro will break the build if the + * supplied condition is *false*, emitting the supplied error message if the + * compiler has support to do so. + */ +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) + +#define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ + "Need native word sized stores/loads for atomicity.") + +/* + * Prevent the compiler from merging or refetching accesses. The compiler + * is also forbidden from reordering successive instances of ACCESS_ONCE(), + * but only when the compiler is aware of some particular ordering. One way + * to make the compiler aware of ordering is to put the two invocations of + * ACCESS_ONCE() in different C statements. + * + * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE + * on a union member will work as long as the size of the member matches the + * size of the union and the size is smaller than word size. + * + * The major use cases of ACCESS_ONCE used to be (1) Mediating communication + * between process-level code and irq/NMI handlers, all running on the same CPU, + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * mutilate accesses that either do not require ordering or that interact + * with an explicit memory barrier or atomic instruction that provides the + * required ordering. + * + * If possible use READ_ONCE()/WRITE_ONCE() instead. 
+ */ +#define __ACCESS_ONCE(x) ({ \ + __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ + (volatile typeof(x) *)&(x); }) +#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) + +/** + * lockless_dereference() - safely load a pointer for later dereference + * @p: The pointer to load + * + * Similar to rcu_dereference(), but for situations where the pointed-to + * object's lifetime is managed by something other than RCU. That + * "something other" might be reference counting or simple immortality. + */ +#define lockless_dereference(p) \ +({ \ + typeof(p) _________p1 = READ_ONCE(p); \ + smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ + (_________p1); \ +}) + +/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ +#ifdef CONFIG_KPROBES +# define __kprobes __attribute__((__section__(".kprobes.text"))) +# define nokprobe_inline __always_inline +#else +# define __kprobes +# define nokprobe_inline inline +#endif +#endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/completion.h b/include/linux/completion.h new file mode 100644 index 0000000..9835826 --- /dev/null +++ b/include/linux/completion.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPLETION_H +#define __LINUX_COMPLETION_H + +/* + * (C) Copyright 2001 Linus Torvalds + * + * Atomic wait-for-completion handler data structures. + * See kernel/sched/completion.c for details. + */ +#ifndef __UBOOT__ +#include +#endif /* __UBOOT__ */ + +/* + * struct completion - structure used to maintain state for a "completion" + * + * This is the opaque structure used to maintain the state for a "completion". + * Completions currently use a FIFO to queue threads that have to wait for + * the "completion" event. + * + * See also: complete(), wait_for_completion() (and friends _timeout, + * _interruptible, _interruptible_timeout, and _killable), init_completion(), + * reinit_completion(), and macros DECLARE_COMPLETION(), + * DECLARE_COMPLETION_ONSTACK(). + */ +struct completion { + unsigned int done; +#ifndef __UBOOT__ + wait_queue_head_t wait; +#endif /* __UBOOT__ */ +}; + +#define init_completion_map(x, m) __init_completion(x) +#define init_completion(x) __init_completion(x) +static inline void complete_acquire(struct completion *x) {} +static inline void complete_release(struct completion *x) {} + +#define COMPLETION_INITIALIZER(work) \ + { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + +#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ + (*({ init_completion_map(&(work), &(map)); &(work); })) + +#define COMPLETION_INITIALIZER_ONSTACK(work) \ + (*({ init_completion(&work); &work; })) + +/** + * DECLARE_COMPLETION - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure. Generally used + * for static declarations. You should use the _ONSTACK variant for automatic + * variables. + */ +#define DECLARE_COMPLETION(work) \ + struct completion work = COMPLETION_INITIALIZER(work) + +/* + * Lockdep needs to run a non-constant initializer for on-stack + * completions - so we use the _ONSTACK() variant for those that + * are on the kernel stack: + */ +/** + * DECLARE_COMPLETION_ONSTACK - declare and initialize a completion structure + * @work: identifier for the completion structure + * + * This macro declares and initializes a completion structure on the kernel + * stack. 
+ */ +#ifdef CONFIG_LOCKDEP +# define DECLARE_COMPLETION_ONSTACK(work) \ + struct completion work = COMPLETION_INITIALIZER_ONSTACK(work) +# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \ + struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) +#else +# define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work) +# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work) +#endif + +/** + * init_completion - Initialize a dynamically allocated completion + * @x: pointer to completion structure that is to be initialized + * + * This inline function will initialize a dynamically created completion + * structure. + */ +static inline void __init_completion(struct completion *x) +{ + x->done = 0; +#ifndef __UBOOT__ + init_waitqueue_head(&x->wait); +#endif /* __UBOOT__ */ +} + +/** + * reinit_completion - reinitialize a completion structure + * @x: pointer to completion structure that is to be reinitialized + * + * This inline function should be used to reinitialize a completion structure so it can + * be reused. This is especially important after complete_all() is used. + */ +static inline void reinit_completion(struct completion *x) +{ + x->done = 0; +} + +#ifndef __UBOOT__ +extern void wait_for_completion(struct completion *); +extern void wait_for_completion_io(struct completion *); +extern int wait_for_completion_interruptible(struct completion *x); +extern int wait_for_completion_killable(struct completion *x); +extern unsigned long wait_for_completion_timeout(struct completion *x, + unsigned long timeout); +extern unsigned long wait_for_completion_io_timeout(struct completion *x, + unsigned long timeout); +extern long wait_for_completion_interruptible_timeout( + struct completion *x, unsigned long timeout); +extern long wait_for_completion_killable_timeout( + struct completion *x, unsigned long timeout); +extern bool try_wait_for_completion(struct completion *x); +extern bool completion_done(struct completion *x); + +extern void complete(struct completion *); +extern void complete_all(struct completion *); + +#else /* __UBOOT __ */ + +#define wait_for_completion(x) do {} while (0) +#define wait_for_completion_io(x) do {} while (0) + +inline int wait_for_completion_interruptible(struct completion *x) +{ + return 1; +} +inline int wait_for_completion_killable(struct completion *x) +{ + return 1; +} +inline unsigned long wait_for_completion_timeout(struct completion *x, + unsigned long timeout) +{ + return 1; +} +inline unsigned long wait_for_completion_io_timeout(struct completion *x, + unsigned long timeout) +{ + return 1; +} +inline long wait_for_completion_interruptible_timeout(struct completion *x, + unsigned long timeout) +{ + return 1; +} +inline long wait_for_completion_killable_timeout(struct completion *x, + unsigned long timeout) +{ + return 1; +} +inline bool try_wait_for_completion(struct completion *x) +{ + return 1; +} +inline bool completion_done(struct completion *x) +{ + return 1; +} + +#define complete(x) do {} while (0) +#define complete_all(x) do {} while (0) +#endif /* __UBOOT__ */ + +#endif diff --git a/include/linux/const.h b/include/linux/const.h new file mode 100644 index 0000000..379c889 --- /dev/null +++ b/include/linux/const.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_CONST_H +#define _LINUX_CONST_H + +/* const.h: Macros for dealing with constants. */ + +/* Some constant macros are used in both assembler and + * C code. 
Therefore we cannot annotate them always with + * 'UL' and other type specifiers unilaterally. We + * use the following macros to deal with this. + * + * Similarly, _AT() will cast an expression with a type in C, but + * leave it unchanged in asm. + */ + +#ifdef __ASSEMBLY__ +#define _AC(X,Y) X +#define _AT(T,X) X +#else +#define __AC(X,Y) (X##Y) +#define _AC(X,Y) __AC(X,Y) +#define _AT(T,X) ((T)(X)) +#endif + +#define _UL(x) (_AC(x, UL)) +#define _ULL(x) (_AC(x, ULL)) + +#define _BITUL(x) (_UL(1) << (x)) +#define _BITULL(x) (_ULL(1) << (x)) + +#define UL(x) (_UL(x)) +#define ULL(x) (_ULL(x)) + +#endif /* _LINUX_CONST_H */ diff --git a/include/linux/crc32.h b/include/linux/crc32.h new file mode 100644 index 0000000..ac4aed1 --- /dev/null +++ b/include/linux/crc32.h @@ -0,0 +1,27 @@ +/* + * crc32.h + * See linux/lib/crc32.c for license and changes + */ +#ifndef _LINUX_CRC32_H +#define _LINUX_CRC32_H + +#include +/* #include */ + +extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len); +/* extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len); */ + +#define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)data, length) + +/* + * Helpers for hash table generation of ethernet nics: + * + * Ethernet sends the least significant bit of a byte first, thus crc32_le + * is used. The output of crc32_le is bit reversed [most significant bit + * is in bit nr 0], thus it must be reversed before use. Except for + * nics that bit swap the result internally... + */ +/* #define ether_crc(length, data) bitrev32(crc32_le(~0, data, length)) */ +/* #define ether_crc_le(length, data) crc32_le(~0, data, length) */ + +#endif /* _LINUX_CRC32_H */ diff --git a/include/linux/crc7.h b/include/linux/crc7.h new file mode 100644 index 0000000..1786e77 --- /dev/null +++ b/include/linux/crc7.h @@ -0,0 +1,14 @@ +#ifndef _LINUX_CRC7_H +#define _LINUX_CRC7_H +#include + +extern const u8 crc7_syndrome_table[256]; + +static inline u8 crc7_byte(u8 crc, u8 data) +{ + return crc7_syndrome_table[(crc << 1) ^ data]; +} + +extern u8 crc7(u8 crc, const u8 *buffer, size_t len); + +#endif diff --git a/include/linux/crc8.h b/include/linux/crc8.h new file mode 100644 index 0000000..0ab5b9a --- /dev/null +++ b/include/linux/crc8.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) 2013 Google, Inc + */ + + +#ifndef __linux_crc8_h +#define __linux_crc8_h + +/** + * crc8() - Calculate and return CRC-8 of the data + * + * This uses an x^8 + x^2 + x + 1 polynomial. A table-based algorithm would + * be faster, but for only a few bytes it isn't worth the code size + * + * @crc_start: CRC8 start value + * @vptr: Buffer to checksum + * @len: Length of buffer in bytes + * @return CRC8 checksum + */ +unsigned int crc8(unsigned int crc_start, const unsigned char *vptr, int len); + +#endif diff --git a/include/linux/ctype.h b/include/linux/ctype.h new file mode 100644 index 0000000..42f9305 --- /dev/null +++ b/include/linux/ctype.h @@ -0,0 +1,60 @@ +#ifndef _LINUX_CTYPE_H +#define _LINUX_CTYPE_H + +/* + * NOTE! This ctype does not handle EOF like the standard C + * library is required to. 
+ */ + +#define _U 0x01 /* upper */ +#define _L 0x02 /* lower */ +#define _D 0x04 /* digit */ +#define _C 0x08 /* cntrl */ +#define _P 0x10 /* punct */ +#define _S 0x20 /* white space (space/lf/tab) */ +#define _X 0x40 /* hex digit */ +#define _SP 0x80 /* hard space (0x20) */ + +extern const unsigned char _ctype[]; + +#define __ismask(x) (_ctype[(int)(unsigned char)(x)]) + +#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) +#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) +#define iscntrl(c) ((__ismask(c)&(_C)) != 0) +#define isdigit(c) ((__ismask(c)&(_D)) != 0) +#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) +#define islower(c) ((__ismask(c)&(_L)) != 0) +#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) +#define ispunct(c) ((__ismask(c)&(_P)) != 0) +#define isspace(c) ((__ismask(c)&(_S)) != 0) +#define isupper(c) ((__ismask(c)&(_U)) != 0) +#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) + +/* + * Rather than doubling the size of the _ctype lookup table to hold a 'blank' + * flag, just check for space or tab. + */ +#define isblank(c) (c == ' ' || c == '\t') + +#define isascii(c) (((unsigned char)(c))<=0x7f) +#define toascii(c) (((unsigned char)(c))&0x7f) + +static inline unsigned char __tolower(unsigned char c) +{ + if (isupper(c)) + c -= 'A'-'a'; + return c; +} + +static inline unsigned char __toupper(unsigned char c) +{ + if (islower(c)) + c -= 'a'-'A'; + return c; +} + +#define tolower(c) __tolower(c) +#define toupper(c) __toupper(c) + +#endif diff --git a/include/linux/delay.h b/include/linux/delay.h new file mode 100644 index 0000000..71a38e1 --- /dev/null +++ b/include/linux/delay.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _LINUX_DELAY_H +#define _LINUX_DELAY_H + +#include + +void __udelay(unsigned long usec); +void udelay(unsigned long usec); + +static inline void mdelay(unsigned long msec) +{ + udelay(1000 * msec); +} + +static inline void ndelay(unsigned long nsec) +{ + udelay(DIV_ROUND_UP(nsec, 1000)); +} + +#endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h new file mode 100644 index 0000000..95b6a82 --- /dev/null +++ b/include/linux/dma-direction.h @@ -0,0 +1,13 @@ +#ifndef _LINUX_DMA_DIRECTION_H +#define _LINUX_DMA_DIRECTION_H +/* + * These definitions mirror those in pci.h, so they can be used + * interchangeably with their PCI_ counterparts. + */ +enum dma_data_direction { + DMA_BIDIRECTIONAL = 0, + DMA_TO_DEVICE = 1, + DMA_FROM_DEVICE = 2, + DMA_NONE = 3, +}; +#endif diff --git a/include/linux/drm_dp_helper.h b/include/linux/drm_dp_helper.h new file mode 100644 index 0000000..758e4a4 --- /dev/null +++ b/include/linux/drm_dp_helper.h @@ -0,0 +1,406 @@ +/* + * Copyright © 2008 Keith Packard + * + * Permission to use, copy, modify, distribute, and sell this software and its + * documentation for any purpose is hereby granted without fee, provided that + * the above copyright notice appear in all copies and that both that copyright + * notice and this permission notice appear in supporting documentation, and + * that the name of the copyright holders not be used in advertising or + * publicity pertaining to distribution of the software without specific, + * written prior permission. The copyright holders make no representations + * about the suitability of this software for any purpose. It is provided "as + * is" without express or implied warranty. 
+ * + * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, + * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO + * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR + * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, + * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE + * OF THIS SOFTWARE. + */ + +#ifndef _DRM_DP_HELPER_H_ +#define _DRM_DP_HELPER_H_ + +/* + * Unless otherwise noted, all values are from the DP 1.1a spec. Note that + * DP and DPCD versions are independent. Differences from 1.0 are not noted, + * 1.0 devices basically don't exist in the wild. + * + * Abbreviations, in chronological order: + * + * eDP: Embedded DisplayPort version 1 + * DPI: DisplayPort Interoperability Guideline v1.1a + * 1.2: DisplayPort 1.2 + * MST: Multistream Transport - part of DP 1.2a + * + * 1.2 formally includes both eDP and DPI definitions. + */ + +#define DP_AUX_I2C_WRITE 0x0 +#define DP_AUX_I2C_READ 0x1 +#define DP_AUX_I2C_STATUS 0x2 +#define DP_AUX_I2C_MOT 0x4 +#define DP_AUX_NATIVE_WRITE 0x8 +#define DP_AUX_NATIVE_READ 0x9 + +#define DP_AUX_NATIVE_REPLY_ACK (0x0 << 0) +#define DP_AUX_NATIVE_REPLY_NACK (0x1 << 0) +#define DP_AUX_NATIVE_REPLY_DEFER (0x2 << 0) +#define DP_AUX_NATIVE_REPLY_MASK (0x3 << 0) + +#define DP_AUX_I2C_REPLY_ACK (0x0 << 2) +#define DP_AUX_I2C_REPLY_NACK (0x1 << 2) +#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2) +#define DP_AUX_I2C_REPLY_MASK (0x3 << 2) + +/* AUX CH addresses */ +/* DPCD */ +#define DP_DPCD_REV 0x000 + +#define DP_MAX_LINK_RATE 0x001 + +#define DP_MAX_LANE_COUNT 0x002 +# define DP_MAX_LANE_COUNT_MASK 0x1f +# define DP_TPS3_SUPPORTED (1 << 6) /* 1.2 */ +# define DP_ENHANCED_FRAME_CAP (1 << 7) + +#define DP_MAX_DOWNSPREAD 0x003 +# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6) + +#define DP_NORP 0x004 + +#define DP_DOWNSTREAMPORT_PRESENT 0x005 +# define DP_DWN_STRM_PORT_PRESENT (1 << 0) +# define DP_DWN_STRM_PORT_TYPE_MASK 0x06 +# define DP_DWN_STRM_PORT_TYPE_DP (0 << 1) +# define DP_DWN_STRM_PORT_TYPE_ANALOG (1 << 1) +# define DP_DWN_STRM_PORT_TYPE_TMDS (2 << 1) +# define DP_DWN_STRM_PORT_TYPE_OTHER (3 << 1) +# define DP_FORMAT_CONVERSION (1 << 3) +# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */ + +#define DP_MAIN_LINK_CHANNEL_CODING 0x006 + +#define DP_DOWN_STREAM_PORT_COUNT 0x007 +# define DP_PORT_COUNT_MASK 0x0f +# define DP_MSA_TIMING_PAR_IGNORED (1 << 6) /* eDP */ +# define DP_OUI_SUPPORT (1 << 7) + +#define DP_I2C_SPEED_CAP 0x00c /* DPI */ +# define DP_I2C_SPEED_1K 0x01 +# define DP_I2C_SPEED_5K 0x02 +# define DP_I2C_SPEED_10K 0x04 +# define DP_I2C_SPEED_100K 0x08 +# define DP_I2C_SPEED_400K 0x10 +# define DP_I2C_SPEED_1M 0x20 + +#define DP_EDP_CONFIGURATION_CAP 0x00d /* XXX 1.2? */ +#define DP_TRAINING_AUX_RD_INTERVAL 0x00e /* XXX 1.2? */ + +/* Multiple stream transport */ +#define DP_FAUX_CAP 0x020 /* 1.2 */ +# define DP_FAUX_CAP_1 (1 << 0) + +#define DP_MSTM_CAP 0x021 /* 1.2 */ +# define DP_MST_CAP (1 << 0) + +#define DP_GUID 0x030 /* 1.2 */ + +#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */ +# define DP_PSR_IS_SUPPORTED 1 +#define DP_PSR_CAPS 0x071 /* XXX 1.2? 
*/ +# define DP_PSR_NO_TRAIN_ON_EXIT 1 +# define DP_PSR_SETUP_TIME_330 (0 << 1) +# define DP_PSR_SETUP_TIME_275 (1 << 1) +# define DP_PSR_SETUP_TIME_220 (2 << 1) +# define DP_PSR_SETUP_TIME_165 (3 << 1) +# define DP_PSR_SETUP_TIME_110 (4 << 1) +# define DP_PSR_SETUP_TIME_55 (5 << 1) +# define DP_PSR_SETUP_TIME_0 (6 << 1) +# define DP_PSR_SETUP_TIME_MASK (7 << 1) +# define DP_PSR_SETUP_TIME_SHIFT 1 + +/* + * 0x80-0x8f describe downstream port capabilities, but there are two layouts + * based on whether DP_DETAILED_CAP_INFO_AVAILABLE was set. If it was not, + * each port's descriptor is one byte wide. If it was set, each port's is + * four bytes wide, starting with the one byte from the base info. As of + * DP interop v1.1a only VGA defines additional detail. + */ + +/* offset 0 */ +#define DP_DOWNSTREAM_PORT_0 0x80 +# define DP_DS_PORT_TYPE_MASK (7 << 0) +# define DP_DS_PORT_TYPE_DP 0 +# define DP_DS_PORT_TYPE_VGA 1 +# define DP_DS_PORT_TYPE_DVI 2 +# define DP_DS_PORT_TYPE_HDMI 3 +# define DP_DS_PORT_TYPE_NON_EDID 4 +# define DP_DS_PORT_HPD (1 << 3) +/* offset 1 for VGA is maximum megapixels per second / 8 */ +/* offset 2 */ +# define DP_DS_VGA_MAX_BPC_MASK (3 << 0) +# define DP_DS_VGA_8BPC 0 +# define DP_DS_VGA_10BPC 1 +# define DP_DS_VGA_12BPC 2 +# define DP_DS_VGA_16BPC 3 + +/* link configuration */ +#define DP_LINK_BW_SET 0x100 +# define DP_LINK_BW_1_62 0x06 +# define DP_LINK_BW_2_7 0x0a +# define DP_LINK_BW_5_4 0x14 /* 1.2 */ + +#define DP_LANE_COUNT_SET 0x101 +# define DP_LANE_COUNT_MASK 0x0f +# define DP_LANE_COUNT_ENHANCED_FRAME_EN (1 << 7) + +#define DP_TRAINING_PATTERN_SET 0x102 +# define DP_TRAINING_PATTERN_DISABLE 0 +# define DP_TRAINING_PATTERN_1 1 +# define DP_TRAINING_PATTERN_2 2 +# define DP_TRAINING_PATTERN_3 3 /* 1.2 */ +# define DP_TRAINING_PATTERN_MASK 0x3 + +# define DP_LINK_QUAL_PATTERN_DISABLE (0 << 2) +# define DP_LINK_QUAL_PATTERN_D10_2 (1 << 2) +# define DP_LINK_QUAL_PATTERN_ERROR_RATE (2 << 2) +# define DP_LINK_QUAL_PATTERN_PRBS7 (3 << 2) +# define DP_LINK_QUAL_PATTERN_MASK (3 << 2) + +# define DP_RECOVERED_CLOCK_OUT_EN (1 << 4) +# define DP_LINK_SCRAMBLING_DISABLE (1 << 5) + +# define DP_SYMBOL_ERROR_COUNT_BOTH (0 << 6) +# define DP_SYMBOL_ERROR_COUNT_DISPARITY (1 << 6) +# define DP_SYMBOL_ERROR_COUNT_SYMBOL (2 << 6) +# define DP_SYMBOL_ERROR_COUNT_MASK (3 << 6) + +#define DP_TRAINING_LANE0_SET 0x103 +#define DP_TRAINING_LANE1_SET 0x104 +#define DP_TRAINING_LANE2_SET 0x105 +#define DP_TRAINING_LANE3_SET 0x106 + +# define DP_TRAIN_VOLTAGE_SWING_MASK 0x3 +# define DP_TRAIN_VOLTAGE_SWING_SHIFT 0 +# define DP_TRAIN_MAX_SWING_REACHED (1 << 2) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_0 (0 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_1 (1 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_2 (2 << 0) +# define DP_TRAIN_VOLTAGE_SWING_LEVEL_3 (3 << 0) + +# define DP_TRAIN_PRE_EMPHASIS_MASK (3 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_0 (0 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_1 (1 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_2 (2 << 3) +# define DP_TRAIN_PRE_EMPH_LEVEL_3 (3 << 3) + +# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3 +# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5) + +#define DP_DOWNSPREAD_CTRL 0x107 +# define DP_SPREAD_AMP_0_5 (1 << 4) +# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */ + +#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 +# define DP_SET_ANSI_8B10B (1 << 0) + +#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */ +/* bitmask as for DP_I2C_SPEED_CAP */ + +#define DP_EDP_CONFIGURATION_SET 0x10a /* XXX 1.2? 
*/ + +#define DP_MSTM_CTRL 0x111 /* 1.2 */ +# define DP_MST_EN (1 << 0) +# define DP_UP_REQ_EN (1 << 1) +# define DP_UPSTREAM_IS_SRC (1 << 2) + +#define DP_PSR_EN_CFG 0x170 /* XXX 1.2? */ +# define DP_PSR_ENABLE (1 << 0) +# define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) +# define DP_PSR_CRC_VERIFICATION (1 << 2) +# define DP_PSR_FRAME_CAPTURE (1 << 3) + +#define DP_ADAPTER_CTRL 0x1a0 +# define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) + +#define DP_BRANCH_DEVICE_CTRL 0x1a1 +# define DP_BRANCH_DEVICE_IRQ_HPD (1 << 0) + +#define DP_PAYLOAD_ALLOCATE_SET 0x1c0 +#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1 +#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2 + +#define DP_SINK_COUNT 0x200 +/* prior to 1.2 bit 7 was reserved mbz */ +# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f)) +# define DP_SINK_CP_READY (1 << 6) + +#define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 +# define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) +# define DP_AUTOMATED_TEST_REQUEST (1 << 1) +# define DP_CP_IRQ (1 << 2) +# define DP_MCCS_IRQ (1 << 3) +# define DP_DOWN_REP_MSG_RDY (1 << 4) /* 1.2 MST */ +# define DP_UP_REQ_MSG_RDY (1 << 5) /* 1.2 MST */ +# define DP_SINK_SPECIFIC_IRQ (1 << 6) + +#define DP_LANE0_1_STATUS 0x202 +#define DP_LANE2_3_STATUS 0x203 +# define DP_LANE_CR_DONE (1 << 0) +# define DP_LANE_CHANNEL_EQ_DONE (1 << 1) +# define DP_LANE_SYMBOL_LOCKED (1 << 2) + +#define DP_CHANNEL_EQ_BITS (DP_LANE_CR_DONE | \ + DP_LANE_CHANNEL_EQ_DONE | \ + DP_LANE_SYMBOL_LOCKED) + +#define DP_LANE_ALIGN_STATUS_UPDATED 0x204 + +#define DP_INTERLANE_ALIGN_DONE (1 << 0) +#define DP_DOWNSTREAM_PORT_STATUS_CHANGED (1 << 6) +#define DP_LINK_STATUS_UPDATED (1 << 7) + +#define DP_SINK_STATUS 0x205 +#define DP_SINK_STATUS_PORT0_IN_SYNC (1 << 0) + +#define DP_RECEIVE_PORT_0_STATUS (1 << 0) +#define DP_RECEIVE_PORT_1_STATUS (1 << 1) + +#define DP_ADJUST_REQUEST_LANE0_1 0x206 +#define DP_ADJUST_REQUEST_LANE2_3 0x207 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_MASK 0x03 +# define DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT 0 +# define DP_ADJUST_PRE_EMPHASIS_LANE0_MASK 0x0c +# define DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT 2 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_MASK 0x30 +# define DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT 4 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 +# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 + +#define DP_TEST_REQUEST 0x218 +# define DP_TEST_LINK_TRAINING (1 << 0) +# define DP_TEST_LINK_VIDEO_PATTERN (1 << 1) +# define DP_TEST_LINK_EDID_READ (1 << 2) +# define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ +# define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */ + +#define DP_TEST_LINK_RATE 0x219 +# define DP_LINK_RATE_162 (0x6) +# define DP_LINK_RATE_27 (0xa) + +#define DP_TEST_LANE_COUNT 0x220 + +#define DP_TEST_PATTERN 0x221 + +#define DP_TEST_CRC_R_CR 0x240 +#define DP_TEST_CRC_G_Y 0x242 +#define DP_TEST_CRC_B_CB 0x244 + +#define DP_TEST_SINK_MISC 0x246 +#define DP_TEST_CRC_SUPPORTED (1 << 5) + +#define DP_TEST_RESPONSE 0x260 +# define DP_TEST_ACK (1 << 0) +# define DP_TEST_NAK (1 << 1) +# define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) + +#define DP_TEST_EDID_CHECKSUM 0x261 + +#define DP_TEST_SINK 0x270 +#define DP_TEST_SINK_START (1 << 0) + +#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */ +# define DP_PAYLOAD_TABLE_UPDATED (1 << 0) +# define DP_PAYLOAD_ACT_HANDLED (1 << 1) + +#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */ +/* up to ID_SLOT_63 at 0x2ff */ + +#define DP_SOURCE_OUI 0x300 +#define DP_SINK_OUI 0x400 +#define DP_BRANCH_OUI 0x500 + +#define DP_SET_POWER 0x600 +# define 
DP_SET_POWER_D0 0x1 +# define DP_SET_POWER_D3 0x2 +# define DP_SET_POWER_MASK 0x3 + +#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */ +#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */ + +#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */ +/* 0-5 sink count */ +# define DP_SINK_COUNT_CP_READY (1 << 6) + +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 0x2003 /* 1.2 */ + +#define DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 0x2004 /* 1.2 */ + +#define DP_LINK_SERVICE_IRQ_VECTOR_ESI0 0x2005 /* 1.2 */ + +#define DP_PSR_ERROR_STATUS 0x2006 /* XXX 1.2? */ +# define DP_PSR_LINK_CRC_ERROR (1 << 0) +# define DP_PSR_RFB_STORAGE_ERROR (1 << 1) + +#define DP_PSR_ESI 0x2007 /* XXX 1.2? */ +# define DP_PSR_CAPS_CHANGE (1 << 0) + +#define DP_PSR_STATUS 0x2008 /* XXX 1.2? */ +# define DP_PSR_SINK_INACTIVE 0 +# define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 +# define DP_PSR_SINK_ACTIVE_RFB 2 +# define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3 +# define DP_PSR_SINK_ACTIVE_RESYNC 4 +# define DP_PSR_SINK_INTERNAL_ERROR 7 +# define DP_PSR_SINK_STATE_MASK 0x07 + +/* DP 1.2 Sideband message defines */ +/* peer device type - DP 1.2a Table 2-92 */ +#define DP_PEER_DEVICE_NONE 0x0 +#define DP_PEER_DEVICE_SOURCE_OR_SST 0x1 +#define DP_PEER_DEVICE_MST_BRANCHING 0x2 +#define DP_PEER_DEVICE_SST_SINK 0x3 +#define DP_PEER_DEVICE_DP_LEGACY_CONV 0x4 + +/* DP 1.2 MST sideband request names DP 1.2a Table 2-80 */ +#define DP_LINK_ADDRESS 0x01 +#define DP_CONNECTION_STATUS_NOTIFY 0x02 +#define DP_ENUM_PATH_RESOURCES 0x10 +#define DP_ALLOCATE_PAYLOAD 0x11 +#define DP_QUERY_PAYLOAD 0x12 +#define DP_RESOURCE_STATUS_NOTIFY 0x13 +#define DP_CLEAR_PAYLOAD_ID_TABLE 0x14 +#define DP_REMOTE_DPCD_READ 0x20 +#define DP_REMOTE_DPCD_WRITE 0x21 +#define DP_REMOTE_I2C_READ 0x22 +#define DP_REMOTE_I2C_WRITE 0x23 +#define DP_POWER_UP_PHY 0x24 +#define DP_POWER_DOWN_PHY 0x25 +#define DP_SINK_EVENT_NOTIFY 0x30 +#define DP_QUERY_STREAM_ENC_STATUS 0x38 + +/* DP 1.2 MST sideband nak reasons - table 2.84 */ +#define DP_NAK_WRITE_FAILURE 0x01 +#define DP_NAK_INVALID_READ 0x02 +#define DP_NAK_CRC_FAILURE 0x03 +#define DP_NAK_BAD_PARAM 0x04 +#define DP_NAK_DEFER 0x05 +#define DP_NAK_LINK_FAILURE 0x06 +#define DP_NAK_NO_RESOURCES 0x07 +#define DP_NAK_DPCD_FAIL 0x08 +#define DP_NAK_I2C_NAK 0x09 +#define DP_NAK_ALLOCATE_FAIL 0x0a + +#define MODE_I2C_START 1 +#define MODE_I2C_WRITE 2 +#define MODE_I2C_READ 4 +#define MODE_I2C_STOP 8 + +/* Rest of file omitted as it is not used in U-Boot */ + +#endif /* _DRM_DP_HELPER_H_ */ diff --git a/include/linux/edd.h b/include/linux/edd.h new file mode 100644 index 0000000..992ce7c --- /dev/null +++ b/include/linux/edd.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * linux/include/linux/edd.h + * Copyright (C) 2002, 2003, 2004 Dell Inc. + * by Matt Domsch + * + * structures and definitions for the int 13h, ax={41,48}h + * BIOS Enhanced Disk Drive Services + * This is based on the T13 group document D1572 Revision 0 (August 14 2002) + * available at http://www.t13.org/docs2002/d1572r0.pdf. It is + * very similar to D1484 Revision 3 http://www.t13.org/docs2002/d1484r3.pdf + * + * In a nutshell, arch/{i386,x86_64}/boot/setup.S populates a scratch + * table in the boot_params that contains a list of BIOS-enumerated + * boot devices. 
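The DPCD register layout above is normally consumed by reading bytes over the AUX channel and masking them with these definitions. The sketch below is illustrative only; dp_sink_max_lanes(), dp_lane0_cr_done() and their parameters are assumed helpers, not part of the import. It decodes the sink's lane count from DP_MAX_LANE_COUNT and checks clock-recovery lock for lane 0 in DP_LANE0_1_STATUS.

/* illustrative DPCD decoding; 'dpcd' is a receiver-capability buffer read from offset 0 */
static int dp_sink_max_lanes(const unsigned char *dpcd)
{
	return dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
}

/* 'status' is the byte read from DP_LANE0_1_STATUS; lane 0 uses the low nibble */
static int dp_lane0_cr_done(unsigned char status)
{
	return (status & DP_LANE_CR_DONE) != 0;
}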
+ * In arch/{i386,x86_64}/kernel/setup.c, this information is + * transferred into the edd structure, and in drivers/firmware/edd.c, that + * information is used to identify BIOS boot disk. The code in setup.S + * is very sensitive to the size of these structures. + */ +#ifndef _LINUX_EDD_H +#define _LINUX_EDD_H + +#include + +#define EDDNR 0x1e9 /* addr of number of edd_info structs at EDDBUF + in boot_params - treat this as 1 byte */ +#define EDDBUF 0xd00 /* addr of edd_info structs in boot_params */ +#define EDDMAXNR 6 /* number of edd_info structs starting at EDDBUF */ +#define EDDEXTSIZE 8 /* change these if you muck with the structures */ +#define EDDPARMSIZE 74 +#define CHECKEXTENSIONSPRESENT 0x41 +#define GETDEVICEPARAMETERS 0x48 +#define LEGACYGETDEVICEPARAMETERS 0x08 +#define EDDMAGIC1 0x55AA +#define EDDMAGIC2 0xAA55 + + +#define READ_SECTORS 0x02 /* int13 AH=0x02 is READ_SECTORS command */ +#define EDD_MBR_SIG_OFFSET 0x1B8 /* offset of signature in the MBR */ +#define EDD_MBR_SIG_BUF 0x290 /* addr in boot params */ +#define EDD_MBR_SIG_MAX 16 /* max number of signatures to store */ +#define EDD_MBR_SIG_NR_BUF 0x1ea /* addr of number of MBR signtaures at EDD_MBR_SIG_BUF + in boot_params - treat this as 1 byte */ + +#ifndef __ASSEMBLY__ + +#define EDD_EXT_FIXED_DISK_ACCESS (1 << 0) +#define EDD_EXT_DEVICE_LOCKING_AND_EJECTING (1 << 1) +#define EDD_EXT_ENHANCED_DISK_DRIVE_SUPPORT (1 << 2) +#define EDD_EXT_64BIT_EXTENSIONS (1 << 3) + +#define EDD_INFO_DMA_BOUNDARY_ERROR_TRANSPARENT (1 << 0) +#define EDD_INFO_GEOMETRY_VALID (1 << 1) +#define EDD_INFO_REMOVABLE (1 << 2) +#define EDD_INFO_WRITE_VERIFY (1 << 3) +#define EDD_INFO_MEDIA_CHANGE_NOTIFICATION (1 << 4) +#define EDD_INFO_LOCKABLE (1 << 5) +#define EDD_INFO_NO_MEDIA_PRESENT (1 << 6) +#define EDD_INFO_USE_INT13_FN50 (1 << 7) + +struct edd_device_params { + __u16 length; + __u16 info_flags; + __u32 num_default_cylinders; + __u32 num_default_heads; + __u32 sectors_per_track; + __u64 number_of_sectors; + __u16 bytes_per_sector; + __u32 dpte_ptr; /* 0xFFFFFFFF for our purposes */ + __u16 key; /* = 0xBEDD */ + __u8 device_path_info_length; /* = 44 */ + __u8 reserved2; + __u16 reserved3; + __u8 host_bus_type[4]; + __u8 interface_type[8]; + union { + struct { + __u16 base_address; + __u16 reserved1; + __u32 reserved2; + } __attribute__ ((packed)) isa; + struct { + __u8 bus; + __u8 slot; + __u8 function; + __u8 channel; + __u32 reserved; + } __attribute__ ((packed)) pci; + /* pcix is same as pci */ + struct { + __u64 reserved; + } __attribute__ ((packed)) ibnd; + struct { + __u64 reserved; + } __attribute__ ((packed)) xprs; + struct { + __u64 reserved; + } __attribute__ ((packed)) htpt; + struct { + __u64 reserved; + } __attribute__ ((packed)) unknown; + } interface_path; + union { + struct { + __u8 device; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 reserved4; + } __attribute__ ((packed)) ata; + struct { + __u8 device; + __u8 lun; + __u8 reserved1; + __u8 reserved2; + __u32 reserved3; + __u64 reserved4; + } __attribute__ ((packed)) atapi; + struct { + __u16 id; + __u64 lun; + __u16 reserved1; + __u32 reserved2; + } __attribute__ ((packed)) scsi; + struct { + __u64 serial_number; + __u64 reserved; + } __attribute__ ((packed)) usb; + struct { + __u64 eui; + __u64 reserved; + } __attribute__ ((packed)) i1394; + struct { + __u64 wwid; + __u64 lun; + } __attribute__ ((packed)) fibre; + struct { + __u64 identity_tag; + __u64 reserved; + } __attribute__ ((packed)) i2o; + struct { + __u32 array_number; + __u32 reserved1; + 
__u64 reserved2; + } __attribute__ ((packed)) raid; + struct { + __u8 device; + __u8 reserved1; + __u16 reserved2; + __u32 reserved3; + __u64 reserved4; + } __attribute__ ((packed)) sata; + struct { + __u64 reserved1; + __u64 reserved2; + } __attribute__ ((packed)) unknown; + } device_path; + __u8 reserved4; + __u8 checksum; +} __attribute__ ((packed)); + +struct edd_info { + __u8 device; + __u8 version; + __u16 interface_support; + __u16 legacy_max_cylinder; + __u8 legacy_max_head; + __u8 legacy_sectors_per_track; + struct edd_device_params params; +} __attribute__ ((packed)); + +struct edd { + unsigned int mbr_signature[EDD_MBR_SIG_MAX]; + struct edd_info edd_info[EDDMAXNR]; + unsigned char mbr_signature_nr; + unsigned char edd_info_nr; +}; + +#ifdef __KERNEL__ +extern struct edd edd; +#endif /* __KERNEL__ */ +#endif /*!__ASSEMBLY__ */ + +#endif /* _LINUX_EDD_H */ diff --git a/include/linux/err.h b/include/linux/err.h new file mode 100644 index 0000000..22e5756 --- /dev/null +++ b/include/linux/err.h @@ -0,0 +1,59 @@ +#ifndef _LINUX_ERR_H +#define _LINUX_ERR_H + +#include +#include + +#include + + +/* + * Kernel pointers have redundant information, so we can use a + * scheme where we can return either an error code or a dentry + * pointer with the same return value. + * + * This should be a per-architecture thing, to allow different + * error and pointer decisions. + */ +#define MAX_ERRNO 4095 + +#ifndef __ASSEMBLY__ + +#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO) + +static inline void *ERR_PTR(long error) +{ + return (void *) error; +} + +static inline long PTR_ERR(const void *ptr) +{ + return (long) ptr; +} + +static inline long IS_ERR(const void *ptr) +{ + return IS_ERR_VALUE((unsigned long)ptr); +} + +static inline bool IS_ERR_OR_NULL(const void *ptr) +{ + return !ptr || IS_ERR_VALUE((unsigned long)ptr); +} + +/** + * ERR_CAST - Explicitly cast an error-valued pointer to another pointer type + * @ptr: The pointer to cast. + * + * Explicitly cast an error-valued pointer to another pointer type in such a + * way as to make it clear that's what's going on. 
+ */ +static inline void * __must_check ERR_CAST(__force const void *ptr) +{ + /* cast away the const */ + return (void *) ptr; +} + +#endif + +#endif /* _LINUX_ERR_H */ diff --git a/include/linux/errno.h b/include/linux/errno.h new file mode 100644 index 0000000..3d94207 --- /dev/null +++ b/include/linux/errno.h @@ -0,0 +1,168 @@ +#ifndef _LINUX_ERRNO_H +#define _LINUX_ERRNO_H + +#define EPERM 1 /* Operation not permitted */ +#define ENOENT 2 /* No such file or directory */ +#define ESRCH 3 /* No such process */ +#define EINTR 4 /* Interrupted system call */ +#define EIO 5 /* I/O error */ +#define ENXIO 6 /* No such device or address */ +#define E2BIG 7 /* Argument list too long */ +#define ENOEXEC 8 /* Exec format error */ +#define EBADF 9 /* Bad file number */ +#define ECHILD 10 /* No child processes */ +#define EAGAIN 11 /* Try again */ +#define ENOMEM 12 /* Out of memory */ +#define EACCES 13 /* Permission denied */ +#define EFAULT 14 /* Bad address */ +#define ENOTBLK 15 /* Block device required */ +#define EBUSY 16 /* Device or resource busy */ +#define EEXIST 17 /* File exists */ +#define EXDEV 18 /* Cross-device link */ +#define ENODEV 19 /* No such device */ +#define ENOTDIR 20 /* Not a directory */ +#define EISDIR 21 /* Is a directory */ +#define EINVAL 22 /* Invalid argument */ +#define ENFILE 23 /* File table overflow */ +#define EMFILE 24 /* Too many open files */ +#define ENOTTY 25 /* Not a typewriter */ +#define ETXTBSY 26 /* Text file busy */ +#define EFBIG 27 /* File too large */ +#define ENOSPC 28 /* No space left on device */ +#define ESPIPE 29 /* Illegal seek */ +#define EROFS 30 /* Read-only file system */ +#define EMLINK 31 /* Too many links */ +#define EPIPE 32 /* Broken pipe */ +#define EDOM 33 /* Math argument out of domain of func */ +#define ERANGE 34 /* Math result not representable */ + +#define EDEADLK 35 /* Resource deadlock would occur */ +#define ENAMETOOLONG 36 /* File name too long */ +#define ENOLCK 37 /* No record locks available */ + +#define ENOSYS 38 /* Invalid system call number */ + +#define ENOTEMPTY 39 /* Directory not empty */ +#define ELOOP 40 /* Too many symbolic links encountered */ +#define EWOULDBLOCK EAGAIN /* Operation would block */ +#define ENOMSG 42 /* No message of desired type */ +#define EIDRM 43 /* Identifier removed */ +#define ECHRNG 44 /* Channel number out of range */ +#define EL2NSYNC 45 /* Level 2 not synchronized */ +#define EL3HLT 46 /* Level 3 halted */ +#define EL3RST 47 /* Level 3 reset */ +#define ELNRNG 48 /* Link number out of range */ +#define EUNATCH 49 /* Protocol driver not attached */ +#define ENOCSI 50 /* No CSI structure available */ +#define EL2HLT 51 /* Level 2 halted */ +#define EBADE 52 /* Invalid exchange */ +#define EBADR 53 /* Invalid request descriptor */ +#define EXFULL 54 /* Exchange full */ +#define ENOANO 55 /* No anode */ +#define EBADRQC 56 /* Invalid request code */ +#define EBADSLT 57 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 59 /* Bad font file format */ +#define ENOSTR 60 /* Device not a stream */ +#define ENODATA 61 /* No data available */ +#define ETIME 62 /* Timer expired */ +#define ENOSR 63 /* Out of streams resources */ +#define ENONET 64 /* Machine is not on the network */ +#define ENOPKG 65 /* Package not installed */ +#define EREMOTE 66 /* Object is remote */ +#define ENOLINK 67 /* Link has been severed */ +#define EADV 68 /* Advertise error */ +#define ESRMNT 69 /* Srmount error */ +#define ECOMM 70 /* Communication error on send */ +#define EPROTO 71 /* 
Protocol error */ +#define EMULTIHOP 72 /* Multihop attempted */ +#define EDOTDOT 73 /* RFS specific error */ +#define EBADMSG 74 /* Not a data message */ +#define EOVERFLOW 75 /* Value too large for defined data type */ +#define ENOTUNIQ 76 /* Name not unique on network */ +#define EBADFD 77 /* File descriptor in bad state */ +#define EREMCHG 78 /* Remote address changed */ +#define ELIBACC 79 /* Can not access a needed shared library */ +#define ELIBBAD 80 /* Accessing a corrupted shared library */ +#define ELIBSCN 81 /* .lib section in a.out corrupted */ +#define ELIBMAX 82 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 83 /* Cannot exec a shared library directly */ +#define EILSEQ 84 /* Illegal byte sequence */ +#define ERESTART 85 /* Interrupted system call should be restarted */ +#define ESTRPIPE 86 /* Streams pipe error */ +#define EUSERS 87 /* Too many users */ +#define ENOTSOCK 88 /* Socket operation on non-socket */ +#define EDESTADDRREQ 89 /* Destination address required */ +#define EMSGSIZE 90 /* Message too long */ +#define EPROTOTYPE 91 /* Protocol wrong type for socket */ +#define ENOPROTOOPT 92 /* Protocol not available */ +#define EPROTONOSUPPORT 93 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 94 /* Socket type not supported */ +#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 96 /* Protocol family not supported */ +#define EAFNOSUPPORT 97 /* Address family not supported by protocol */ +#define EADDRINUSE 98 /* Address already in use */ +#define EADDRNOTAVAIL 99 /* Cannot assign requested address */ +#define ENETDOWN 100 /* Network is down */ +#define ENETUNREACH 101 /* Network is unreachable */ +#define ENETRESET 102 /* Network dropped connection because of reset */ +#define ECONNABORTED 103 /* Software caused connection abort */ +#define ECONNRESET 104 /* Connection reset by peer */ +#define ENOBUFS 105 /* No buffer space available */ +#define EISCONN 106 /* Transport endpoint is already connected */ +#define ENOTCONN 107 /* Transport endpoint is not connected */ +#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 109 /* Too many references: cannot splice */ +#define ETIMEDOUT 110 /* Connection timed out */ +#define ECONNREFUSED 111 /* Connection refused */ +#define EHOSTDOWN 112 /* Host is down */ +#define EHOSTUNREACH 113 /* No route to host */ +#define EALREADY 114 /* Operation already in progress */ +#define EINPROGRESS 115 /* Operation now in progress */ +#define ESTALE 116 /* Stale file handle */ +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ +#define EDQUOT 122 /* Quota exceeded */ + +#define ENOMEDIUM 123 /* No medium found */ +#define EMEDIUMTYPE 124 /* Wrong medium type */ +#define ECANCELED 125 /* Operation Canceled */ +#define ENOKEY 126 /* Required key not available */ +#define EKEYEXPIRED 127 /* Key has expired */ +#define EKEYREVOKED 128 /* Key has been revoked */ +#define EKEYREJECTED 129 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 130 /* Owner died */ +#define ENOTRECOVERABLE 131 /* State not recoverable */ + +#define ERFKILL 132 /* Operation not possible due to RF-kill */ + +#define EHWPOISON 133 /* Memory page has hardware error */ + +#define ERESTARTSYS 512 +#define ERESTARTNOINTR 513 
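The err.h helpers above rely on the top MAX_ERRNO addresses never being valid pointers, so a negative errno can travel through a pointer return value. A minimal usage sketch, assuming the ERR_PTR()/IS_ERR()/PTR_ERR() helpers and the errno values defined above; alloc_thing(), use_thing() and the call to malloc() stand in for real code and are not part of the import.

/* hypothetical allocator demonstrating the ERR_PTR() convention */
static void *alloc_thing(unsigned long size)
{
	void *p = malloc(size);			/* any allocator would do here */

	if (!p)
		return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
	return p;
}

static int use_thing(void)
{
	void *thing = alloc_thing(64);

	if (IS_ERR(thing))
		return PTR_ERR(thing);		/* recovers -ENOMEM */
	/* ... use and later free 'thing' ... */
	return 0;
}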
+#define ERESTARTNOHAND 514 /* restart if no handler.. */ +#define ENOIOCTLCMD 515 /* No ioctl command */ +#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ +#define EPROBE_DEFER 517 /* Driver requests probe retry */ +#define EOPENSTALE 518 /* open found a stale dentry */ + +/* Defined for the NFSv3 protocol */ +#define EBADHANDLE 521 /* Illegal NFS file handle */ +#define ENOTSYNC 522 /* Update synchronization mismatch */ +#define EBADCOOKIE 523 /* Cookie is stale */ +#define ENOTSUPP 524 /* Operation is not supported */ +#define ETOOSMALL 525 /* Buffer or request is too small */ +#define ESERVERFAULT 526 /* An untranslatable error occurred */ +#define EBADTYPE 527 /* Type not supported by server */ +#define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ +#define EIOCBQUEUED 529 /* iocb queued, will get completion event */ +#define ERECALLCONFLICT 530 /* conflict with recalled state */ + +#endif diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h new file mode 100644 index 0000000..f6dbdb0 --- /dev/null +++ b/include/linux/ethtool.h @@ -0,0 +1,725 @@ +/* + * ethtool.h: Defines for Linux ethtool. + * + * Copyright (C) 1998 David S. Miller (davem@redhat.com) + * Copyright 2001 Jeff Garzik + * Portions Copyright 2001 Sun Microsystems (thockin@sun.com) + * Portions Copyright 2002 Intel (eli.kupermann@intel.com, + * christopher.leech@intel.com, + * scott.feldman@intel.com) + * Portions Copyright (C) Sun Microsystems 2008 + */ + +#ifndef _LINUX_ETHTOOL_H +#define _LINUX_ETHTOOL_H + +#include + +/* This should work for both 32 and 64 bit userland. */ +struct ethtool_cmd { + __u32 cmd; + __u32 supported; /* Features this interface supports */ + __u32 advertising; /* Features this interface advertises */ + __u16 speed; /* The forced speed, 10Mb, 100Mb, gigabit */ + __u8 duplex; /* Duplex, half or full */ + __u8 port; /* Which connector port */ + __u8 phy_address; + __u8 transceiver; /* Which transceiver to use */ + __u8 autoneg; /* Enable or disable autonegotiation */ + __u8 mdio_support; + __u32 maxtxpkt; /* Tx pkts before generating tx int */ + __u32 maxrxpkt; /* Rx pkts before generating rx int */ + __u16 speed_hi; + __u8 eth_tp_mdix; + __u8 reserved2; + __u32 lp_advertising; /* Features the link partner advertises */ + __u32 reserved[2]; +}; + +static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, + __u32 speed) +{ + + ep->speed = (__u16)speed; + ep->speed_hi = (__u16)(speed >> 16); +} + +static inline __u32 ethtool_cmd_speed(struct ethtool_cmd *ep) +{ + return (ep->speed_hi << 16) | ep->speed; +} + +#define ETHTOOL_FWVERS_LEN 32 +#define ETHTOOL_BUSINFO_LEN 32 +/* these strings are set to whatever the driver author decides... */ +struct ethtool_drvinfo { + __u32 cmd; + char driver[32]; /* driver short name, "tulip", "eepro100" */ + char version[32]; /* driver version string */ + char fw_version[ETHTOOL_FWVERS_LEN]; /* firmware version string */ + char bus_info[ETHTOOL_BUSINFO_LEN]; /* Bus info for this IF. */ + /* For PCI devices, use pci_name(pci_dev). */ + char reserved1[32]; + char reserved2[12]; + /* + * Some struct members below are filled in + * using ops->get_sset_count(). Obtaining + * this info from ethtool_drvinfo is now + * deprecated; Use ETHTOOL_GSSET_INFO + * instead. 
+ */ + __u32 n_priv_flags; /* number of flags valid in ETHTOOL_GPFLAGS */ + __u32 n_stats; /* number of u64's from ETHTOOL_GSTATS */ + __u32 testinfo_len; + __u32 eedump_len; /* Size of data from ETHTOOL_GEEPROM (bytes) */ + __u32 regdump_len; /* Size of data from ETHTOOL_GREGS (bytes) */ +}; + +#define SOPASS_MAX 6 +/* wake-on-lan settings */ +struct ethtool_wolinfo { + __u32 cmd; + __u32 supported; + __u32 wolopts; + __u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ +}; + +/* for passing single values */ +struct ethtool_value { + __u32 cmd; + __u32 data; +}; + +/* for passing big chunks of data */ +struct ethtool_regs { + __u32 cmd; + __u32 version; /* driver-specific, indicates different chips/revs */ + __u32 len; /* bytes */ + __u8 data[0]; +}; + +/* for passing EEPROM chunks */ +struct ethtool_eeprom { + __u32 cmd; + __u32 magic; + __u32 offset; /* in bytes */ + __u32 len; /* in bytes */ + __u8 data[0]; +}; + +/* for configuring coalescing parameters of chip */ +struct ethtool_coalesce { + __u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + __u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. + */ + __u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + __u32 rx_coalesce_usecs_irq; + __u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + __u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + __u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + __u32 tx_coalesce_usecs_irq; + __u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + __u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + __u32 use_adaptive_rx_coalesce; + __u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. 
+ */ + __u32 pkt_rate_low; + __u32 rx_coalesce_usecs_low; + __u32 rx_max_coalesced_frames_low; + __u32 tx_coalesce_usecs_low; + __u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + __u32 pkt_rate_high; + __u32 rx_coalesce_usecs_high; + __u32 rx_max_coalesced_frames_high; + __u32 tx_coalesce_usecs_high; + __u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + __u32 rate_sample_interval; +}; + +/* for configuring RX/TX ring parameters */ +struct ethtool_ringparam { + __u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + __u32 rx_max_pending; + __u32 rx_mini_max_pending; + __u32 rx_jumbo_max_pending; + __u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + __u32 rx_pending; + __u32 rx_mini_pending; + __u32 rx_jumbo_pending; + __u32 tx_pending; +}; + +/* for configuring link flow control parameters */ +struct ethtool_pauseparam { + __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autonet' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + __u32 autoneg; + __u32 rx_pause; + __u32 tx_pause; +}; + +#define ETH_GSTRING_LEN 32 +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, + ETH_SS_PRIV_FLAGS, + ETH_SS_NTUPLE_FILTERS, + ETH_SS_FEATURES, +}; + +/* for passing string sets for data tagging */ +struct ethtool_gstrings { + __u32 cmd; /* ETHTOOL_GSTRINGS */ + __u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/ + __u32 len; /* number of strings in the string set */ + __u8 data[0]; +}; + +struct ethtool_sset_info { + __u32 cmd; /* ETHTOOL_GSSET_INFO */ + __u32 reserved; + __u64 sset_mask; /* input: each bit selects an sset to query */ + /* output: each bit a returned sset */ + __u32 data[0]; /* ETH_SS_xxx count, in order, based on bits + in sset_mask. One bit implies one + __u32, two bits implies two + __u32's, etc. */ +}; + +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), /* online / offline */ + ETH_TEST_FL_FAILED = (1 << 1), /* test passed / failed */ +}; + +/* for requesting NIC test and getting results*/ +struct ethtool_test { + __u32 cmd; /* ETHTOOL_TEST */ + __u32 flags; /* ETH_TEST_FL_xxx */ + __u32 reserved; + __u32 len; /* result length, in number of u64 elements */ + __u64 data[0]; +}; + +/* for dumping NIC-specific statistics */ +struct ethtool_stats { + __u32 cmd; /* ETHTOOL_GSTATS */ + __u32 n_stats; /* number of u64's being returned */ + __u64 data[0]; +}; + +struct ethtool_perm_addr { + __u32 cmd; /* ETHTOOL_GPERMADDR */ + __u32 size; + __u8 data[0]; +}; + +/* boolean flags controlling per-interface behavior characteristics. 
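struct ethtool_cmd above splits the link speed across the 16-bit speed and speed_hi fields; the ethtool_cmd_speed_set()/ethtool_cmd_speed() inlines defined earlier in this header hide that split. A short illustrative round trip follows; the function name is invented for the sketch.

/* illustrative only: speeds above 65535 Mb/s spill into speed_hi */
static __u32 ethtool_speed_roundtrip(void)
{
	struct ethtool_cmd cmd = { 0 };

	ethtool_cmd_speed_set(&cmd, 100000);	/* 100 Gb/s: speed = 34464, speed_hi = 1 */
	return ethtool_cmd_speed(&cmd);		/* reassembles 100000 */
}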
+ * When reading, the flag indicates whether or not a certain behavior + * is enabled/present. When writing, the flag indicates whether + * or not the driver should turn on (set) or off (clear) a behavior. + * + * Some behaviors may read-only (unconditionally absent or present). + * If such is the case, return EINVAL in the set-flags operation if the + * flag differs from the read-only value. + */ +enum ethtool_flags { + ETH_FLAG_TXVLAN = (1 << 7), /* TX VLAN offload enabled */ + ETH_FLAG_RXVLAN = (1 << 8), /* RX VLAN offload enabled */ + ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ + ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */ + ETH_FLAG_RXHASH = (1 << 28), +}; + +/* The following structures are for supporting RX network flow + * classification and RX n-tuple configuration. Note, all multibyte + * fields, e.g., ip4src, ip4dst, psrc, pdst, spi, etc. are expected to + * be in network byte order. + */ + +/** + * struct ethtool_tcpip4_spec - flow specification for TCP/IPv4 etc. + * @ip4src: Source host + * @ip4dst: Destination host + * @psrc: Source port + * @pdst: Destination port + * @tos: Type-of-service + * + * This can be used to specify a TCP/IPv4, UDP/IPv4 or SCTP/IPv4 flow. + */ +struct ethtool_tcpip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be16 psrc; + __be16 pdst; + __u8 tos; +}; + +/** + * struct ethtool_ah_espip4_spec - flow specification for IPsec/IPv4 + * @ip4src: Source host + * @ip4dst: Destination host + * @spi: Security parameters index + * @tos: Type-of-service + * + * This can be used to specify an IPsec transport or tunnel over IPv4. + */ +struct ethtool_ah_espip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 spi; + __u8 tos; +}; + +#define ETH_RX_NFC_IP4 1 + +/** + * struct ethtool_usrip4_spec - general flow specification for IPv4 + * @ip4src: Source host + * @ip4dst: Destination host + * @l4_4_bytes: First 4 bytes of transport (layer 4) header + * @tos: Type-of-service + * @ip_ver: Value must be %ETH_RX_NFC_IP4; mask must be 0 + * @proto: Transport protocol number; mask must be 0 + */ +struct ethtool_usrip4_spec { + __be32 ip4src; + __be32 ip4dst; + __be32 l4_4_bytes; + __u8 tos; + __u8 ip_ver; + __u8 proto; +}; + + +/** + * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection + * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR + * @size: On entry, the array size of the user buffer. On return from + * %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table. 
+ * @ring_index: RX ring/queue index for each hash value + */ +struct ethtool_rxfh_indir { + __u32 cmd; + __u32 size; + __u32 ring_index[0]; +}; + +#define ETHTOOL_FLASH_MAX_FILENAME 128 +enum ethtool_flash_op_type { + ETHTOOL_FLASH_ALL_REGIONS = 0, +}; + +/* for passing firmware flashing related parameters */ +struct ethtool_flash { + __u32 cmd; + __u32 region; + char data[ETHTOOL_FLASH_MAX_FILENAME]; +}; + +/* for returning and changing feature sets */ + +/** + * struct ethtool_get_features_block - block with state of 32 features + * @available: mask of changeable features + * @requested: mask of features requested to be enabled if possible + * @active: mask of currently enabled features + * @never_changed: mask of features not changeable for any device + */ +struct ethtool_get_features_block { + __u32 available; + __u32 requested; + __u32 active; + __u32 never_changed; +}; + +/** + * struct ethtool_gfeatures - command to get state of device's features + * @cmd: command number = %ETHTOOL_GFEATURES + * @size: in: number of elements in the features[] array; + * out: number of elements in features[] needed to hold all features + * @features: state of features + */ +struct ethtool_gfeatures { + __u32 cmd; + __u32 size; + struct ethtool_get_features_block features[0]; +}; + +/** + * struct ethtool_set_features_block - block with request for 32 features + * @valid: mask of features to be changed + * @requested: values of features to be changed + */ +struct ethtool_set_features_block { + __u32 valid; + __u32 requested; +}; + +/** + * struct ethtool_sfeatures - command to request change in device's features + * @cmd: command number = %ETHTOOL_SFEATURES + * @size: array size of the features[] array + * @features: feature change masks + */ +struct ethtool_sfeatures { + __u32 cmd; + __u32 size; + struct ethtool_set_features_block features[0]; +}; + +/* + * %ETHTOOL_SFEATURES changes features present in features[].valid to the + * values of corresponding bits in features[].requested. Bits in .requested + * not set in .valid or not changeable are ignored. + * + * Returns %EINVAL when .valid contains undefined or never-changable bits + * or size is not equal to required number of features words (32-bit blocks). + * Returns >= 0 if request was completed; bits set in the value mean: + * %ETHTOOL_F_UNSUPPORTED - there were bits set in .valid that are not + * changeable (not present in %ETHTOOL_GFEATURES' features[].available) + * those bits were ignored. + * %ETHTOOL_F_WISH - some or all changes requested were recorded but the + * resulting state of bits masked by .valid is not equal to .requested. + * Probably there are other device-specific constraints on some features + * in the set. When %ETHTOOL_F_UNSUPPORTED is set, .valid is considered + * here as though ignored bits were cleared. + * %ETHTOOL_F_COMPAT - some or all changes requested were made by calling + * compatibility functions. Requested offload state cannot be properly + * managed by kernel. + * + * Meaning of bits in the masks are obtained by %ETHTOOL_GSSET_INFO (number of + * bits in the arrays - always multiple of 32) and %ETHTOOL_GSTRINGS commands + * for ETH_SS_FEATURES string set. First entry in the table corresponds to least + * significant bit in features[0] fields. Empty strings mark undefined features. 
+ */ +enum ethtool_sfeatures_retval_bits { + ETHTOOL_F_UNSUPPORTED__BIT, + ETHTOOL_F_WISH__BIT, + ETHTOOL_F_COMPAT__BIT, +}; + +#define ETHTOOL_F_UNSUPPORTED (1 << ETHTOOL_F_UNSUPPORTED__BIT) +#define ETHTOOL_F_WISH (1 << ETHTOOL_F_WISH__BIT) +#define ETHTOOL_F_COMPAT (1 << ETHTOOL_F_COMPAT__BIT) + +/* CMDs currently supported */ +#define ETHTOOL_GSET 0x00000001 /* Get settings. */ +#define ETHTOOL_SSET 0x00000002 /* Set settings. */ +#define ETHTOOL_GDRVINFO 0x00000003 /* Get driver info. */ +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ +#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ +#define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */ +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ +/* Get link status for host, i.e. whether the interface *and* the + * physical port (if there is one) are up (ethtool_value). */ +#define ETHTOOL_GLINK 0x0000000a +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */ +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable + * (ethtool_value) */ +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable + * (ethtool_value). */ +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. 
*/ +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#define ETHTOOL_GPERMADDR 0x00000020 /* Get permanent hardware address */ +#define ETHTOOL_GUFO 0x00000021 /* Get UFO enable (ethtool_value) */ +#define ETHTOOL_SUFO 0x00000022 /* Set UFO enable (ethtool_value) */ +#define ETHTOOL_GGSO 0x00000023 /* Get GSO enable (ethtool_value) */ +#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ +#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */ +#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */ +#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ +#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ + +#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ +#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ +#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */ +#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */ +#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */ +#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */ +#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */ +#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */ +#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */ +#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */ +#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */ +#define ETHTOOL_RESET 0x00000034 /* Reset hardware */ +#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */ +#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */ +#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */ +#define ETHTOOL_GRXFHINDIR 0x00000038 /* Get RX flow hash indir'n table */ +#define ETHTOOL_SRXFHINDIR 0x00000039 /* Set RX flow hash indir'n table */ + +#define ETHTOOL_GFEATURES 0x0000003a /* Get device offload settings */ +#define ETHTOOL_SFEATURES 0x0000003b /* Change device offload settings */ + +/* compatibility with older code */ +#define SPARC_ETH_GSET ETHTOOL_GSET +#define SPARC_ETH_SSET ETHTOOL_SSET + +/* Indicates what features are supported by the interface. */ +#define SUPPORTED_10baseT_Half (1 << 0) +#define SUPPORTED_10baseT_Full (1 << 1) +#define SUPPORTED_100baseT_Half (1 << 2) +#define SUPPORTED_100baseT_Full (1 << 3) +#define SUPPORTED_1000baseT_Half (1 << 4) +#define SUPPORTED_1000baseT_Full (1 << 5) +#define SUPPORTED_Autoneg (1 << 6) +#define SUPPORTED_TP (1 << 7) +#define SUPPORTED_AUI (1 << 8) +#define SUPPORTED_MII (1 << 9) +#define SUPPORTED_FIBRE (1 << 10) +#define SUPPORTED_BNC (1 << 11) +#define SUPPORTED_10000baseT_Full (1 << 12) +#define SUPPORTED_Pause (1 << 13) +#define SUPPORTED_Asym_Pause (1 << 14) +#define SUPPORTED_2500baseX_Full (1 << 15) +#define SUPPORTED_Backplane (1 << 16) +#define SUPPORTED_1000baseKX_Full (1 << 17) +#define SUPPORTED_10000baseKX4_Full (1 << 18) +#define SUPPORTED_10000baseKR_Full (1 << 19) +#define SUPPORTED_10000baseR_FEC (1 << 20) +#define SUPPORTED_1000baseX_Half (1 << 21) +#define SUPPORTED_1000baseX_Full (1 << 22) + +/* Indicates what features are advertised by the interface. 
*/ +#define ADVERTISED_10baseT_Half (1 << 0) +#define ADVERTISED_10baseT_Full (1 << 1) +#define ADVERTISED_100baseT_Half (1 << 2) +#define ADVERTISED_100baseT_Full (1 << 3) +#define ADVERTISED_1000baseT_Half (1 << 4) +#define ADVERTISED_1000baseT_Full (1 << 5) +#define ADVERTISED_Autoneg (1 << 6) +#define ADVERTISED_TP (1 << 7) +#define ADVERTISED_AUI (1 << 8) +#define ADVERTISED_MII (1 << 9) +#define ADVERTISED_FIBRE (1 << 10) +#define ADVERTISED_BNC (1 << 11) +#define ADVERTISED_10000baseT_Full (1 << 12) +#define ADVERTISED_Pause (1 << 13) +#define ADVERTISED_Asym_Pause (1 << 14) +#define ADVERTISED_2500baseX_Full (1 << 15) +#define ADVERTISED_Backplane (1 << 16) +#define ADVERTISED_1000baseKX_Full (1 << 17) +#define ADVERTISED_10000baseKX4_Full (1 << 18) +#define ADVERTISED_10000baseKR_Full (1 << 19) +#define ADVERTISED_10000baseR_FEC (1 << 20) +#define ADVERTISED_1000baseX_Half (1 << 21) +#define ADVERTISED_1000baseX_Full (1 << 22) + +/* The following are all involved in forcing a particular link + * mode for the device for setting things. When getting the + * devices settings, these indicate the current mode and whether + * it was foced up into this mode or autonegotiated. + */ + +/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define SPEED_2500 2500 +#define SPEED_10000 10000 + +/* Duplex, half or full. */ +#define DUPLEX_HALF 0x00 +#define DUPLEX_FULL 0x01 + +/* Which connector port. */ +#define PORT_TP 0x00 +#define PORT_AUI 0x01 +#define PORT_MII 0x02 +#define PORT_FIBRE 0x03 +#define PORT_BNC 0x04 +#define PORT_DA 0x05 +#define PORT_NONE 0xef +#define PORT_OTHER 0xff + +/* Which transceiver to use. */ +#define XCVR_INTERNAL 0x00 +#define XCVR_EXTERNAL 0x01 +#define XCVR_DUMMY1 0x02 +#define XCVR_DUMMY2 0x03 +#define XCVR_DUMMY3 0x04 + +/* Enable or disable autonegotiation. If this is set to enable, + * the forced link modes above are completely ignored. + */ +#define AUTONEG_DISABLE 0x00 +#define AUTONEG_ENABLE 0x01 + +/* Mode MDI or MDI-X */ +#define ETH_TP_MDI_INVALID 0x00 +#define ETH_TP_MDI 0x01 +#define ETH_TP_MDI_X 0x02 + +/* Wake-On-Lan options. 
*/ +#define WAKE_PHY (1 << 0) +#define WAKE_UCAST (1 << 1) +#define WAKE_MCAST (1 << 2) +#define WAKE_BCAST (1 << 3) +#define WAKE_ARP (1 << 4) +#define WAKE_MAGIC (1 << 5) +#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ + +/* L2-L4 network traffic flow types */ +#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ +#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ +#define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */ +#define AH_ESP_V4_FLOW 0x04 /* hash only */ +#define TCP_V6_FLOW 0x05 /* hash only */ +#define UDP_V6_FLOW 0x06 /* hash only */ +#define SCTP_V6_FLOW 0x07 /* hash only */ +#define AH_ESP_V6_FLOW 0x08 /* hash only */ +#define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */ +#define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */ +#define AH_V6_FLOW 0x0b /* hash only */ +#define ESP_V6_FLOW 0x0c /* hash only */ +#define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ +#define IPV4_FLOW 0x10 /* hash only */ +#define IPV6_FLOW 0x11 /* hash only */ +#define ETHER_FLOW 0x12 /* spec only (ether_spec) */ + +/* L3-L4 network traffic flow hash options */ +#define RXH_L2DA (1 << 1) +#define RXH_VLAN (1 << 2) +#define RXH_L3_PROTO (1 << 3) +#define RXH_IP_SRC (1 << 4) +#define RXH_IP_DST (1 << 5) +#define RXH_L4_B_0_1 (1 << 6) /* src port in case of TCP/UDP/SCTP */ +#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */ +#define RXH_DISCARD (1 << 31) + +#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL + +/* Reset flags */ +/* The reset() operation must clear the flags for the components which + * were actually reset. On successful return, the flags indicate the + * components which were not reset, either because they do not exist + * in the hardware or because they cannot be reset independently. The + * driver must never reset any components that were not requested. + */ +enum ethtool_reset_flags { + /* These flags represent components dedicated to the interface + * the command is addressed to. Shift any flag left by + * ETH_RESET_SHARED_SHIFT to reset a shared component of the + * same type. + */ + ETH_RESET_MGMT = 1 << 0, /* Management processor */ + ETH_RESET_IRQ = 1 << 1, /* Interrupt requester */ + ETH_RESET_DMA = 1 << 2, /* DMA engine */ + ETH_RESET_FILTER = 1 << 3, /* Filtering/flow direction */ + ETH_RESET_OFFLOAD = 1 << 4, /* Protocol offload */ + ETH_RESET_MAC = 1 << 5, /* Media access controller */ + ETH_RESET_PHY = 1 << 6, /* Transceiver/PHY */ + ETH_RESET_RAM = 1 << 7, /* RAM shared between + * multiple components */ + + ETH_RESET_DEDICATED = 0x0000ffff, /* All components dedicated to + * this interface */ + ETH_RESET_ALL = 0xffffffff, /* All components used by this + * interface, even if shared */ +}; +#define ETH_RESET_SHARED_SHIFT 16 + +#endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/fb.h b/include/linux/fb.h new file mode 100644 index 0000000..c820a4f --- /dev/null +++ b/include/linux/fb.h @@ -0,0 +1,619 @@ +#ifndef _LINUX_FB_H +#define _LINUX_FB_H + +#include +#include + +/* Definitions of frame buffers */ + +#define FB_MAX 32 /* sufficient for now */ + +#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ + +#define FB_VISUAL_MONO01 0 /* Monochr. 1=Black 0=White */ +#define FB_VISUAL_MONO10 1 /* Monochr. 
1=White 0=Black */ +#define FB_VISUAL_TRUECOLOR 2 /* True color */ +#define FB_VISUAL_PSEUDOCOLOR 3 /* Pseudo color (like atari) */ +#define FB_VISUAL_DIRECTCOLOR 4 /* Direct color */ +#define FB_VISUAL_STATIC_PSEUDOCOLOR 5 /* Pseudo color readonly */ + +#define FB_ACCEL_NONE 0 /* no hardware accelerator */ + +struct fb_fix_screeninfo { + char id[16]; /* identification string eg "TT Builtin" */ + unsigned long smem_start; /* Start of frame buffer mem */ + /* (physical address) */ + __u32 smem_len; /* Length of frame buffer mem */ + __u32 type; /* see FB_TYPE_* */ + __u32 type_aux; /* Interleave for interleaved Planes */ + __u32 visual; /* see FB_VISUAL_* */ + __u16 xpanstep; /* zero if no hardware panning */ + __u16 ypanstep; /* zero if no hardware panning */ + __u16 ywrapstep; /* zero if no hardware ywrap */ + __u32 line_length; /* length of a line in bytes */ + unsigned long mmio_start; /* Start of Memory Mapped I/O */ + /* (physical address) */ + __u32 mmio_len; /* Length of Memory Mapped I/O */ + __u32 accel; /* Indicate to driver which */ + /* specific chip/card we have */ + __u16 reserved[3]; /* Reserved for future compatibility */ +}; + +/* + * Interpretation of offset for color fields: All offsets are from the right, + * inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you + * can use the offset as right argument to <<). A pixel afterwards is a bit + * stream and is written to video memory as that unmodified. + * + * For pseudocolor: offset and length should be the same for all color + * components. Offset specifies the position of the least significant bit + * of the pallette index in a pixel value. Length indicates the number + * of available palette entries (i.e. # of entries = 1 << length). + */ +struct fb_bitfield { + __u32 offset; /* beginning of bitfield */ + __u32 length; /* length of bitfield */ + __u32 msb_right; + +}; + +#define FB_NONSTD_HAM 1 /* Hold-And-Modify (HAM) */ +#define FB_NONSTD_REV_PIX_IN_B 2 /* order of pixels in each byte is reversed */ + +#define FB_ACTIVATE_NOW 0 /* set values immediately (or vbl)*/ +#define FB_ACTIVATE_NXTOPEN 1 /* activate on next open */ +#define FB_ACTIVATE_TEST 2 /* don't set, round up impossible */ +#define FB_ACTIVATE_MASK 15 + /* values */ +#define FB_ACTIVATE_VBL 16 /* activate values on next vbl */ +#define FB_CHANGE_CMAP_VBL 32 /* change colormap on vbl */ +#define FB_ACTIVATE_ALL 64 /* change all VCs on this fb */ +#define FB_ACTIVATE_FORCE 128 /* force apply even when no change*/ +#define FB_ACTIVATE_INV_MODE 256 /* invalidate videomode */ + +#define FB_SYNC_HOR_HIGH_ACT 1 /* horizontal sync high active */ +#define FB_SYNC_VERT_HIGH_ACT 2 /* vertical sync high active */ +#define FB_SYNC_EXT 4 /* external sync */ +#define FB_SYNC_COMP_HIGH_ACT 8 /* composite sync high active */ +#define FB_SYNC_BROADCAST 16 /* broadcast video timings */ + /* vtotal = 144d/288n/576i => PAL */ + /* vtotal = 121d/242n/484i => NTSC */ +#define FB_SYNC_ON_GREEN 32 /* sync on green */ + +#define FB_VMODE_NONINTERLACED 0 /* non interlaced */ +#define FB_VMODE_INTERLACED 1 /* interlaced */ +#define FB_VMODE_DOUBLE 2 /* double scan */ +#define FB_VMODE_ODD_FLD_FIRST 4 /* interlaced: top line first */ +#define FB_VMODE_MASK 255 + +#define FB_VMODE_YWRAP 256 /* ywrap instead of panning */ +#define FB_VMODE_SMOOTH_XPAN 512 /* smooth xpan possible (internally used) */ +#define FB_VMODE_CONUPDATE 512 /* don't update x/yoffset */ + +/* + * Display rotation support + */ +#define FB_ROTATE_UR 0 +#define FB_ROTATE_CW 1 +#define 
FB_ROTATE_UD 2 +#define FB_ROTATE_CCW 3 + +#define PICOS2KHZ(a) (1000000000UL/(a)) +#define KHZ2PICOS(a) (1000000000UL/(a)) + +struct fb_var_screeninfo { + __u32 xres; /* visible resolution */ + __u32 yres; + __u32 xres_virtual; /* virtual resolution */ + __u32 yres_virtual; + __u32 xoffset; /* offset from virtual to visible */ + __u32 yoffset; /* resolution */ + + __u32 bits_per_pixel; /* guess what */ + __u32 grayscale; /* != 0 Graylevels instead of colors */ + + struct fb_bitfield red; /* bitfield in fb mem if true color, */ + struct fb_bitfield green; /* else only length is significant */ + struct fb_bitfield blue; + struct fb_bitfield transp; /* transparency */ + + __u32 nonstd; /* != 0 Non standard pixel format */ + + __u32 activate; /* see FB_ACTIVATE_* */ + + __u32 height; /* height of picture in mm */ + __u32 width; /* width of picture in mm */ + + __u32 accel_flags; /* (OBSOLETE) see fb_info.flags */ + + /* Timing: All values in pixclocks, except pixclock (of course) */ + __u32 pixclock; /* pixel clock in ps (pico seconds) */ + __u32 left_margin; /* time from sync to picture */ + __u32 right_margin; /* time from picture to sync */ + __u32 upper_margin; /* time from sync to picture */ + __u32 lower_margin; + __u32 hsync_len; /* length of horizontal sync */ + __u32 vsync_len; /* length of vertical sync */ + __u32 sync; /* see FB_SYNC_* */ + __u32 vmode; /* see FB_VMODE_* */ + __u32 rotate; /* angle we rotate counter clockwise */ + __u32 reserved[5]; /* Reserved for future compatibility */ +}; + +struct fb_cmap { + __u32 start; /* First entry */ + __u32 len; /* Number of entries */ + __u16 *red; /* Red values */ + __u16 *green; + __u16 *blue; + __u16 *transp; /* transparency, can be NULL */ +}; + +struct fb_con2fbmap { + __u32 console; + __u32 framebuffer; +}; + +/* VESA Blanking Levels */ +#define VESA_NO_BLANKING 0 +#define VESA_VSYNC_SUSPEND 1 +#define VESA_HSYNC_SUSPEND 2 +#define VESA_POWERDOWN 3 + + +enum { + /* screen: unblanked, hsync: on, vsync: on */ + FB_BLANK_UNBLANK = VESA_NO_BLANKING, + + /* screen: blanked, hsync: on, vsync: on */ + FB_BLANK_NORMAL = VESA_NO_BLANKING + 1, + + /* screen: blanked, hsync: on, vsync: off */ + FB_BLANK_VSYNC_SUSPEND = VESA_VSYNC_SUSPEND + 1, + + /* screen: blanked, hsync: off, vsync: on */ + FB_BLANK_HSYNC_SUSPEND = VESA_HSYNC_SUSPEND + 1, + + /* screen: blanked, hsync: off, vsync: off */ + FB_BLANK_POWERDOWN = VESA_POWERDOWN + 1 +}; + +#define FB_VBLANK_VBLANKING 0x001 /* currently in a vertical blank */ +#define FB_VBLANK_HBLANKING 0x002 /* currently in a horizontal blank */ +#define FB_VBLANK_HAVE_VBLANK 0x004 /* vertical blanks can be detected */ +#define FB_VBLANK_HAVE_HBLANK 0x008 /* horizontal blanks can be detected */ +#define FB_VBLANK_HAVE_COUNT 0x010 /* global retrace counter is available */ +#define FB_VBLANK_HAVE_VCOUNT 0x020 /* the vcount field is valid */ +#define FB_VBLANK_HAVE_HCOUNT 0x040 /* the hcount field is valid */ +#define FB_VBLANK_VSYNCING 0x080 /* currently in a vsync */ +#define FB_VBLANK_HAVE_VSYNC 0x100 /* verical syncs can be detected */ + +struct fb_vblank { + __u32 flags; /* FB_VBLANK flags */ + __u32 count; /* counter of retraces since boot */ + __u32 vcount; /* current scanline position */ + __u32 hcount; /* current scandot position */ + __u32 reserved[4]; /* reserved for future compatibility */ +}; + +/* Internal HW accel */ +#define ROP_COPY 0 +#define ROP_XOR 1 + +struct fb_copyarea { + __u32 dx; + __u32 dy; + __u32 width; + __u32 height; + __u32 sx; + __u32 sy; +}; + +struct fb_fillrect { + __u32 dx; 
/* screen-relative */ + __u32 dy; + __u32 width; + __u32 height; + __u32 color; + __u32 rop; +}; + +struct fb_image { + __u32 dx; /* Where to place image */ + __u32 dy; + __u32 width; /* Size of image */ + __u32 height; + __u32 fg_color; /* Only used when a mono bitmap */ + __u32 bg_color; + __u8 depth; /* Depth of the image */ + const char *data; /* Pointer to image data */ + struct fb_cmap cmap; /* color map info */ +}; + +/* + * hardware cursor control + */ + +#define FB_CUR_SETIMAGE 0x01 +#define FB_CUR_SETPOS 0x02 +#define FB_CUR_SETHOT 0x04 +#define FB_CUR_SETCMAP 0x08 +#define FB_CUR_SETSHAPE 0x10 +#define FB_CUR_SETSIZE 0x20 +#define FB_CUR_SETALL 0xFF + +struct fbcurpos { + __u16 x, y; +}; + +struct fb_cursor { + __u16 set; /* what to set */ + __u16 enable; /* cursor on/off */ + __u16 rop; /* bitop operation */ + const char *mask; /* cursor mask bits */ + struct fbcurpos hot; /* cursor hot spot */ + struct fb_image image; /* Cursor image */ +}; + +#ifdef CONFIG_FB_BACKLIGHT +/* Settings for the generic backlight code */ +#define FB_BACKLIGHT_LEVELS 128 +#define FB_BACKLIGHT_MAX 0xFF +#endif + +#ifdef __KERNEL__ + +struct vm_area_struct; +struct fb_info; +struct device; +struct file; + +/* Definitions below are used in the parsed monitor specs */ +#define FB_DPMS_ACTIVE_OFF 1 +#define FB_DPMS_SUSPEND 2 +#define FB_DPMS_STANDBY 4 + +#define FB_DISP_DDI 1 +#define FB_DISP_ANA_700_300 2 +#define FB_DISP_ANA_714_286 4 +#define FB_DISP_ANA_1000_400 8 +#define FB_DISP_ANA_700_000 16 + +#define FB_DISP_MONO 32 +#define FB_DISP_RGB 64 +#define FB_DISP_MULTI 128 +#define FB_DISP_UNKNOWN 256 + +#define FB_SIGNAL_NONE 0 +#define FB_SIGNAL_BLANK_BLANK 1 +#define FB_SIGNAL_SEPARATE 2 +#define FB_SIGNAL_COMPOSITE 4 +#define FB_SIGNAL_SYNC_ON_GREEN 8 +#define FB_SIGNAL_SERRATION_ON 16 + +#define FB_MISC_PRIM_COLOR 1 +#define FB_MISC_1ST_DETAIL 2 /* First Detailed Timing is preferred */ +struct fb_chroma { + __u32 redx; /* in fraction of 1024 */ + __u32 greenx; + __u32 bluex; + __u32 whitex; + __u32 redy; + __u32 greeny; + __u32 bluey; + __u32 whitey; +}; + +struct fb_monspecs { + struct fb_chroma chroma; + struct fb_videomode *modedb; /* mode database */ + __u8 manufacturer[4]; /* Manufacturer */ + __u8 monitor[14]; /* Monitor String */ + __u8 serial_no[14]; /* Serial Number */ + __u8 ascii[14]; /* ? */ + __u32 modedb_len; /* mode database length */ + __u32 model; /* Monitor Model */ + __u32 serial; /* Serial Number - Integer */ + __u32 year; /* Year manufactured */ + __u32 week; /* Week Manufactured */ + __u32 hfmin; /* hfreq lower limit (Hz) */ + __u32 hfmax; /* hfreq upper limit (Hz) */ + __u32 dclkmin; /* pixelclock lower limit (Hz) */ + __u32 dclkmax; /* pixelclock upper limit (Hz) */ + __u16 input; /* display type - see FB_DISP_* */ + __u16 dpms; /* DPMS support - see FB_DPMS_ */ + __u16 signal; /* Signal Type - see FB_SIGNAL_* */ + __u16 vfmin; /* vfreq lower limit (Hz) */ + __u16 vfmax; /* vfreq upper limit (Hz) */ + __u16 gamma; /* Gamma - in fractions of 100 */ + __u16 gtf : 1; /* supports GTF */ + __u16 misc; /* Misc flags - see FB_MISC_* */ + __u8 version; /* EDID version... 
*/ + __u8 revision; /* ...and revision */ + __u8 max_x; /* Maximum horizontal size (cm) */ + __u8 max_y; /* Maximum vertical size (cm) */ +}; + +struct fb_cmap_user { + __u32 start; /* First entry */ + __u32 len; /* Number of entries */ + __u16 *red; /* Red values */ + __u16 *green; + __u16 *blue; + __u16 *transp; /* transparency, can be NULL */ +}; + +struct fb_image_user { + __u32 dx; /* Where to place image */ + __u32 dy; + __u32 width; /* Size of image */ + __u32 height; + __u32 fg_color; /* Only used when a mono bitmap */ + __u32 bg_color; + __u8 depth; /* Depth of the image */ + const char *data; /* Pointer to image data */ + struct fb_cmap_user cmap; /* color map info */ +}; + +struct fb_cursor_user { + __u16 set; /* what to set */ + __u16 enable; /* cursor on/off */ + __u16 rop; /* bitop operation */ + const char *mask; /* cursor mask bits */ + struct fbcurpos hot; /* cursor hot spot */ + struct fb_image_user image; /* Cursor image */ +}; + +/* + * Register/unregister for framebuffer events + */ + +/* The resolution of the passed in fb_info about to change */ +#define FB_EVENT_MODE_CHANGE 0x01 +/* The display on this fb_info is beeing suspended, no access to the + * framebuffer is allowed any more after that call returns + */ +#define FB_EVENT_SUSPEND 0x02 +/* The display on this fb_info was resumed, you can restore the display + * if you own it + */ +#define FB_EVENT_RESUME 0x03 +/* An entry from the modelist was removed */ +#define FB_EVENT_MODE_DELETE 0x04 +/* A driver registered itself */ +#define FB_EVENT_FB_REGISTERED 0x05 +/* A driver unregistered itself */ +#define FB_EVENT_FB_UNREGISTERED 0x06 +/* CONSOLE-SPECIFIC: get console to framebuffer mapping */ +#define FB_EVENT_GET_CONSOLE_MAP 0x07 +/* CONSOLE-SPECIFIC: set console to framebuffer mapping */ +#define FB_EVENT_SET_CONSOLE_MAP 0x08 +/* A hardware display blank change occurred */ +#define FB_EVENT_BLANK 0x09 +/* Private modelist is to be replaced */ +#define FB_EVENT_NEW_MODELIST 0x0A +/* The resolution of the passed in fb_info about to change and + all vc's should be changed */ +#define FB_EVENT_MODE_CHANGE_ALL 0x0B +/* A software display blank change occurred */ +#define FB_EVENT_CONBLANK 0x0C +/* Get drawing requirements */ +#define FB_EVENT_GET_REQ 0x0D +/* Unbind from the console if possible */ +#define FB_EVENT_FB_UNBIND 0x0E + +struct fb_event { + struct fb_info *info; + void *data; +}; + +struct fb_blit_caps { + u32 x; + u32 y; + u32 len; + u32 flags; +}; + +/* + * Pixmap structure definition + * + * The purpose of this structure is to translate data + * from the hardware independent format of fbdev to what + * format the hardware needs. 
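+ *
+ * Illustrative example (not taken from the upstream Linux comment; the
+ * numbers are hypothetical): a blitter that can only move 8x8 pixel
+ * blocks would advertise
+ *
+ *	blit_x = 1 << (8 - 1);
+ *	blit_y = 1 << (8 - 1);
+ *
+ * in its fb_pixmap, while leaving blit_x/blit_y at 0 means "no
+ * restriction" and is treated as 0xffffffff by the core.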
+ */ + +#define FB_PIXMAP_DEFAULT 1 /* used internally by fbcon */ +#define FB_PIXMAP_SYSTEM 2 /* memory is in system RAM */ +#define FB_PIXMAP_IO 4 /* memory is iomapped */ +#define FB_PIXMAP_SYNC 256 /* set if GPU can DMA */ + +struct fb_pixmap { + u8 *addr; /* pointer to memory */ + u32 size; /* size of buffer in bytes */ + u32 offset; /* current offset to buffer */ + u32 buf_align; /* byte alignment of each bitmap */ + u32 scan_align; /* alignment per scanline */ + u32 access_align; /* alignment per read/write (bits) */ + u32 flags; /* see FB_PIXMAP_* */ + u32 blit_x; /* supported bit block dimensions (1-32)*/ + u32 blit_y; /* Format: blit_x = 1 << (width - 1) */ + /* blit_y = 1 << (height - 1) */ + /* if 0, will be set to 0xffffffff (all)*/ + /* access methods */ + void (*writeio)(struct fb_info *info, void *dst, void *src, unsigned int size); + void (*readio) (struct fb_info *info, void *dst, void *src, unsigned int size); +}; + +#ifdef CONFIG_FB_DEFERRED_IO +struct fb_deferred_io { + /* delay between mkwrite and deferred handler */ + unsigned long delay; + struct mutex lock; /* mutex that protects the page list */ + struct list_head pagelist; /* list of touched pages */ + /* callback */ + void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); +}; +#endif + +/* FBINFO_* = fb_info.flags bit flags */ +#define FBINFO_MODULE 0x0001 /* Low-level driver is a module */ +#define FBINFO_HWACCEL_DISABLED 0x0002 + /* When FBINFO_HWACCEL_DISABLED is set: + * Hardware acceleration is turned off. Software implementations + * of required functions (copyarea(), fillrect(), and imageblit()) + * takes over; acceleration engine should be in a quiescent state */ + +/* hints */ +#define FBINFO_PARTIAL_PAN_OK 0x0040 /* otw use pan only for double-buffering */ +#define FBINFO_READS_FAST 0x0080 /* soft-copy faster than rendering */ + +/* + * A driver may set this flag to indicate that it does want a set_par to be + * called every time when fbcon_switch is executed. The advantage is that with + * this flag set you can really be sure that set_par is always called before + * any of the functions dependant on the correct hardware state or altering + * that state, even if you are using some broken X releases. The disadvantage + * is that it introduces unwanted delays to every console switch if set_par + * is slow. It is a good idea to try this flag in the drivers initialization + * code whenever there is a bug report related to switching between X and the + * framebuffer console. + */ +#define FBINFO_MISC_ALWAYS_SETPAR 0x40000 + +/* + * Host and GPU endianness differ. + */ +#define FBINFO_FOREIGN_ENDIAN 0x100000 +/* + * Big endian math. This is the same flags as above, but with different + * meaning, it is set by the fb subsystem depending FOREIGN_ENDIAN flag + * and host endianness. Drivers should not use this flag. 
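+ * Illustration (inferred behaviour, not spelled out in the upstream
+ * comment): on a little-endian host whose framebuffer stores pixels
+ * big-endian, the driver sets FBINFO_FOREIGN_ENDIAN above; the
+ * subsystem is then expected to set FBINFO_BE_MATH, which the
+ * FB_SHIFT_HIGH() and FB_SHIFT_LOW() helpers further down test via
+ * fb_be_math().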
+ */ +#define FBINFO_BE_MATH 0x100000 + +struct fb_info { + int node; + int flags; + struct fb_var_screeninfo var; /* Current var */ + struct fb_fix_screeninfo fix; /* Current fix */ + struct fb_monspecs monspecs; /* Current Monitor specs */ + struct fb_pixmap pixmap; /* Image hardware mapper */ + struct fb_pixmap sprite; /* Cursor hardware mapper */ + struct fb_cmap cmap; /* Current cmap */ + struct list_head modelist; /* mode list */ + struct fb_videomode *mode; /* current mode */ + + char *screen_base; /* Virtual address */ + unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ + void *pseudo_palette; /* Fake palette of 16 colors */ +#define FBINFO_STATE_RUNNING 0 +#define FBINFO_STATE_SUSPENDED 1 + u32 state; /* Hardware state i.e suspend */ + void *fbcon_par; /* fbcon use-only private area */ + /* From here on everything is device dependent */ + void *par; +}; + +#define FBINFO_DEFAULT 0 + +#define FBINFO_FLAG_MODULE FBINFO_MODULE +#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT + +/* This will go away */ +#if defined(__sparc__) + +/* We map all of our framebuffers such that big-endian accesses + * are what we want, so the following is sufficient. + */ + +/* This will go away */ +#define fb_readb sbus_readb +#define fb_readw sbus_readw +#define fb_readl sbus_readl +#define fb_readq sbus_readq +#define fb_writeb sbus_writeb +#define fb_writew sbus_writew +#define fb_writel sbus_writel +#define fb_writeq sbus_writeq +#define fb_memset sbus_memset_io + +#elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__bfin__) + +#define fb_readb __raw_readb +#define fb_readw __raw_readw +#define fb_readl __raw_readl +#define fb_readq __raw_readq +#define fb_writeb __raw_writeb +#define fb_writew __raw_writew +#define fb_writel __raw_writel +#define fb_writeq __raw_writeq +#define fb_memset memset_io + +#else + +#define fb_readb(addr) (*(volatile u8 *) (addr)) +#define fb_readw(addr) (*(volatile u16 *) (addr)) +#define fb_readl(addr) (*(volatile u32 *) (addr)) +#define fb_readq(addr) (*(volatile u64 *) (addr)) +#define fb_writeb(b,addr) (*(volatile u8 *) (addr) = (b)) +#define fb_writew(b,addr) (*(volatile u16 *) (addr) = (b)) +#define fb_writel(b,addr) (*(volatile u32 *) (addr) = (b)) +#define fb_writeq(b,addr) (*(volatile u64 *) (addr) = (b)) +#define fb_memset memset + +#endif + +#define FB_LEFT_POS(p, bpp) (fb_be_math(p) ? (32 - (bpp)) : 0) +#define FB_SHIFT_HIGH(p, val, bits) (fb_be_math(p) ? (val) >> (bits) : \ + (val) << (bits)) +#define FB_SHIFT_LOW(p, val, bits) (fb_be_math(p) ? 
(val) << (bits) : \ + (val) >> (bits)) +/* drivers/video/fbmon.c */ +#define FB_MAXTIMINGS 0 +#define FB_VSYNCTIMINGS 1 +#define FB_HSYNCTIMINGS 2 +#define FB_DCLKTIMINGS 3 +#define FB_IGNOREMON 0x100 + +#define FB_MODE_IS_UNKNOWN 0 +#define FB_MODE_IS_DETAILED 1 +#define FB_MODE_IS_STANDARD 2 +#define FB_MODE_IS_VESA 4 +#define FB_MODE_IS_CALCULATED 8 +#define FB_MODE_IS_FIRST 16 +#define FB_MODE_IS_FROM_VAR 32 + + +/* drivers/video/fbcmap.c */ + +extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); +extern void fb_dealloc_cmap(struct fb_cmap *cmap); +extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); +extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); +extern int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *fb_info); +extern int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *fb_info); +extern const struct fb_cmap *fb_default_cmap(int len); +extern void fb_invert_cmaps(void); + +struct fb_videomode { + const char *name; /* optional */ + u32 refresh; /* optional */ + u32 xres; + u32 yres; + u32 pixclock; + u32 left_margin; + u32 right_margin; + u32 upper_margin; + u32 lower_margin; + u32 hsync_len; + u32 vsync_len; + u32 sync; + u32 vmode; + u32 flag; +}; + +int board_video_skip(void); + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_FB_H */ diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h new file mode 100644 index 0000000..0d62aef --- /dev/null +++ b/include/linux/if_ether.h @@ -0,0 +1,178 @@ +/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the Ethernet IEEE 802.3 interface. + * + * Version: @(#)if_ether.h 1.0.1a 02/08/94 + * + * Author: Fred N. van Kempen, + * Donald Becker, + * Alan Cox, + * Steve Whitehouse, + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _UAPI_LINUX_IF_ETHER_H +#define _UAPI_LINUX_IF_ETHER_H + +#include + +/* + * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble + * and FCS/CRC (frame check sequence). + */ + +#define ETH_ALEN 6 /* Octets in one ethernet addr */ +#define ETH_TLEN 2 /* Octets in ethernet type field */ +#define ETH_HLEN 14 /* Total octets in header. */ +#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */ +#define ETH_DATA_LEN 1500 /* Max. octets in payload */ +#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */ +#define ETH_FCS_LEN 4 /* Octets in the FCS */ + +#define ETH_MIN_MTU 68 /* Min IPv4 MTU per RFC791 */ +#define ETH_MAX_MTU 0xFFFFU /* 65535, same as IP_MAX_MTU */ + +/* + * These are the defined Ethernet Protocol ID's. 
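+ *
+ * Example (illustrative only, not part of the upstream header): the
+ * ethertype is carried big-endian on the wire, so a driver typically
+ * matches it against these IDs with something like
+ *
+ *	if (ntohs(eth->h_proto) == ETH_P_IP)
+ *		handle_ipv4(frame + ETH_HLEN, len - ETH_HLEN);
+ *
+ * where eth points at the Ethernet header, h_proto is its 16-bit
+ * ethertype field, and ntohs(), handle_ipv4(), frame and len are
+ * assumed to be provided by the surrounding driver code.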
+ */ + +#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */ +#define ETH_P_PUP 0x0200 /* Xerox PUP packet */ +#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */ +#define ETH_P_TSN 0x22F0 /* TSN (IEEE 1722) packet */ +#define ETH_P_ERSPAN2 0x22EB /* ERSPAN version 2 (type III) */ +#define ETH_P_IP 0x0800 /* Internet Protocol packet */ +#define ETH_P_X25 0x0805 /* CCITT X.25 */ +#define ETH_P_ARP 0x0806 /* Address Resolution packet */ +#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet */ + /* [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_IEEEPUP 0x0a00 /* Xerox IEEE802.3 PUP packet */ +#define ETH_P_IEEEPUPAT 0x0a01 /* Xerox IEEE802.3 PUP Addr Trans packet */ +#define ETH_P_BATMAN 0x4305 /* B.A.T.M.A.N.-Advanced packet */ + /* [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_DEC 0x6000 /* DEC Assigned proto */ +#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */ +#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */ +#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */ +#define ETH_P_LAT 0x6004 /* DEC LAT */ +#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */ +#define ETH_P_CUST 0x6006 /* DEC Customer use */ +#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */ +#define ETH_P_TEB 0x6558 /* Trans Ether Bridging */ +#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */ +#define ETH_P_ATALK 0x809B /* Appletalk DDP */ +#define ETH_P_AARP 0x80F3 /* Appletalk AARP */ +#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */ +#define ETH_P_ERSPAN 0x88BE /* ERSPAN type II */ +#define ETH_P_IPX 0x8137 /* IPX over DIX */ +#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ +#define ETH_P_PAUSE 0x8808 /* IEEE Pause frames. See 802.3 31B */ +#define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */ +#define ETH_P_WCCP 0x883E /* Web-cache coordination */ + /* protocol defined in */ + /* draft-wilson-wrec-wccp-v2-00.txt */ +#define ETH_P_MPLS_UC 0x8847 /* MPLS Unicast traffic */ +#define ETH_P_MPLS_MC 0x8848 /* MPLS Multicast traffic */ +#define ETH_P_ATMMPOA 0x884c /* MultiProtocol Over ATM */ +#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */ +#define ETH_P_PPP_SES 0x8864 /* PPPoE session messages */ +#define ETH_P_LINK_CTL 0x886c /* HPNA, wlan link local tunnel */ +#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport over Ethernet */ +#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */ +#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */ +#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */ +#define ETH_P_802_EX1 0x88B5 /* 802.1 Local Experimental 1. 
*/ +#define ETH_P_PREAUTH 0x88C7 /* 802.11 Preauthentication */ +#define ETH_P_TIPC 0x88CA /* TIPC */ +#define ETH_P_MACSEC 0x88E5 /* 802.1ae MACsec */ +#define ETH_P_8021AH 0x88E7 /* 802.1ah Backbone Service Tag */ +#define ETH_P_MVRP 0x88F5 /* 802.1Q MVRP */ +#define ETH_P_1588 0x88F7 /* IEEE 1588 Timesync */ +#define ETH_P_NCSI 0x88F8 /* NCSI protocol */ +#define ETH_P_PRP 0x88FB /* IEC 62439-3 PRP/HSRv0 */ +#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */ +#define ETH_P_IBOE 0x8915 /* Infiniband over Ethernet */ +#define ETH_P_TDLS 0x890D /* TDLS */ +#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */ +#define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent */ + /* Handover Protocol */ +#define ETH_P_HSR 0x892F /* IEC 62439-3 HSRv1 */ +#define ETH_P_NSH 0x894F /* Network Service Header */ +#define ETH_P_LOOPBACK 0x9000 /* Ethernet loopback packet, per IEEE 802.3 */ +#define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN */ + /* [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN */ + /* [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN] */ + /* [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_EDSA 0xDADA /* Ethertype DSA */ + /* [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_IFE 0xED3E /* ForCES inter-FE LFB type */ +#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv */ + /* [ NOT AN OFFICIALLY REGISTERED ID ] */ + +#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is less */ + /* than this value then the frame is Ethernet */ + /* II. Else it is 802.3 */ + +/* + * Non DIX types. Won't clash for 1500 types. + */ + +#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */ +#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */ +#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */ +#define ETH_P_802_2 0x0004 /* 802.2 frames */ +#define ETH_P_SNAP 0x0005 /* Internal only */ +#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */ +#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/ +#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */ +#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */ +#define ETH_P_CAN 0x000C /* CAN: Controller Area Network */ +#define ETH_P_CANFD 0x000D /* CANFD: CAN flexible data rate*/ +#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/ +#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */ +#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */ +#define ETH_P_CONTROL 0x0016 /* Card specific control frames */ +#define ETH_P_IRDA 0x0017 /* Linux-IrDA */ +#define ETH_P_ECONET 0x0018 /* Acorn Econet */ +#define ETH_P_HDLC 0x0019 /* HDLC frames */ +#define ETH_P_ARCNET 0x001A /* 1A for ArcNet :-) */ +#define ETH_P_DSA 0x001B /* Distributed Switch Arch */ +#define ETH_P_TRAILER 0x001C /* Trailer switch tagging */ +#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ +#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */ +#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */ +#define ETH_P_XDSA 0x00F8 /* Multiplexed DSA protocol */ +#define ETH_P_MAP 0x00F9 /* Qualcomm multiplexing and */ + /* aggregation protocol */ + +/* The following macros come from Linux kernel include/linux/if_vlan.h */ + +#define VLAN_HLEN 4 /* The additional bytes required by VLAN */ + /* (in addition to the Ethernet header) */ +#define VLAN_ETH_HLEN 18 /* Total octets in header. */ +#define VLAN_ETH_ZLEN 64 /* Min. octets in frame sans FCS */ + +/* + * According to 802.3ac, the packet can be 4 bytes longer. 
--Klika Jan + */ +#define VLAN_ETH_DATA_LEN 1500 /* Max. octets in payload */ +#define VLAN_ETH_FRAME_LEN 1518 /* Max. octets in frame sans FCS */ + +#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ +#define VLAN_PRIO_SHIFT 13 +#define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ +#define VLAN_TAG_PRESENT VLAN_CFI_MASK +#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ +#define VLAN_N_VID 4096 + +#endif /* _UAPI_LINUX_IF_ETHER_H */ diff --git a/include/linux/immap_qe.h b/include/linux/immap_qe.h new file mode 100644 index 0000000..022771f --- /dev/null +++ b/include/linux/immap_qe.h @@ -0,0 +1,593 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * QUICC Engine (QE) Internal Memory Map. + * The Internal Memory Map for devices with QE on them. This + * is the superset of all QE devices (8360, etc.). + * + * Copyright (c) 2006-2009, 2011 Freescale Semiconductor, Inc. + * Author: Shlomi Gridih + */ + +#ifndef __IMMAP_QE_H__ +#define __IMMAP_QE_H__ + +#ifdef CONFIG_MPC83xx +#if defined(CONFIG_ARCH_MPC8360) +#define QE_MURAM_SIZE 0xc000UL +#define MAX_QE_RISC 2 +#define QE_NUM_OF_SNUM 28 +#elif defined(CONFIG_ARCH_MPC832X) || defined(CONFIG_ARCH_MPC8309) +#define QE_MURAM_SIZE 0x4000UL +#define MAX_QE_RISC 1 +#define QE_NUM_OF_SNUM 28 +#endif +#endif + +#ifdef CONFIG_ARCH_LS1021A +#define QE_MURAM_SIZE 0x6000UL +#define MAX_QE_RISC 1 +#define QE_NUM_OF_SNUM 28 +#endif + +#ifdef CONFIG_PPC +#define QE_IMMR_OFFSET 0x00140000 +#else +#define QE_IMMR_OFFSET 0x01400000 +#endif + +/* QE I-RAM */ +typedef struct qe_iram { + u32 iadd; /* I-RAM Address Register */ + u32 idata; /* I-RAM Data Register */ + u8 res0[0x4]; + u32 iready; + u8 res1[0x70]; +} __attribute__ ((packed)) qe_iram_t; + +/* QE Interrupt Controller */ +typedef struct qe_ic { + u32 qicr; + u32 qivec; + u32 qripnr; + u32 qipnr; + u32 qipxcc; + u32 qipycc; + u32 qipwcc; + u32 qipzcc; + u32 qimr; + u32 qrimr; + u32 qicnr; + u8 res0[0x4]; + u32 qiprta; + u32 qiprtb; + u8 res1[0x4]; + u32 qricr; + u8 res2[0x20]; + u32 qhivec; + u8 res3[0x1C]; +} __attribute__ ((packed)) qe_ic_t; + +/* Communications Processor */ +typedef struct cp_qe { + u32 cecr; /* QE command register */ + u32 ceccr; /* QE controller configuration register */ + u32 cecdr; /* QE command data register */ + u8 res0[0xA]; + u16 ceter; /* QE timer event register */ + u8 res1[0x2]; + u16 cetmr; /* QE timers mask register */ + u32 cetscr; /* QE time-stamp timer control register */ + u32 cetsr1; /* QE time-stamp register 1 */ + u32 cetsr2; /* QE time-stamp register 2 */ + u8 res2[0x8]; + u32 cevter; /* QE virtual tasks event register */ + u32 cevtmr; /* QE virtual tasks mask register */ + u16 cercr; /* QE RAM control register */ + u8 res3[0x2]; + u8 res4[0x24]; + u16 ceexe1; /* QE external request 1 event register */ + u8 res5[0x2]; + u16 ceexm1; /* QE external request 1 mask register */ + u8 res6[0x2]; + u16 ceexe2; /* QE external request 2 event register */ + u8 res7[0x2]; + u16 ceexm2; /* QE external request 2 mask register */ + u8 res8[0x2]; + u16 ceexe3; /* QE external request 3 event register */ + u8 res9[0x2]; + u16 ceexm3; /* QE external request 3 mask register */ + u8 res10[0x2]; + u16 ceexe4; /* QE external request 4 event register */ + u8 res11[0x2]; + u16 ceexm4; /* QE external request 4 mask register */ + u8 res12[0x2]; + u8 res13[0x280]; +} __attribute__ ((packed)) cp_qe_t; + +/* QE Multiplexer */ +typedef struct qe_mux { + u32 cmxgcr; /* CMX general clock route register */ + u32 cmxsi1cr_l; /* CMX SI1 clock route low register */ + u32 cmxsi1cr_h; /* 
CMX SI1 clock route high register */ + u32 cmxsi1syr; /* CMX SI1 SYNC route register */ + u32 cmxucr1; /* CMX UCC1, UCC3 clock route register */ + u32 cmxucr2; /* CMX UCC5, UCC7 clock route register */ + u32 cmxucr3; /* CMX UCC2, UCC4 clock route register */ + u32 cmxucr4; /* CMX UCC6, UCC8 clock route register */ + u32 cmxupcr; /* CMX UPC clock route register */ + u8 res0[0x1C]; +} __attribute__ ((packed)) qe_mux_t; + +/* QE Timers */ +typedef struct qe_timers { + u8 gtcfr1; /* Timer 1 2 global configuration register */ + u8 res0[0x3]; + u8 gtcfr2; /* Timer 3 4 global configuration register */ + u8 res1[0xB]; + u16 gtmdr1; /* Timer 1 mode register */ + u16 gtmdr2; /* Timer 2 mode register */ + u16 gtrfr1; /* Timer 1 reference register */ + u16 gtrfr2; /* Timer 2 reference register */ + u16 gtcpr1; /* Timer 1 capture register */ + u16 gtcpr2; /* Timer 2 capture register */ + u16 gtcnr1; /* Timer 1 counter */ + u16 gtcnr2; /* Timer 2 counter */ + u16 gtmdr3; /* Timer 3 mode register */ + u16 gtmdr4; /* Timer 4 mode register */ + u16 gtrfr3; /* Timer 3 reference register */ + u16 gtrfr4; /* Timer 4 reference register */ + u16 gtcpr3; /* Timer 3 capture register */ + u16 gtcpr4; /* Timer 4 capture register */ + u16 gtcnr3; /* Timer 3 counter */ + u16 gtcnr4; /* Timer 4 counter */ + u16 gtevr1; /* Timer 1 event register */ + u16 gtevr2; /* Timer 2 event register */ + u16 gtevr3; /* Timer 3 event register */ + u16 gtevr4; /* Timer 4 event register */ + u16 gtps; /* Timer 1 prescale register */ + u8 res2[0x46]; +} __attribute__ ((packed)) qe_timers_t; + +/* BRG */ +typedef struct qe_brg { + u32 brgc1; /* BRG1 configuration register */ + u32 brgc2; /* BRG2 configuration register */ + u32 brgc3; /* BRG3 configuration register */ + u32 brgc4; /* BRG4 configuration register */ + u32 brgc5; /* BRG5 configuration register */ + u32 brgc6; /* BRG6 configuration register */ + u32 brgc7; /* BRG7 configuration register */ + u32 brgc8; /* BRG8 configuration register */ + u32 brgc9; /* BRG9 configuration register */ + u32 brgc10; /* BRG10 configuration register */ + u32 brgc11; /* BRG11 configuration register */ + u32 brgc12; /* BRG12 configuration register */ + u32 brgc13; /* BRG13 configuration register */ + u32 brgc14; /* BRG14 configuration register */ + u32 brgc15; /* BRG15 configuration register */ + u32 brgc16; /* BRG16 configuration register */ + u8 res0[0x40]; +} __attribute__ ((packed)) qe_brg_t; + +/* SPI */ +typedef struct spi { + u8 res0[0x20]; + u32 spmode; /* SPI mode register */ + u8 res1[0x2]; + u8 spie; /* SPI event register */ + u8 res2[0x1]; + u8 res3[0x2]; + u8 spim; /* SPI mask register */ + u8 res4[0x1]; + u8 res5[0x1]; + u8 spcom; /* SPI command register */ + u8 res6[0x2]; + u32 spitd; /* SPI transmit data register (cpu mode) */ + u32 spird; /* SPI receive data register (cpu mode) */ + u8 res7[0x8]; +} __attribute__ ((packed)) spi_t; + +/* SI */ +typedef struct si1 { + u16 siamr1; /* SI1 TDMA mode register */ + u16 sibmr1; /* SI1 TDMB mode register */ + u16 sicmr1; /* SI1 TDMC mode register */ + u16 sidmr1; /* SI1 TDMD mode register */ + u8 siglmr1_h; /* SI1 global mode register high */ + u8 res0[0x1]; + u8 sicmdr1_h; /* SI1 command register high */ + u8 res2[0x1]; + u8 sistr1_h; /* SI1 status register high */ + u8 res3[0x1]; + u16 sirsr1_h; /* SI1 RAM shadow address register high */ + u8 sitarc1; /* SI1 RAM counter Tx TDMA */ + u8 sitbrc1; /* SI1 RAM counter Tx TDMB */ + u8 sitcrc1; /* SI1 RAM counter Tx TDMC */ + u8 sitdrc1; /* SI1 RAM counter Tx TDMD */ + u8 sirarc1; /* SI1 RAM 
counter Rx TDMA */ + u8 sirbrc1; /* SI1 RAM counter Rx TDMB */ + u8 sircrc1; /* SI1 RAM counter Rx TDMC */ + u8 sirdrc1; /* SI1 RAM counter Rx TDMD */ + u8 res4[0x8]; + u16 siemr1; /* SI1 TDME mode register 16 bits */ + u16 sifmr1; /* SI1 TDMF mode register 16 bits */ + u16 sigmr1; /* SI1 TDMG mode register 16 bits */ + u16 sihmr1; /* SI1 TDMH mode register 16 bits */ + u8 siglmg1_l; /* SI1 global mode register low 8 bits */ + u8 res5[0x1]; + u8 sicmdr1_l; /* SI1 command register low 8 bits */ + u8 res6[0x1]; + u8 sistr1_l; /* SI1 status register low 8 bits */ + u8 res7[0x1]; + u16 sirsr1_l; /* SI1 RAM shadow address register low 16 bits */ + u8 siterc1; /* SI1 RAM counter Tx TDME 8 bits */ + u8 sitfrc1; /* SI1 RAM counter Tx TDMF 8 bits */ + u8 sitgrc1; /* SI1 RAM counter Tx TDMG 8 bits */ + u8 sithrc1; /* SI1 RAM counter Tx TDMH 8 bits */ + u8 sirerc1; /* SI1 RAM counter Rx TDME 8 bits */ + u8 sirfrc1; /* SI1 RAM counter Rx TDMF 8 bits */ + u8 sirgrc1; /* SI1 RAM counter Rx TDMG 8 bits */ + u8 sirhrc1; /* SI1 RAM counter Rx TDMH 8 bits */ + u8 res8[0x8]; + u32 siml1; /* SI1 multiframe limit register */ + u8 siedm1; /* SI1 extended diagnostic mode register */ + u8 res9[0xBB]; +} __attribute__ ((packed)) si1_t; + +/* SI Routing Tables */ +typedef struct sir { + u8 tx[0x400]; + u8 rx[0x400]; + u8 res0[0x800]; +} __attribute__ ((packed)) sir_t; + +/* USB Controller. */ +typedef struct usb_ctlr { + u8 usb_usmod; + u8 usb_usadr; + u8 usb_uscom; + u8 res1[1]; + u16 usb_usep1; + u16 usb_usep2; + u16 usb_usep3; + u16 usb_usep4; + u8 res2[4]; + u16 usb_usber; + u8 res3[2]; + u16 usb_usbmr; + u8 res4[1]; + u8 usb_usbs; + u16 usb_ussft; + u8 res5[2]; + u16 usb_usfrn; + u8 res6[0x22]; +} __attribute__ ((packed)) usb_t; + +/* MCC */ +typedef struct mcc { + u32 mcce; /* MCC event register */ + u32 mccm; /* MCC mask register */ + u32 mccf; /* MCC configuration register */ + u32 merl; /* MCC emergency request level register */ + u8 res0[0xF0]; +} __attribute__ ((packed)) mcc_t; + +/* QE UCC Slow */ +typedef struct ucc_slow { + u32 gumr_l; /* UCCx general mode register (low) */ + u32 gumr_h; /* UCCx general mode register (high) */ + u16 upsmr; /* UCCx protocol-specific mode register */ + u8 res0[0x2]; + u16 utodr; /* UCCx transmit on demand register */ + u16 udsr; /* UCCx data synchronization register */ + u16 ucce; /* UCCx event register */ + u8 res1[0x2]; + u16 uccm; /* UCCx mask register */ + u8 res2[0x1]; + u8 uccs; /* UCCx status register */ + u8 res3[0x24]; + u16 utpt; + u8 guemr; /* UCC general extended mode register */ + u8 res4[0x200 - 0x091]; +} __attribute__ ((packed)) ucc_slow_t; + +typedef struct ucc_mii_mng { + u32 miimcfg; /* MII management configuration reg */ + u32 miimcom; /* MII management command reg */ + u32 miimadd; /* MII management address reg */ + u32 miimcon; /* MII management control reg */ + u32 miimstat; /* MII management status reg */ + u32 miimind; /* MII management indication reg */ + u32 ifctl; /* interface control reg */ + u32 ifstat; /* interface statux reg */ +} __attribute__ ((packed))uec_mii_t; + +typedef struct ucc_ethernet { + u32 maccfg1; /* mac configuration reg. 1 */ + u32 maccfg2; /* mac configuration reg. 2 */ + u32 ipgifg; /* interframe gap reg. */ + u32 hafdup; /* half-duplex reg. 
*/ + u8 res1[0x10]; + u32 miimcfg; /* MII management configuration reg */ + u32 miimcom; /* MII management command reg */ + u32 miimadd; /* MII management address reg */ + u32 miimcon; /* MII management control reg */ + u32 miimstat; /* MII management status reg */ + u32 miimind; /* MII management indication reg */ + u32 ifctl; /* interface control reg */ + u32 ifstat; /* interface statux reg */ + u32 macstnaddr1; /* mac station address part 1 reg */ + u32 macstnaddr2; /* mac station address part 2 reg */ + u8 res2[0x8]; + u32 uempr; /* UCC Ethernet Mac parameter reg */ + u32 utbipar; /* UCC tbi address reg */ + u16 uescr; /* UCC Ethernet statistics control reg */ + u8 res3[0x180 - 0x15A]; + u32 tx64; /* Total number of frames (including bad + * frames) transmitted that were exactly + * of the minimal length (64 for un tagged, + * 68 for tagged, or with length exactly + * equal to the parameter MINLength */ + u32 tx127; /* Total number of frames (including bad + * frames) transmitted that were between + * MINLength (Including FCS length==4) + * and 127 octets */ + u32 tx255; /* Total number of frames (including bad + * frames) transmitted that were between + * 128 (Including FCS length==4) and 255 + * octets */ + u32 rx64; /* Total number of frames received including + * bad frames that were exactly of the + * mninimal length (64 bytes) */ + u32 rx127; /* Total number of frames (including bad + * frames) received that were between + * MINLength (Including FCS length==4) + * and 127 octets */ + u32 rx255; /* Total number of frames (including + * bad frames) received that were between + * 128 (Including FCS length==4) and 255 + * octets */ + u32 txok; /* Total number of octets residing in frames + * that where involved in succesfull + * transmission */ + u16 txcf; /* Total number of PAUSE control frames + * transmitted by this MAC */ + u8 res4[0x2]; + u32 tmca; /* Total number of frames that were transmitted + * succesfully with the group address bit set + * that are not broadcast frames */ + u32 tbca; /* Total number of frames transmitted + * succesfully that had destination address + * field equal to the broadcast address */ + u32 rxfok; /* Total number of frames received OK */ + u32 rxbok; /* Total number of octets received OK */ + u32 rbyt; /* Total number of octets received including + * octets in bad frames. Must be implemented + * in HW because it includes octets in frames + * that never even reach the UCC */ + u32 rmca; /* Total number of frames that were received + * succesfully with the group address bit set + * that are not broadcast frames */ + u32 rbca; /* Total number of frames received succesfully + * that had destination address equal to the + * broadcast address */ + u32 scar; /* Statistics carry register */ + u32 scam; /* Statistics caryy mask register */ + u8 res5[0x200 - 0x1c4]; +} __attribute__ ((packed)) uec_t; + +/* QE UCC Fast */ +typedef struct ucc_fast { + u32 gumr; /* UCCx general mode register */ + u32 upsmr; /* UCCx protocol-specific mode register */ + u16 utodr; /* UCCx transmit on demand register */ + u8 res0[0x2]; + u16 udsr; /* UCCx data synchronization register */ + u8 res1[0x2]; + u32 ucce; /* UCCx event register */ + u32 uccm; /* UCCx mask register. 
*/ + u8 uccs; /* UCCx status register */ + u8 res2[0x7]; + u32 urfb; /* UCC receive FIFO base */ + u16 urfs; /* UCC receive FIFO size */ + u8 res3[0x2]; + u16 urfet; /* UCC receive FIFO emergency threshold */ + u16 urfset; /* UCC receive FIFO special emergency + * threshold */ + u32 utfb; /* UCC transmit FIFO base */ + u16 utfs; /* UCC transmit FIFO size */ + u8 res4[0x2]; + u16 utfet; /* UCC transmit FIFO emergency threshold */ + u8 res5[0x2]; + u16 utftt; /* UCC transmit FIFO transmit threshold */ + u8 res6[0x2]; + u16 utpt; /* UCC transmit polling timer */ + u8 res7[0x2]; + u32 urtry; /* UCC retry counter register */ + u8 res8[0x4C]; + u8 guemr; /* UCC general extended mode register */ + u8 res9[0x100 - 0x091]; + uec_t ucc_eth; +} __attribute__ ((packed)) ucc_fast_t; + +/* QE UCC */ +typedef struct ucc_common { + u8 res1[0x90]; + u8 guemr; + u8 res2[0x200 - 0x091]; +} __attribute__ ((packed)) ucc_common_t; + +typedef struct ucc { + union { + ucc_slow_t slow; + ucc_fast_t fast; + ucc_common_t common; + }; +} __attribute__ ((packed)) ucc_t; + +/* MultiPHY UTOPIA POS Controllers (UPC) */ +typedef struct upc { + u32 upgcr; /* UTOPIA/POS general configuration register */ + u32 uplpa; /* UTOPIA/POS last PHY address */ + u32 uphec; /* ATM HEC register */ + u32 upuc; /* UTOPIA/POS UCC configuration */ + u32 updc1; /* UTOPIA/POS device 1 configuration */ + u32 updc2; /* UTOPIA/POS device 2 configuration */ + u32 updc3; /* UTOPIA/POS device 3 configuration */ + u32 updc4; /* UTOPIA/POS device 4 configuration */ + u32 upstpa; /* UTOPIA/POS STPA threshold */ + u8 res0[0xC]; + u32 updrs1_h; /* UTOPIA/POS device 1 rate select */ + u32 updrs1_l; /* UTOPIA/POS device 1 rate select */ + u32 updrs2_h; /* UTOPIA/POS device 2 rate select */ + u32 updrs2_l; /* UTOPIA/POS device 2 rate select */ + u32 updrs3_h; /* UTOPIA/POS device 3 rate select */ + u32 updrs3_l; /* UTOPIA/POS device 3 rate select */ + u32 updrs4_h; /* UTOPIA/POS device 4 rate select */ + u32 updrs4_l; /* UTOPIA/POS device 4 rate select */ + u32 updrp1; /* UTOPIA/POS device 1 receive priority low */ + u32 updrp2; /* UTOPIA/POS device 2 receive priority low */ + u32 updrp3; /* UTOPIA/POS device 3 receive priority low */ + u32 updrp4; /* UTOPIA/POS device 4 receive priority low */ + u32 upde1; /* UTOPIA/POS device 1 event */ + u32 upde2; /* UTOPIA/POS device 2 event */ + u32 upde3; /* UTOPIA/POS device 3 event */ + u32 upde4; /* UTOPIA/POS device 4 event */ + u16 uprp1; + u16 uprp2; + u16 uprp3; + u16 uprp4; + u8 res1[0x8]; + u16 uptirr1_0; /* Device 1 transmit internal rate 0 */ + u16 uptirr1_1; /* Device 1 transmit internal rate 1 */ + u16 uptirr1_2; /* Device 1 transmit internal rate 2 */ + u16 uptirr1_3; /* Device 1 transmit internal rate 3 */ + u16 uptirr2_0; /* Device 2 transmit internal rate 0 */ + u16 uptirr2_1; /* Device 2 transmit internal rate 1 */ + u16 uptirr2_2; /* Device 2 transmit internal rate 2 */ + u16 uptirr2_3; /* Device 2 transmit internal rate 3 */ + u16 uptirr3_0; /* Device 3 transmit internal rate 0 */ + u16 uptirr3_1; /* Device 3 transmit internal rate 1 */ + u16 uptirr3_2; /* Device 3 transmit internal rate 2 */ + u16 uptirr3_3; /* Device 3 transmit internal rate 3 */ + u16 uptirr4_0; /* Device 4 transmit internal rate 0 */ + u16 uptirr4_1; /* Device 4 transmit internal rate 1 */ + u16 uptirr4_2; /* Device 4 transmit internal rate 2 */ + u16 uptirr4_3; /* Device 4 transmit internal rate 3 */ + u32 uper1; /* Device 1 port enable register */ + u32 uper2; /* Device 2 port enable register */ + u32 uper3; /* Device 3 
port enable register */ + u32 uper4; /* Device 4 port enable register */ + u8 res2[0x150]; +} __attribute__ ((packed)) upc_t; + +/* SDMA */ +typedef struct sdma { + u32 sdsr; /* Serial DMA status register */ + u32 sdmr; /* Serial DMA mode register */ + u32 sdtr1; /* SDMA system bus threshold register */ + u32 sdtr2; /* SDMA secondary bus threshold register */ + u32 sdhy1; /* SDMA system bus hysteresis register */ + u32 sdhy2; /* SDMA secondary bus hysteresis register */ + u32 sdta1; /* SDMA system bus address register */ + u32 sdta2; /* SDMA secondary bus address register */ + u32 sdtm1; /* SDMA system bus MSNUM register */ + u32 sdtm2; /* SDMA secondary bus MSNUM register */ + u8 res0[0x10]; + u32 sdaqr; /* SDMA address bus qualify register */ + u32 sdaqmr; /* SDMA address bus qualify mask register */ + u8 res1[0x4]; + u32 sdwbcr; /* SDMA CAM entries base register */ + u8 res2[0x38]; +} __attribute__ ((packed)) sdma_t; + +/* Debug Space */ +typedef struct dbg { + u32 bpdcr; /* Breakpoint debug command register */ + u32 bpdsr; /* Breakpoint debug status register */ + u32 bpdmr; /* Breakpoint debug mask register */ + u32 bprmrr0; /* Breakpoint request mode risc register 0 */ + u32 bprmrr1; /* Breakpoint request mode risc register 1 */ + u8 res0[0x8]; + u32 bprmtr0; /* Breakpoint request mode trb register 0 */ + u32 bprmtr1; /* Breakpoint request mode trb register 1 */ + u8 res1[0x8]; + u32 bprmir; /* Breakpoint request mode immediate register */ + u32 bprmsr; /* Breakpoint request mode serial register */ + u32 bpemr; /* Breakpoint exit mode register */ + u8 res2[0x48]; +} __attribute__ ((packed)) dbg_t; + +/* + * RISC Special Registers (Trap and Breakpoint). These are described in + * the QE Developer's Handbook. +*/ +typedef struct rsp { + u32 tibcr[16]; /* Trap/instruction breakpoint control regs */ + u8 res0[64]; + u32 ibcr0; + u32 ibs0; + u32 ibcnr0; + u8 res1[4]; + u32 ibcr1; + u32 ibs1; + u32 ibcnr1; + u32 npcr; + u32 dbcr; + u32 dbar; + u32 dbamr; + u32 dbsr; + u32 dbcnr; + u8 res2[12]; + u32 dbdr_h; + u32 dbdr_l; + u32 dbdmr_h; + u32 dbdmr_l; + u32 bsr; + u32 bor; + u32 bior; + u8 res3[4]; + u32 iatr[4]; + u32 eccr; /* Exception control configuration register */ + u32 eicr; + u8 res4[0x100-0xf8]; +} __attribute__ ((packed)) rsp_t; + +typedef struct qe_immap { + qe_iram_t iram; /* I-RAM */ + qe_ic_t ic; /* Interrupt Controller */ + cp_qe_t cp; /* Communications Processor */ + qe_mux_t qmx; /* QE Multiplexer */ + qe_timers_t qet; /* QE Timers */ + spi_t spi[0x2]; /* spi */ + mcc_t mcc; /* mcc */ + qe_brg_t brg; /* brg */ + usb_t usb; /* USB */ + si1_t si1; /* SI */ + u8 res11[0x800]; + sir_t sir; /* SI Routing Tables */ + ucc_t ucc1; /* ucc1 */ + ucc_t ucc3; /* ucc3 */ + ucc_t ucc5; /* ucc5 */ + ucc_t ucc7; /* ucc7 */ + u8 res12[0x600]; + upc_t upc1; /* MultiPHY UTOPIA POS Controller 1 */ + ucc_t ucc2; /* ucc2 */ + ucc_t ucc4; /* ucc4 */ + ucc_t ucc6; /* ucc6 */ + ucc_t ucc8; /* ucc8 */ + u8 res13[0x600]; + upc_t upc2; /* MultiPHY UTOPIA POS Controller 2 */ + sdma_t sdma; /* SDMA */ + dbg_t dbg; /* Debug Space */ + rsp_t rsp[0x2]; /* RISC Special Registers + * (Trap and Breakpoint) */ + u8 res14[0x300]; + u8 res15[0x3A00]; + u8 res16[0x8000]; /* 0x108000 - 0x110000 */ + u8 muram[QE_MURAM_SIZE]; +} __attribute__ ((packed)) qe_map_t; + +extern qe_map_t *qe_immr; + +#endif /* __IMMAP_QE_H__ */ diff --git a/include/linux/input.h b/include/linux/input.h new file mode 100644 index 0000000..c7ee22a --- /dev/null +++ b/include/linux/input.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ +/* + * Copyright (c) 1999-2002 Vojtech Pavlik + */ + +#ifndef _LINUX_INPUT_H +#define _LINUX_INPUT_H + +/* + * Keys and buttons + * + * Most of the keys/buttons are modeled after USB HUT 1.12 + * (see http://www.usb.org/developers/hidpage). + * Abbreviations in the comments: + * AC - Application Control + * AL - Application Launch Button + * SC - System Control + */ + +#define KEY_RESERVED 0 +#define KEY_ESC 1 +#define KEY_1 2 +#define KEY_2 3 +#define KEY_3 4 +#define KEY_4 5 +#define KEY_5 6 +#define KEY_6 7 +#define KEY_7 8 +#define KEY_8 9 +#define KEY_9 10 +#define KEY_0 11 +#define KEY_MINUS 12 +#define KEY_EQUAL 13 +#define KEY_BACKSPACE 14 +#define KEY_TAB 15 +#define KEY_Q 16 +#define KEY_W 17 +#define KEY_E 18 +#define KEY_R 19 +#define KEY_T 20 +#define KEY_Y 21 +#define KEY_U 22 +#define KEY_I 23 +#define KEY_O 24 +#define KEY_P 25 +#define KEY_LEFTBRACE 26 +#define KEY_RIGHTBRACE 27 +#define KEY_ENTER 28 +#define KEY_LEFTCTRL 29 +#define KEY_A 30 +#define KEY_S 31 +#define KEY_D 32 +#define KEY_F 33 +#define KEY_G 34 +#define KEY_H 35 +#define KEY_J 36 +#define KEY_K 37 +#define KEY_L 38 +#define KEY_SEMICOLON 39 +#define KEY_APOSTROPHE 40 +#define KEY_GRAVE 41 +#define KEY_LEFTSHIFT 42 +#define KEY_BACKSLASH 43 +#define KEY_Z 44 +#define KEY_X 45 +#define KEY_C 46 +#define KEY_V 47 +#define KEY_B 48 +#define KEY_N 49 +#define KEY_M 50 +#define KEY_COMMA 51 +#define KEY_DOT 52 +#define KEY_SLASH 53 +#define KEY_RIGHTSHIFT 54 +#define KEY_KPASTERISK 55 +#define KEY_LEFTALT 56 +#define KEY_SPACE 57 +#define KEY_CAPSLOCK 58 +#define KEY_F1 59 +#define KEY_F2 60 +#define KEY_F3 61 +#define KEY_F4 62 +#define KEY_F5 63 +#define KEY_F6 64 +#define KEY_F7 65 +#define KEY_F8 66 +#define KEY_F9 67 +#define KEY_F10 68 +#define KEY_NUMLOCK 69 +#define KEY_SCROLLLOCK 70 +#define KEY_KP7 71 +#define KEY_KP8 72 +#define KEY_KP9 73 +#define KEY_KPMINUS 74 +#define KEY_KP4 75 +#define KEY_KP5 76 +#define KEY_KP6 77 +#define KEY_KPPLUS 78 +#define KEY_KP1 79 +#define KEY_KP2 80 +#define KEY_KP3 81 +#define KEY_KP0 82 +#define KEY_KPDOT 83 + +#define KEY_ZENKAKUHANKAKU 85 +#define KEY_102ND 86 +#define KEY_F11 87 +#define KEY_F12 88 +#define KEY_RO 89 +#define KEY_KATAKANA 90 +#define KEY_HIRAGANA 91 +#define KEY_HENKAN 92 +#define KEY_KATAKANAHIRAGANA 93 +#define KEY_MUHENKAN 94 +#define KEY_KPJPCOMMA 95 +#define KEY_KPENTER 96 +#define KEY_RIGHTCTRL 97 +#define KEY_KPSLASH 98 +#define KEY_SYSRQ 99 +#define KEY_RIGHTALT 100 +#define KEY_LINEFEED 101 +#define KEY_HOME 102 +#define KEY_UP 103 +#define KEY_PAGEUP 104 +#define KEY_LEFT 105 +#define KEY_RIGHT 106 +#define KEY_END 107 +#define KEY_DOWN 108 +#define KEY_PAGEDOWN 109 +#define KEY_INSERT 110 +#define KEY_DELETE 111 +#define KEY_MACRO 112 +#define KEY_MUTE 113 +#define KEY_VOLUMEDOWN 114 +#define KEY_VOLUMEUP 115 +#define KEY_POWER 116 /* SC System Power Down */ +#define KEY_KPEQUAL 117 +#define KEY_KPPLUSMINUS 118 +#define KEY_PAUSE 119 +#define KEY_SCALE 120 /* AL Compiz Scale (Expose) */ + +#define KEY_KPCOMMA 121 +#define KEY_HANGEUL 122 +#define KEY_HANGUEL KEY_HANGEUL +#define KEY_HANJA 123 +#define KEY_YEN 124 +#define KEY_LEFTMETA 125 +#define KEY_RIGHTMETA 126 +#define KEY_COMPOSE 127 +#define KEY_FN 0x1d0 + +#endif diff --git a/include/linux/io.h b/include/linux/io.h new file mode 100644 index 0000000..7984788 --- /dev/null +++ b/include/linux/io.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _LINUX_IO_H +#define _LINUX_IO_H + +#include +#include +#include + +#ifndef CONFIG_HAVE_ARCH_IOMAP 
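+/*
+ * The generic fallbacks below (used when the architecture does not set
+ * CONFIG_HAVE_ARCH_IOMAP) simply forward ioread8/ioread16/ioread32 and
+ * the matching iowrite helpers to the architecture's readb/readw/readl
+ * and writeb/writew/writel accessors. Example (illustrative only; regs
+ * is an __iomem pointer, STATUS_REG and ENABLE_BIT are hypothetical
+ * driver definitions):
+ *
+ *	u32 status = ioread32(regs + STATUS_REG);
+ *	iowrite32(status | ENABLE_BIT, regs + STATUS_REG);
+ */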
+static inline u8 ioread8(const volatile void __iomem *addr) +{ + return readb(addr); +} + +static inline u16 ioread16(const volatile void __iomem *addr) +{ + return readw(addr); +} + +static inline u32 ioread32(const volatile void __iomem *addr) +{ + return readl(addr); +} +#endif /* !CONFIG_HAVE_ARCH_IOMAP */ + +#ifdef CONFIG_64BIT +static inline u64 ioread64(const volatile void __iomem *addr) +{ + return readq(addr); +} +#endif /* CONFIG_64BIT */ + +#ifndef CONFIG_HAVE_ARCH_IOMAP +static inline void iowrite8(u8 value, volatile void __iomem *addr) +{ + writeb(value, addr); +} + +static inline void iowrite16(u16 value, volatile void __iomem *addr) +{ + writew(value, addr); +} + +static inline void iowrite32(u32 value, volatile void __iomem *addr) +{ + writel(value, addr); +} +#endif /* !CONFIG_HAVE_ARCH_IOMAP */ + +#ifdef CONFIG_64BIT +static inline void iowrite64(u64 value, volatile void __iomem *addr) +{ + writeq(value, addr); +} +#endif /* CONFIG_64BIT */ + +#ifndef CONFIG_HAVE_ARCH_IOREMAP +static inline void __iomem *ioremap(resource_size_t offset, + resource_size_t size) +{ + return (void __iomem *)(unsigned long)offset; +} + +static inline void iounmap(void __iomem *addr) +{ +} +#endif + +#define devm_ioremap(dev, offset, size) ioremap(offset, size) + +#endif /* _LINUX_IO_H */ diff --git a/include/linux/ioctl.h b/include/linux/ioctl.h new file mode 100644 index 0000000..7e55c36 --- /dev/null +++ b/include/linux/ioctl.h @@ -0,0 +1,6 @@ +#ifndef _LINUX_IOCTL_H +#define _LINUX_IOCTL_H + +#include + +#endif /* _LINUX_IOCTL_H */ diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h new file mode 100644 index 0000000..ab0ae19 --- /dev/null +++ b/include/linux/iopoll.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved. + */ + +#ifndef _LINUX_IOPOLL_H +#define _LINUX_IOPOLL_H + +#include +#include +#include + +/** + * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs + * @op: accessor function (takes @addr as its only argument) + * @addr: Address to poll + * @val: Variable to read the value into + * @cond: Break condition (usually involving @val) + * @timeout_us: Timeout in us, 0 means never timeout + * + * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. + * + * When available, you'll probably want to use one of the specialized + * macros defined below rather than this macro directly. + */ +#define readx_poll_timeout(op, addr, val, cond, timeout_us) \ +({ \ + unsigned long timeout = timer_get_us() + timeout_us; \ + for (;;) { \ + (val) = op(addr); \ + if (cond) \ + break; \ + if (timeout_us && time_after(timer_get_us(), timeout)) { \ + (val) = op(addr); \ + break; \ + } \ + } \ + (cond) ? 
0 : -ETIMEDOUT; \ +}) + + +#define readb_poll_timeout(addr, val, cond, timeout_us) \ + readx_poll_timeout(readb, addr, val, cond, timeout_us) + +#define readw_poll_timeout(addr, val, cond, timeout_us) \ + readx_poll_timeout(readw, addr, val, cond, timeout_us) + +#define readl_poll_timeout(addr, val, cond, timeout_us) \ + readx_poll_timeout(readl, addr, val, cond, timeout_us) + +#define readq_poll_timeout(addr, val, cond, timeout_us) \ + readx_poll_timeout(readq, addr, val, cond, timeout_us) + +#define readb_relaxed_poll_timeout(addr, val, cond, timeout_us) \ + readx_poll_timeout(readb_relaxed, addr, val, cond, timeout_us) + +#define readw_relaxed_poll_timeout(addr, val, cond, timeout_us) \ + readx_poll_timeout(readw_relaxed, addr, val, cond, timeout_us) + +#define readl_relaxed_poll_timeout(addr, val, cond, timeout_us) \ + readx_poll_timeout(readl_relaxed, addr, val, cond, timeout_us) + +#define readq_relaxed_poll_timeout(addr, val, cond, timeout_us) \ + readx_poll_timeout(readq_relaxed, addr, val, cond, timeout_us) + +#endif /* _LINUX_IOPOLL_H */ diff --git a/include/linux/ioport.h b/include/linux/ioport.h new file mode 100644 index 0000000..7129504 --- /dev/null +++ b/include/linux/ioport.h @@ -0,0 +1,192 @@ +/* + * ioport.h Definitions of routines for detecting, reserving and + * allocating system resources. + * + * Authors: Linus Torvalds + */ + +#ifndef _LINUX_IOPORT_H +#define _LINUX_IOPORT_H + +#ifndef __ASSEMBLY__ +#include +#include +/* + * Resources are tree-like, allowing + * nesting etc.. + */ +struct resource { + resource_size_t start; + resource_size_t end; + const char *name; + unsigned long flags; + struct resource *parent, *sibling, *child; +}; + +struct resource_list { + struct resource_list *next; + struct resource *res; + struct pci_dev *dev; +}; + +/* + * IO resources have these defined flags. 
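+ *
+ * For illustration (the address and name below are made up, not taken
+ * from the upstream header): a small MMIO register window could be
+ * described as
+ *
+ *	struct resource res = {
+ *		.start = 0xfe000000,
+ *		.end   = 0xfe000fff,
+ *		.name  = "uart0",
+ *		.flags = IORESOURCE_MEM,
+ *	};
+ *
+ * for which resource_size(&res) evaluates to 0x1000 and
+ * resource_type(&res) returns IORESOURCE_MEM.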
+ */ +#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ + +#define IORESOURCE_TYPE_BITS 0x00000f00 /* Resource type */ +#define IORESOURCE_IO 0x00000100 +#define IORESOURCE_MEM 0x00000200 +#define IORESOURCE_IRQ 0x00000400 +#define IORESOURCE_DMA 0x00000800 + +#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */ +#define IORESOURCE_READONLY 0x00002000 +#define IORESOURCE_CACHEABLE 0x00004000 +#define IORESOURCE_RANGELENGTH 0x00008000 +#define IORESOURCE_SHADOWABLE 0x00010000 + +#define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */ +#define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */ + +#define IORESOURCE_MEM_64 0x00100000 + +#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ +#define IORESOURCE_DISABLED 0x10000000 +#define IORESOURCE_UNSET 0x20000000 +#define IORESOURCE_AUTO 0x40000000 +#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */ + +/* PnP IRQ specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_IRQ_HIGHEDGE (1<<0) +#define IORESOURCE_IRQ_LOWEDGE (1<<1) +#define IORESOURCE_IRQ_HIGHLEVEL (1<<2) +#define IORESOURCE_IRQ_LOWLEVEL (1<<3) +#define IORESOURCE_IRQ_SHAREABLE (1<<4) +#define IORESOURCE_IRQ_OPTIONAL (1<<5) + +/* PnP DMA specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_DMA_TYPE_MASK (3<<0) +#define IORESOURCE_DMA_8BIT (0<<0) +#define IORESOURCE_DMA_8AND16BIT (1<<0) +#define IORESOURCE_DMA_16BIT (2<<0) + +#define IORESOURCE_DMA_MASTER (1<<2) +#define IORESOURCE_DMA_BYTE (1<<3) +#define IORESOURCE_DMA_WORD (1<<4) + +#define IORESOURCE_DMA_SPEED_MASK (3<<6) +#define IORESOURCE_DMA_COMPATIBLE (0<<6) +#define IORESOURCE_DMA_TYPEA (1<<6) +#define IORESOURCE_DMA_TYPEB (2<<6) +#define IORESOURCE_DMA_TYPEF (3<<6) + +/* PnP memory I/O specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */ +#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */ +#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */ +#define IORESOURCE_MEM_TYPE_MASK (3<<3) +#define IORESOURCE_MEM_8BIT (0<<3) +#define IORESOURCE_MEM_16BIT (1<<3) +#define IORESOURCE_MEM_8AND16BIT (2<<3) +#define IORESOURCE_MEM_32BIT (3<<3) +#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */ +#define IORESOURCE_MEM_EXPANSIONROM (1<<6) + +/* PnP I/O specific bits (IORESOURCE_BITS) */ +#define IORESOURCE_IO_16BIT_ADDR (1<<0) +#define IORESOURCE_IO_FIXED (1<<1) + +/* PCI ROM control bits (IORESOURCE_BITS) */ +#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */ +#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */ +#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ +#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ + +/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. 
*/ +#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */ + +/* PC/ISA/whatever - the normal PC address spaces: IO and memory */ +extern struct resource ioport_resource; +extern struct resource iomem_resource; + +extern int request_resource(struct resource *root, struct resource *new); +extern int release_resource(struct resource *new); +extern void reserve_region_with_split(struct resource *root, + resource_size_t start, resource_size_t end, + const char *name); +extern int insert_resource(struct resource *parent, struct resource *new); +extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); +extern int allocate_resource(struct resource *root, struct resource *new, + resource_size_t size, resource_size_t min, + resource_size_t max, resource_size_t align, + void (*alignf)(void *, struct resource *, + resource_size_t, resource_size_t), + void *alignf_data); +int adjust_resource(struct resource *res, resource_size_t start, + resource_size_t size); +resource_size_t resource_alignment(struct resource *res); +static inline resource_size_t resource_size(const struct resource *res) +{ + return res->end - res->start + 1; +} +static inline unsigned long resource_type(const struct resource *res) +{ + return res->flags & IORESOURCE_TYPE_BITS; +} + +/* Convenience shorthand with allocation */ +#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0) +#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl) +#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0) +#define request_mem_region_exclusive(start,n,name) \ + __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE) +#define rename_region(region, newname) do { (region)->name = (newname); } while (0) + +extern struct resource * __request_region(struct resource *, + resource_size_t start, + resource_size_t n, + const char *name, int flags); + +/* Compatibility cruft */ +#define release_region(start,n) __release_region(&ioport_resource, (start), (n)) +#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n)) +#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n)) + +extern int __check_region(struct resource *, resource_size_t, resource_size_t); +extern void __release_region(struct resource *, resource_size_t, + resource_size_t); + +static inline int __deprecated check_region(resource_size_t s, + resource_size_t n) +{ + return __check_region(&ioport_resource, s, n); +} + +/* Wrappers for managed devices */ +struct device; +#define devm_request_region(dev,start,n,name) \ + __devm_request_region(dev, &ioport_resource, (start), (n), (name)) +#define devm_request_mem_region(dev,start,n,name) \ + __devm_request_region(dev, &iomem_resource, (start), (n), (name)) + +extern struct resource * __devm_request_region(struct device *dev, + struct resource *parent, resource_size_t start, + resource_size_t n, const char *name); + +#define devm_release_region(dev, start, n) \ + __devm_release_region(dev, &ioport_resource, (start), (n)) +#define devm_release_mem_region(dev, start, n) \ + __devm_release_region(dev, &iomem_resource, (start), (n)) + +extern void __devm_release_region(struct device *dev, struct resource *parent, + resource_size_t start, resource_size_t n); +extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); +extern int iomem_is_exclusive(u64 addr); + +extern int 
+walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, + void *arg, int (*func)(unsigned long, unsigned long, void *)); + +#endif /* __ASSEMBLY__ */ +#endif /* _LINUX_IOPORT_H */ diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h new file mode 100644 index 0000000..8a9f645 --- /dev/null +++ b/include/linux/kbuild.h @@ -0,0 +1,20 @@ +/* + * Copied from Linux: + * commit 37487a56523d402e25650da16c337acf4cecd13d + * Author: Christoph Lameter + */ +#ifndef __LINUX_KBUILD_H +#define __LINUX_KBUILD_H + +#define DEFINE(sym, val) \ + asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val)) + +#define BLANK() asm volatile("\n.ascii \"->\"" : : ) + +#define OFFSET(sym, str, mem) \ + DEFINE(sym, offsetof(struct str, mem)) + +#define COMMENT(x) \ + asm volatile("\n.ascii \"->#" x "\"") + +#endif diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h new file mode 100644 index 0000000..921f77d --- /dev/null +++ b/include/linux/kconfig.h @@ -0,0 +1,109 @@ +#ifndef __LINUX_KCONFIG_H +#define __LINUX_KCONFIG_H + +//#include + +/* + * Helper macros to use CONFIG_ options in C/CPP expressions. Note that + * these only work with boolean and tristate options. + */ + +/* + * Getting something that works in C and CPP for an arg that may or may + * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1" + * we match on the placeholder define, insert the "0," for arg1 and generate + * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one). + * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when + * the last step cherry picks the 2nd arg, we get a zero. + */ +#define __ARG_PLACEHOLDER_1 0, +#define config_enabled(cfg) _config_enabled(cfg) +#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) +#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) +#define ___config_enabled(__ignored, val, ...) val + +/* + * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', + * 0 otherwise. + * + */ +#define IS_ENABLED(option) \ + (config_enabled(option) || config_enabled(option##_MODULE)) + +/* + * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 + * otherwise. For boolean options, this is equivalent to + * IS_ENABLED(CONFIG_FOO). + */ +#define IS_BUILTIN(option) config_enabled(option) + +/* + * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 + * otherwise. + */ +#define IS_MODULE(option) config_enabled(option##_MODULE) + +/* + * U-Boot add-on: Helper macros to reference to different macros + * (CONFIG_ or CONFIG_SPL_ prefixed), depending on the build context. + */ +#ifdef CONFIG_SPL_BUILD +#define _IS_SPL 1 +#endif + +#ifdef CONFIG_TPL_BUILD +#define _IS_TPL 1 +#endif + +#if defined(CONFIG_TPL_BUILD) +#define config_val(cfg) _config_val(_IS_TPL, cfg) +#define _config_val(x, cfg) __config_val(x, cfg) +#define __config_val(x, cfg) ___config_val(__ARG_PLACEHOLDER_##x, cfg) +#define ___config_val(arg1_or_junk, cfg) \ + ____config_val(arg1_or_junk CONFIG_TPL_##cfg, CONFIG_##cfg) +#define ____config_val(__ignored, val, ...) val +#else +#define config_val(cfg) _config_val(_IS_SPL, cfg) +#define _config_val(x, cfg) __config_val(x, cfg) +#define __config_val(x, cfg) ___config_val(__ARG_PLACEHOLDER_##x, cfg) +#define ___config_val(arg1_or_junk, cfg) \ + ____config_val(arg1_or_junk CONFIG_SPL_##cfg, CONFIG_##cfg) +#define ____config_val(__ignored, val, ...) 
val +#endif + +/* + * CONFIG_VAL(FOO) evaluates to the value of + * CONFIG_FOO if CONFIG_SPL_BUILD is undefined, + * CONFIG_SPL_FOO if CONFIG_SPL_BUILD is defined. + * CONFIG_TPL_FOO if CONFIG_TPL_BUILD is defined. + */ +#define CONFIG_VAL(option) config_val(option) + +/* + * CONFIG_IS_ENABLED(FOO) evaluates to + * 1 if CONFIG_SPL_BUILD is undefined and CONFIG_FOO is set to 'y' or 'm', + * 1 if CONFIG_SPL_BUILD is defined and CONFIG_SPL_FOO is set to 'y' or 'm', + * 1 if CONFIG_TPL_BUILD is defined and CONFIG_TPL_FOO is set to 'y' or 'm', + * 0 otherwise. + */ +#define CONFIG_IS_ENABLED(option) \ + (config_enabled(CONFIG_VAL(option)) || \ + config_enabled(CONFIG_VAL(option##_MODULE))) + +/* + * CONFIG_IS_BUILTIN(FOO) evaluates to + * 1 if CONFIG_SPL_BUILD is undefined and CONFIG_FOO is set to 'y', + * 1 if CONFIG_SPL_BUILD is defined and CONFIG_SPL_FOO is set to 'y', + * 0 otherwise. + */ +#define CONFIG_IS_BUILTIN(option) config_enabled(CONFIG_VAL(option)) + +/* + * CONFIG_IS_MODULE(FOO) evaluates to + * 1 if CONFIG_SPL_BUILD is undefined and CONFIG_FOO is set to 'm', + * 1 if CONFIG_SPL_BUILD is defined and CONFIG_SPL_FOO is set to 'm', + * 0 otherwise. + */ +#define CONFIG_IS_MODULE(option) config_enabled(CONFIG_VAL(option##_MODULE)) + +#endif /* __LINUX_KCONFIG_H */ diff --git a/include/linux/kernel.h b/include/linux/kernel.h new file mode 100644 index 0000000..a85c15d --- /dev/null +++ b/include/linux/kernel.h @@ -0,0 +1,269 @@ +#ifndef _LINUX_KERNEL_H +#define _LINUX_KERNEL_H + + +#include + +#define USHRT_MAX ((u16)(~0U)) +#define SHRT_MAX ((s16)(USHRT_MAX>>1)) +#define SHRT_MIN ((s16)(-SHRT_MAX - 1)) +#define INT_MAX ((int)(~0U>>1)) +#define INT_MIN (-INT_MAX - 1) +#define UINT_MAX (~0U) +#define LONG_MAX ((long)(~0UL>>1)) +#define LONG_MIN (-LONG_MAX - 1) +#define ULONG_MAX (~0UL) +#define LLONG_MAX ((long long)(~0ULL>>1)) +#define LLONG_MIN (-LLONG_MAX - 1) +#define ULLONG_MAX (~0ULL) +#ifndef SIZE_MAX +#define SIZE_MAX (~(size_t)0) +#endif + +#define U8_MAX ((u8)~0U) +#define S8_MAX ((s8)(U8_MAX>>1)) +#define S8_MIN ((s8)(-S8_MAX - 1)) +#define U16_MAX ((u16)~0U) +#define S16_MAX ((s16)(U16_MAX>>1)) +#define S16_MIN ((s16)(-S16_MAX - 1)) +#define U32_MAX ((u32)~0U) +#define S32_MAX ((s32)(U32_MAX>>1)) +#define S32_MIN ((s32)(-S32_MAX - 1)) +#define U64_MAX ((u64)~0ULL) +#define S64_MAX ((s64)(U64_MAX>>1)) +#define S64_MIN ((s64)(-S64_MAX - 1)) + +/* Aliases defined by stdint.h */ +#define UINT32_MAX U32_MAX +#define UINT64_MAX U64_MAX + +#define STACK_MAGIC 0xdeadbeef + +#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) + +#define ALIGN(x,a) __ALIGN_MASK((x),(typeof(x))(a)-1) +#define ALIGN_DOWN(x, a) ALIGN((x) - ((a) - 1), (a)) +#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) +#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) +#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0) + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +/* + * This looks more complex than it should be. But we need to + * get the type for the ~ right in round_down (it needs to be + * as wide as the result!), and we want to evaluate the macro + * arguments just once each. 
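/*
 * Editor's note: an illustrative sketch (not part of the imported header).
 * CONFIG_FOO / CONFIG_SPL_FOO are made-up option names; the point is that
 * CONFIG_IS_ENABLED() transparently picks the CONFIG_, CONFIG_SPL_ or
 * CONFIG_TPL_ variant for the current build stage, and works both as a C
 * expression and in preprocessor conditionals.
 */
void demo_probe(void)
{
	if (CONFIG_IS_ENABLED(FOO))	/* compiles to 0 and is dead-code-eliminated when 'n' */
		printf("FOO enabled for this build stage\n");

#if CONFIG_IS_ENABLED(FOO)		/* also usable in #if */
	printf("FOO enabled (preprocessor check)\n");
#endif
}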
+ */ +#define __round_mask(x, y) ((__typeof__(x))((y)-1)) +#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) +#define round_down(x, y) ((x) & ~__round_mask(x, y)) + +#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) +#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) + +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) + +#define DIV_ROUND_UP_ULL(ll, d) DIV_ROUND_DOWN_ULL((ll) + (d) - 1, (d)) + +#if BITS_PER_LONG == 32 +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP_ULL(ll, d) +#else +# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d) +#endif + +/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */ +#define roundup(x, y) ( \ +{ \ + const typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +#define rounddown(x, y) ( \ +{ \ + typeof(x) __x = (x); \ + __x - (__x % (y)); \ +} \ +) + +/* + * Divide positive or negative dividend by positive divisor and round + * to closest integer. Result is undefined for negative divisors and + * for negative dividends if the divisor variable type is unsigned. + */ +#define DIV_ROUND_CLOSEST(x, divisor)( \ +{ \ + typeof(x) __x = x; \ + typeof(divisor) __d = divisor; \ + (((typeof(x))-1) > 0 || \ + ((typeof(divisor))-1) > 0 || (__x) > 0) ? \ + (((__x) + ((__d) / 2)) / (__d)) : \ + (((__x) - ((__d) / 2)) / (__d)); \ +} \ +) +/* + * Same as above but for u64 dividends. divisor must be a 32-bit + * number. + */ +#define DIV_ROUND_CLOSEST_ULL(x, divisor)( \ +{ \ + typeof(divisor) __d = divisor; \ + unsigned long long _tmp = (x) + (__d) / 2; \ + do_div(_tmp, __d); \ + _tmp; \ +} \ +) + +/* + * Multiplies an integer by a fraction, while avoiding unnecessary + * overflow or loss of precision. + */ +#define mult_frac(x, numer, denom)( \ +{ \ + typeof(x) quot = (x) / (denom); \ + typeof(x) rem = (x) % (denom); \ + (quot * (numer)) + ((rem * (numer)) / (denom)); \ +} \ +) + +/** + * upper_32_bits - return bits 32-63 of a number + * @n: the number we're accessing + * + * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress + * the "right shift count >= width of type" warning when that quantity is + * 32-bits. + */ +#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) + +/** + * lower_32_bits - return bits 0-31 of a number + * @n: the number we're accessing + */ +#define lower_32_bits(n) ((u32)(n)) + +/* + * abs() handles unsigned and signed longs, ints, shorts and chars. For all + * input types abs() returns a signed long. + * abs() should not be used for 64-bit types (s64, u64, long long) - use abs64() + * for those. + */ +#define abs(x) ({ \ + long ret; \ + if (sizeof(x) == sizeof(long)) { \ + long __x = (x); \ + ret = (__x < 0) ? -__x : __x; \ + } else { \ + int __x = (x); \ + ret = (__x < 0) ? -__x : __x; \ + } \ + ret; \ + }) + +#define abs64(x) ({ \ + s64 __x = (x); \ + (__x < 0) ? -__x : __x; \ + }) + +/* + * min()/max()/clamp() macros that also do + * strict type-checking.. See the + * "unnecessary" pointer comparison. + */ +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + +#define max(x, y) ({ \ + typeof(x) _max1 = (x); \ + typeof(y) _max2 = (y); \ + (void) (&_max1 == &_max2); \ + _max1 > _max2 ? 
_max1 : _max2; }) + +#define min3(x, y, z) min((typeof(x))min(x, y), z) +#define max3(x, y, z) max((typeof(x))max(x, y), z) + +/** + * min_not_zero - return the minimum that is _not_ zero, unless both are zero + * @x: value1 + * @y: value2 + */ +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +/** + * clamp - return a value clamped to a given range with strict typechecking + * @val: current value + * @lo: lowest allowable value + * @hi: highest allowable value + * + * This macro does strict typechecking of lo/hi to make sure they are of the + * same type as val. See the unnecessary pointer comparisons. + */ +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) + +/* + * ..and if you can't take the strict + * types, you can specify one yourself. + * + * Or not use min/max/clamp at all, of course. + */ +#define min_t(type, x, y) ({ \ + type __min1 = (x); \ + type __min2 = (y); \ + __min1 < __min2 ? __min1: __min2; }) + +#define max_t(type, x, y) ({ \ + type __max1 = (x); \ + type __max2 = (y); \ + __max1 > __max2 ? __max1: __max2; }) + +/** + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * 'type' to make all the comparisons. + */ +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) + +/** + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument 'val' is. This is useful when val is an unsigned + * type and min and max are literals that will otherwise be assigned a signed + * integer type. + */ +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) + + +/* + * swap - swap value of @a and @b + */ +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) + +/** + * container_of - cast a member of a structure out to the containing structure + * @ptr: the pointer to the member. + * @type: the type of the container struct this is embedded in. + * @member: the name of the member within the struct. 
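/*
 * Editor's note: a short worked example (not part of the imported header)
 * of the alignment, rounding and clamping helpers above, with the results
 * spelled out in comments so the behaviour is easy to check.
 */
static void demo_rounding(void)
{
	unsigned int len = 1000;

	/* ALIGN/round_down work on power-of-two boundaries. */
	unsigned int up   = ALIGN(len, 64);		/* 1024 */
	unsigned int down = round_down(len, 64);	/* 960  */

	/* DIV_ROUND_UP takes any divisor, not just powers of two. */
	unsigned int blocks = DIV_ROUND_UP(len, 512);	/* 2 */

	/* DIV_ROUND_CLOSEST rounds to nearest: 1000/512 -> 2, 700/512 -> 1. */
	unsigned int near = DIV_ROUND_CLOSEST(len, 512);	/* 2 */

	/* clamp_t forces the comparison type, handy for mixed signedness. */
	int level = clamp_t(int, (int)up - 2000, 0, 255);	/* 0 */

	(void)down; (void)blocks; (void)near; (void)level;
}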
+ * + */ +#define container_of(ptr, type, member) ({ \ + const typeof( ((type *)0)->member ) *__mptr = (ptr); \ + (type *)( (char *)__mptr - offsetof(type,member) );}) + +#endif diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h new file mode 100644 index 0000000..eeb2344 --- /dev/null +++ b/include/linux/libfdt.h @@ -0,0 +1,312 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _INCLUDE_LIBFDT_H_ +#define _INCLUDE_LIBFDT_H_ + +#ifndef USE_HOSTCC +#include +#endif +#include "../../scripts/dtc/libfdt/libfdt.h" + +/* U-Boot local hacks */ + +#ifndef SWIG /* Not available in Python */ +struct fdt_region { + int offset; + int size; +}; + +/* + * Flags for fdt_find_regions() + * + * Add a region for the string table (always the last region) + */ +#define FDT_REG_ADD_STRING_TAB (1 << 0) + +/* + * Add all supernodes of a matching node/property, useful for creating a + * valid subset tree + */ +#define FDT_REG_SUPERNODES (1 << 1) + +/* Add the FDT_BEGIN_NODE tags of subnodes, including their names */ +#define FDT_REG_DIRECT_SUBNODES (1 << 2) + +/* Add all subnodes of a matching node */ +#define FDT_REG_ALL_SUBNODES (1 << 3) + +/* Add a region for the mem_rsvmap table (always the first region) */ +#define FDT_REG_ADD_MEM_RSVMAP (1 << 4) + +/* Indicates what an fdt part is (node, property, value) */ +#define FDT_IS_NODE (1 << 0) +#define FDT_IS_PROP (1 << 1) +#define FDT_IS_VALUE (1 << 2) /* not supported */ +#define FDT_IS_COMPAT (1 << 3) /* used internally */ +#define FDT_NODE_HAS_PROP (1 << 4) /* node contains prop */ + +#define FDT_ANY_GLOBAL (FDT_IS_NODE | FDT_IS_PROP | FDT_IS_VALUE | \ + FDT_IS_COMPAT) +#define FDT_IS_ANY 0x1f /* all the above */ + +/* We set a reasonable limit on the number of nested nodes */ +#define FDT_MAX_DEPTH 32 + +/* Decribes what we want to include from the current tag */ +enum want_t { + WANT_NOTHING, + WANT_NODES_ONLY, /* No properties */ + WANT_NODES_AND_PROPS, /* Everything for one level */ + WANT_ALL_NODES_AND_PROPS /* Everything for all levels */ +}; + +/* Keeps track of the state at parent nodes */ +struct fdt_subnode_stack { + int offset; /* Offset of node */ + enum want_t want; /* The 'want' value here */ + int included; /* 1 if we included this node, 0 if not */ +}; + +struct fdt_region_ptrs { + int depth; /* Current tree depth */ + int done; /* What we have completed scanning */ + enum want_t want; /* What we are currently including */ + char *end; /* Pointer to end of full node path */ + int nextoffset; /* Next node offset to check */ +}; + +/* The state of our finding algortihm */ +struct fdt_region_state { + struct fdt_subnode_stack stack[FDT_MAX_DEPTH]; /* node stack */ + struct fdt_region *region; /* Contains list of regions found */ + int count; /* Numnber of regions found */ + const void *fdt; /* FDT blob */ + int max_regions; /* Maximum regions to find */ + int can_merge; /* 1 if we can merge with previous region */ + int start; /* Start position of current region */ + struct fdt_region_ptrs ptrs; /* Pointers for what we are up to */ +}; + +/** + * fdt_find_regions() - find regions in device tree + * + * Given a list of nodes to include and properties to exclude, find + * the regions of the device tree which describe those included parts. + * + * The intent is to get a list of regions which will be invariant provided + * those parts are invariant. For example, if you request a list of regions + * for all nodes but exclude the property "data", then you will get the + * same region contents regardless of any change to "data" properties. 
+ * + * This function can be used to produce a byte-stream to send to a hashing + * function to verify that critical parts of the FDT have not changed. + * + * Nodes which are given in 'inc' are included in the region list, as + * are the names of the immediate subnodes nodes (but not the properties + * or subnodes of those subnodes). + * + * For eaxample "/" means to include the root node, all root properties + * and the FDT_BEGIN_NODE and FDT_END_NODE of all subnodes of /. The latter + * ensures that we capture the names of the subnodes. In a hashing situation + * it prevents the root node from changing at all Any change to non-excluded + * properties, names of subnodes or number of subnodes would be detected. + * + * When used with FITs this provides the ability to hash and sign parts of + * the FIT based on different configurations in the FIT. Then it is + * impossible to change anything about that configuration (include images + * attached to the configuration), but it may be possible to add new + * configurations, new images or new signatures within the existing + * framework. + * + * Adding new properties to a device tree may result in the string table + * being extended (if the new property names are different from those + * already added). This function can optionally include a region for + * the string table so that this can be part of the hash too. + * + * The device tree header is not included in the list. + * + * @fdt: Device tree to check + * @inc: List of node paths to included + * @inc_count: Number of node paths in list + * @exc_prop: List of properties names to exclude + * @exc_prop_count: Number of properties in exclude list + * @region: Returns list of regions + * @max_region: Maximum length of region list + * @path: Pointer to a temporary string for the function to use for + * building path names + * @path_len: Length of path, must be large enough to hold the longest + * path in the tree + * @add_string_tab: 1 to add a region for the string table + * @return number of regions in list. If this is >max_regions then the + * region array was exhausted. You should increase max_regions and try + * the call again. + */ +int fdt_find_regions(const void *fdt, char * const inc[], int inc_count, + char * const exc_prop[], int exc_prop_count, + struct fdt_region region[], int max_regions, + char *path, int path_len, int add_string_tab); + +/** + * fdt_first_region() - find regions in device tree + * + * Given a nodes and properties to include and properties to exclude, find + * the regions of the device tree which describe those included parts. + * + * The use for this function is twofold. Firstly it provides a convenient + * way of performing a structure-aware grep of the tree. For example it is + * possible to grep for a node and get all the properties associated with + * that node. Trees can be subsetted easily, by specifying the nodes that + * are required, and then writing out the regions returned by this function. + * This is useful for small resource-constrained systems, such as boot + * loaders, which want to use an FDT but do not need to know about all of + * it. + * + * Secondly it makes it easy to hash parts of the tree and detect changes. + * The intent is to get a list of regions which will be invariant provided + * those parts are invariant. For example, if you request a list of regions + * for all nodes but exclude the property "data", then you will get the + * same region contents regardless of any change to "data" properties. 
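/*
 * Editor's note: an illustrative call (not part of the imported header).
 * The node/property lists and array sizes are made up; the pattern is to
 * pass the nodes to include and the properties to exclude, then retry with
 * a larger region[] array if the return value exceeds max_regions.
 */
static int demo_hash_regions(const void *fdt)
{
	static char * const inc[] = { "/" };
	static char * const exc_prop[] = { "data" };
	struct fdt_region region[16];
	char path[200];
	int count;

	count = fdt_find_regions(fdt, inc, 1, exc_prop, 1,
				 region, 16, path, sizeof(path),
				 1 /* add a region for the string table */);
	if (count < 0 || count > 16)
		return -1;	/* error, or region[] was too small */

	/* region[0..count-1] now hold offset/size pairs to feed a hash */
	return count;
}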
+ * + * This function can be used to produce a byte-stream to send to a hashing + * function to verify that critical parts of the FDT have not changed. + * Note that semantically null changes in order could still cause false + * hash misses. Such reordering might happen if the tree is regenerated + * from source, and nodes are reordered (the bytes-stream will be emitted + * in a different order and many hash functions will detect this). However + * if an existing tree is modified using libfdt functions, such as + * fdt_add_subnode() and fdt_setprop(), then this problem is avoided. + * + * The nodes/properties to include/exclude are defined by a function + * provided by the caller. This function is called for each node and + * property, and must return: + * + * 0 - to exclude this part + * 1 - to include this part + * -1 - for FDT_IS_PROP only: no information is available, so include + * if its containing node is included + * + * The last case is only used to deal with properties. Often a property is + * included if its containing node is included - this is the case where + * -1 is returned.. However if the property is specifically required to be + * included/excluded, then 0 or 1 can be returned. Note that including a + * property when the FDT_REG_SUPERNODES flag is given will force its + * containing node to be included since it is not valid to have a property + * that is not in a node. + * + * Using the information provided, the inclusion of a node can be controlled + * either by a node name or its compatible string, or any other property + * that the function can determine. + * + * As an example, including node "/" means to include the root node and all + * root properties. A flag provides a way of also including supernodes (of + * which there is none for the root node), and another flag includes + * immediate subnodes, so in this case we would get the FDT_BEGIN_NODE and + * FDT_END_NODE of all subnodes of /. + * + * The subnode feature helps in a hashing situation since it prevents the + * root node from changing at all. Any change to non-excluded properties, + * names of subnodes or number of subnodes would be detected. + * + * When used with FITs this provides the ability to hash and sign parts of + * the FIT based on different configurations in the FIT. Then it is + * impossible to change anything about that configuration (include images + * attached to the configuration), but it may be possible to add new + * configurations, new images or new signatures within the existing + * framework. + * + * Adding new properties to a device tree may result in the string table + * being extended (if the new property names are different from those + * already added). This function can optionally include a region for + * the string table so that this can be part of the hash too. This is always + * the last region. + * + * The FDT also has a mem_rsvmap table which can also be included, and is + * always the first region if so. + * + * The device tree header is not included in the region list. Since the + * contents of the FDT are changing (shrinking, often), the caller will need + * to regenerate the header anyway. + * + * @fdt: Device tree to check + * @h_include: Function to call to determine whether to include a part or + * not: + * + * @priv: Private pointer as passed to fdt_find_regions() + * @fdt: Pointer to FDT blob + * @offset: Offset of this node / property + * @type: Type of this part, FDT_IS_... 
+ * @data: Pointer to data (node name, property name, compatible + * string, value (not yet supported) + * @size: Size of data, or 0 if none + * @return 0 to exclude, 1 to include, -1 if no information is + * available + * @priv: Private pointer passed to h_include + * @region: Returns list of regions, sorted by offset + * @max_regions: Maximum length of region list + * @path: Pointer to a temporary string for the function to use for + * building path names + * @path_len: Length of path, must be large enough to hold the longest + * path in the tree + * @flags: Various flags that control the region algortihm, see + * FDT_REG_... + * @return number of regions in list. If this is >max_regions then the + * region array was exhausted. You should increase max_regions and try + * the call again. Only the first max_regions elements are available in the + * array. + * + * On error a -ve value is return, which can be: + * + * -FDT_ERR_BADSTRUCTURE (too deep or more END tags than BEGIN tags + * -FDT_ERR_BADLAYOUT + * -FDT_ERR_NOSPACE (path area is too small) + */ +int fdt_first_region(const void *fdt, + int (*h_include)(void *priv, const void *fdt, int offset, + int type, const char *data, int size), + void *priv, struct fdt_region *region, + char *path, int path_len, int flags, + struct fdt_region_state *info); + +/** fdt_next_region() - find next region + * + * See fdt_first_region() for full description. This function finds the + * next region according to the provided parameters, which must be the same + * as passed to fdt_first_region(). + * + * This function can additionally return -FDT_ERR_NOTFOUND when there are no + * more regions + */ +int fdt_next_region(const void *fdt, + int (*h_include)(void *priv, const void *fdt, int offset, + int type, const char *data, int size), + void *priv, struct fdt_region *region, + char *path, int path_len, int flags, + struct fdt_region_state *info); + +/** + * fdt_add_alias_regions() - find aliases that point to existing regions + * + * Once a device tree grep is complete some of the nodes will be present + * and some will have been dropped. This function checks all the alias nodes + * to figure out which points point to nodes which are still present. These + * aliases need to be kept, along with the nodes they reference. + * + * Given a list of regions function finds the aliases that still apply and + * adds more regions to the list for these. This function is called after + * fdt_next_region() has finished returning regions and requires the same + * state. + * + * @fdt: Device tree file to reference + * @region: List of regions that will be kept + * @count: Number of regions + * @max_regions: Number of entries that can fit in @region + * @info: Region state as returned from fdt_next_region() + * @return new number of regions in @region (i.e. count + the number added) + * or -FDT_ERR_NOSPACE if there was not enough space. + */ +int fdt_add_alias_regions(const void *fdt, struct fdt_region *region, int count, + int max_regions, struct fdt_region_state *info); +#endif /* SWIG */ + +extern struct fdt_header *working_fdt; /* Pointer to the working fdt */ + +#endif /* _INCLUDE_LIBFDT_H_ */ diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h new file mode 100644 index 0000000..e2bf79c --- /dev/null +++ b/include/linux/libfdt_env.h @@ -0,0 +1,31 @@ +#ifdef USE_HOSTCC +#include "../scripts/dtc/libfdt/libfdt_env.h" +#else +/* + * This position of the include guard is intentional. 
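/*
 * Editor's note: a sketch (not part of the imported headers) of the
 * first/next iteration pattern documented above, assuming (as the comments
 * suggest) a return of 0 while regions are produced and -FDT_ERR_NOTFOUND
 * when there are no more.  The callback includes every node, lets ordinary
 * properties follow their containing node (-1), and drops a hypothetical
 * "data" property.
 */
static int demo_h_include(void *priv, const void *fdt, int offset,
			  int type, const char *data, int size)
{
	if (type == FDT_IS_NODE)
		return 1;
	if (type == FDT_IS_PROP)
		return strcmp(data, "data") ? -1 : 0;
	return -1;	/* no opinion: follow the containing node */
}

static int demo_walk_regions(const void *fdt)
{
	struct fdt_region_state state;
	struct fdt_region reg;
	char path[200];
	int flags = FDT_REG_SUPERNODES | FDT_REG_ADD_STRING_TAB;
	int ret;

	ret = fdt_first_region(fdt, demo_h_include, NULL, &reg,
			       path, sizeof(path), flags, &state);
	while (ret == 0) {
		/* consume reg.offset / reg.size here (e.g. hash those bytes) */
		ret = fdt_next_region(fdt, demo_h_include, NULL, &reg,
				      path, sizeof(path), flags, &state);
	}
	return ret == -FDT_ERR_NOTFOUND ? 0 : ret;
}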
+ * Using the same guard name as that of scripts/dtc/libfdt/libfdt_env.h + * prevents it from being included. + */ +#ifndef LIBFDT_ENV_H +#define LIBFDT_ENV_H + +#include + +#include + +typedef __be16 fdt16_t; +typedef __be32 fdt32_t; +typedef __be64 fdt64_t; + +#define fdt32_to_cpu(x) be32_to_cpu(x) +#define cpu_to_fdt32(x) cpu_to_be32(x) +#define fdt64_to_cpu(x) be64_to_cpu(x) +#define cpu_to_fdt64(x) cpu_to_be64(x) + +/* U-Boot: for strtoul in fdt_overlay.c */ +#include + +#define strtoul(cp, endp, base) simple_strtoul(cp, endp, base) + +#endif /* LIBFDT_ENV_H */ +#endif diff --git a/include/linux/linkage.h b/include/linux/linkage.h new file mode 100644 index 0000000..0b24111 --- /dev/null +++ b/include/linux/linkage.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * U-Boot - linkage.h + * + * Copyright (c) 2005-2007 Analog Devices Inc. + */ + +#ifndef _LINUX_LINKAGE_H +#define _LINUX_LINKAGE_H + +#include + +/* Some toolchains use other characters (e.g. '`') to mark new line in macro */ +#ifndef ASM_NL +#define ASM_NL ; +#endif + +#ifdef __cplusplus +#define CPP_ASMLINKAGE extern "C" +#else +#define CPP_ASMLINKAGE +#endif + +#ifndef asmlinkage +#define asmlinkage CPP_ASMLINKAGE +#endif + +#define SYMBOL_NAME_STR(X) #X +#define SYMBOL_NAME(X) X +#ifdef __STDC__ +#define SYMBOL_NAME_LABEL(X) X##: +#else +#define SYMBOL_NAME_LABEL(X) X: +#endif + +#ifndef __ALIGN +#define __ALIGN .align 4 +#endif + +#ifndef __ALIGN_STR +#define __ALIGN_STR ".align 4" +#endif + +#ifdef __ASSEMBLY__ + +#define ALIGN __ALIGN +#define ALIGN_STR __ALIGN_STR + +#define LENTRY(name) \ + ALIGN ASM_NL \ + SYMBOL_NAME_LABEL(name) + +#define ENTRY(name) \ + .globl SYMBOL_NAME(name) ASM_NL \ + LENTRY(name) + +#define WEAK(name) \ + .weak SYMBOL_NAME(name) ASM_NL \ + LENTRY(name) + +#ifndef END +#define END(name) \ + .size name, .-name +#endif + +#ifndef ENDPROC +#define ENDPROC(name) \ + .type name STT_FUNC ASM_NL \ + END(name) +#endif + +#endif + +#endif diff --git a/include/linux/linux_string.h b/include/linux/linux_string.h new file mode 100644 index 0000000..192b4c9 --- /dev/null +++ b/include/linux/linux_string.h @@ -0,0 +1,8 @@ +#ifndef _LINUX_LINUX_STRING_H_ +#define _LINUX_LINUX_STRING_H_ + +extern char * skip_spaces(const char *); + +extern char *strim(char *); + +#endif diff --git a/include/linux/list.h b/include/linux/list.h new file mode 100644 index 0000000..5b8d1df --- /dev/null +++ b/include/linux/list.h @@ -0,0 +1,685 @@ +#ifndef _LINUX_LIST_H +#define _LINUX_LIST_H + +#include +#include + +#ifndef ARCH_HAS_PREFETCH +#define ARCH_HAS_PREFETCH +static inline void prefetch(const void *x) {;} +#endif + +/* + * Simple doubly linked list implementation. + * + * Some of the internal functions ("__xxx") are useful when + * manipulating whole lists rather than single entries, as + * sometimes we already know the next/prev entries and we can + * generate better code by using them directly rather than + * using the generic single-entry routines. + */ + +struct list_head { + struct list_head *next, *prev; +}; + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +static inline void INIT_LIST_HEAD(struct list_head *list) +{ + list->next = list; + list->prev = list; +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! 
+ */ +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} + +/** + * list_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +static inline void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. + */ +static inline void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} + +/* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_del(struct list_head *prev, struct list_head *next) +{ + next->prev = prev; + prev->next = next; +} + +/** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty() on entry does not return true after this, the entry is + * in an undefined state. + */ +static inline void list_del(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; +} + +/** + * list_replace - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * If @old was empty, it will be overwritten. + */ +static inline void list_replace(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; +} + +static inline void list_replace_init(struct list_head *old, + struct list_head *new) +{ + list_replace(old, new); + INIT_LIST_HEAD(old); +} + +/** + * list_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. + */ +static inline void list_del_init(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + INIT_LIST_HEAD(entry); +} + +/** + * list_move - delete from one list and add as another's head + * @list: the entry to move + * @head: the head that will precede our entry + */ +static inline void list_move(struct list_head *list, struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add(list, head); +} + +/** + * list_move_tail - delete from one list and add as another's tail + * @list: the entry to move + * @head: the head that will follow our entry + */ +static inline void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del(list->prev, list->next); + list_add_tail(list, head); +} + +/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. 
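/*
 * Editor's note: a minimal usage sketch (not part of the imported header).
 * The embedding struct is hypothetical; the pattern is always the same:
 * embed a list_head, add with list_add()/list_add_tail(), remove with
 * list_del().
 */
struct demo_item {
	int value;
	struct list_head node;	/* links this item into a demo list */
};

static LIST_HEAD(demo_list);

static void demo_list_basics(void)
{
	static struct demo_item a = { .value = 1 };
	static struct demo_item b = { .value = 2 };

	list_add_tail(&a.node, &demo_list);	/* queue order: a, then b */
	list_add_tail(&b.node, &demo_list);

	list_del(&a.node);	/* a.node is now poisoned, not reusable */
	/* list_del_init(&a.node) would instead leave it ready for re-adding */
}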
+ */ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + +/** + * list_empty_careful - tests whether a list is empty and not being modified + * @head: the list to test + * + * Description: + * tests whether a list is empty _and_ checks that no other CPU might be + * in the process of modifying either member (next or prev) + * + * NOTE: using list_empty_careful() without synchronization + * can only be safe if the only activity that can happen + * to the list entry is list_del_init(). Eg. it cannot be used + * if another CPU could re-list_add() it. + */ +static inline int list_empty_careful(const struct list_head *head) +{ + struct list_head *next = head->next; + return (next == head) && (next == head->prev); +} + +/** + * list_is_singular - tests whether a list has just one entry. + * @head: the list to test. + */ +static inline int list_is_singular(const struct list_head *head) +{ + return !list_empty(head) && (head->next == head->prev); +} + +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. + * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + +static inline void __list_splice(const struct list_head *list, + struct list_head *prev, + struct list_head *next) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * list_splice - join two lists, this is designed for stacks + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice(const struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); +} + +/** + * list_splice_init - join two lists and reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. 
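/*
 * Editor's note: a sketch (not part of the imported header) of the common
 * "drain a shared list onto a private one" idiom built from the helpers
 * above.  list_splice() leaves @list in an undefined state, so it must be
 * re-initialised afterwards -- which is exactly what list_splice_init(),
 * defined below, bundles into a single call.
 */
static void demo_drain(struct list_head *pending, struct list_head *local)
{
	if (list_empty(pending))
		return;

	list_splice(pending, local);	/* move every entry onto 'local' */
	INIT_LIST_HEAD(pending);	/* make 'pending' an empty list again */
}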
+ * + * The list at @list is reinitialised + */ +static inline void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); + INIT_LIST_HEAD(list); + } +} + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_first_entry - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +/** + * list_last_entry - get the last element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_last_entry(ptr, type, member) \ + list_entry((ptr)->prev, type, member) + +/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each(pos, head) \ + for (pos = (head)->next; prefetch(pos->next), pos != (head); \ + pos = pos->next) + +/** + * __list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * This variant differs from list_for_each() in that it's the + * simplest possible list iteration code, no prefetching is done. + * Use this for code that knows the list to be very short (empty + * or 1 entry) most of the time. + */ +#define __list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +/** + * list_for_each_prev - iterate over a list backwards + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each_prev(pos, head) \ + for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \ + pos = pos->prev) + +/** + * list_for_each_safe - iterate over a list safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +/** + * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. 
+ */ +#define list_for_each_prev_safe(pos, n, head) \ + for (pos = (head)->prev, n = pos->prev; \ + prefetch(pos->prev), pos != (head); \ + pos = n, n = pos->prev) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member); \ + prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_reverse - iterate backwards over list of given type. + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + */ +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member); \ + prefetch(pos->member.prev), &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + +/** + * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() + * @pos: the type * to use as a start point + * @head: the head of the list + * @member: the name of the list_struct within the struct. + * + * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). + */ +#define list_prepare_entry(pos, head, member) \ + ((pos) ? : list_entry(head, typeof(*pos), member)) + +/** + * list_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member); \ + prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_continue_reverse - iterate backwards from the given point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Start to iterate over list of given type backwards, continuing after + * the current position. + */ +#define list_for_each_entry_continue_reverse(pos, head, member) \ + for (pos = list_entry(pos->member.prev, typeof(*pos), member); \ + prefetch(pos->member.prev), &pos->member != (head); \ + pos = list_entry(pos->member.prev, typeof(*pos), member)) + +/** + * list_for_each_entry_from - iterate over list of given type from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing from current position. + */ +#define list_for_each_entry_from(pos, head, member) \ + for (; prefetch(pos->member.next), &pos->member != (head); \ + pos = list_entry(pos->member.next, typeof(*pos), member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. 
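/*
 * Editor's note: an iteration sketch (not part of the imported header),
 * reusing the hypothetical demo_item from the earlier sketch.  list_entry()
 * is just container_of(), so the loop cursor is the embedding struct rather
 * than the list_head itself.
 */
static int demo_sum(struct list_head *head)
{
	struct demo_item *it;
	int sum = 0;

	list_for_each_entry(it, head, node)
		sum += it->value;

	return sum;
}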
+ */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_entry((head)->next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_continue + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type, continuing after current point, + * safe against removal of list entry. + */ +#define list_for_each_entry_safe_continue(pos, n, head, member) \ + for (pos = list_entry(pos->member.next, typeof(*pos), member), \ + n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_from + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate over list of given type from current point, safe against + * removal of list entry. + */ +#define list_for_each_entry_safe_from(pos, n, head, member) \ + for (n = list_entry(pos->member.next, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, typeof(*n), member)) + +/** + * list_for_each_entry_safe_reverse + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_struct within the struct. + * + * Iterate backwards over list of given type, safe against removal + * of list entry. + */ +#define list_for_each_entry_safe_reverse(pos, n, head, member) \ + for (pos = list_entry((head)->prev, typeof(*pos), member), \ + n = list_entry(pos->member.prev, typeof(*pos), member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.prev, typeof(*n), member)) + +/* + * Double linked lists with a single pointer list head. + * Mostly useful for hash tables where the two pointer list head is + * too wasteful. + * You lose the ability to access the tail in O(1). 
+ */ + +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +#define HLIST_HEAD_INIT { .first = NULL } +#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } +#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) +static inline void INIT_HLIST_NODE(struct hlist_node *h) +{ + h->next = NULL; + h->pprev = NULL; +} + +static inline int hlist_unhashed(const struct hlist_node *h) +{ + return !h->pprev; +} + +static inline int hlist_empty(const struct hlist_head *h) +{ + return !h->first; +} + +static inline void __hlist_del(struct hlist_node *n) +{ + struct hlist_node *next = n->next; + struct hlist_node **pprev = n->pprev; + *pprev = next; + if (next) + next->pprev = pprev; +} + +static inline void hlist_del(struct hlist_node *n) +{ + __hlist_del(n); + n->next = LIST_POISON1; + n->pprev = LIST_POISON2; +} + +static inline void hlist_del_init(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + INIT_HLIST_NODE(n); + } +} + +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + n->next = first; + if (first) + first->pprev = &n->next; + h->first = n; + n->pprev = &h->first; +} + +/* next must be != NULL */ +static inline void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) +{ + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; + *(n->pprev) = n; +} + +static inline void hlist_add_after(struct hlist_node *n, + struct hlist_node *next) +{ + next->next = n->next; + n->next = next; + next->pprev = &n->next; + + if(next->next) + next->next->pprev = &next->next; +} + +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_for_each(pos, head) \ + for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \ + pos = pos->next) + +#define hlist_for_each_safe(pos, n, head) \ + for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ + pos = n) + +/** + * hlist_for_each_entry - iterate over list of given type + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry(tpos, pos, head, member) \ + for (pos = (head)->first; \ + pos && ({ prefetch(pos->next); 1;}) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_continue - iterate over a hlist continuing after current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_continue(tpos, pos, member) \ + for (pos = (pos)->next; \ + pos && ({ prefetch(pos->next); 1;}) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_from - iterate over a hlist continuing from current point + * @tpos: the type * to use as a loop cursor. + * @pos: the &struct hlist_node to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_from(tpos, pos, member) \ + for (; pos && ({ prefetch(pos->next); 1;}) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = pos->next) + +/** + * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @tpos: the type * to use as a loop cursor. 
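/*
 * Editor's note: an hlist sketch (not part of the imported header).  The
 * demo_obj type and bucket count are made up; note that this older form of
 * hlist_for_each_entry() takes a separate struct hlist_node cursor.
 */
#define DEMO_HASH_BITS	4
#define DEMO_BUCKETS	(1 << DEMO_HASH_BITS)

struct demo_obj {
	unsigned int key;
	struct hlist_node hash;
};

static struct hlist_head demo_table[DEMO_BUCKETS];	/* zeroed = all empty */

static void demo_hash_insert(struct demo_obj *obj)
{
	hlist_add_head(&obj->hash, &demo_table[obj->key % DEMO_BUCKETS]);
}

static struct demo_obj *demo_hash_find(unsigned int key)
{
	struct demo_obj *obj;
	struct hlist_node *pos;

	hlist_for_each_entry(obj, pos, &demo_table[key % DEMO_BUCKETS], hash)
		if (obj->key == key)
			return obj;
	return NULL;
}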
+ * @pos: the &struct hlist_node to use as a loop cursor. + * @n: another &struct hlist_node to use as temporary storage + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \ + for (pos = (head)->first; \ + pos && ({ n = pos->next; 1; }) && \ + ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \ + pos = n) + +#endif diff --git a/include/linux/list_sort.h b/include/linux/list_sort.h new file mode 100644 index 0000000..1a2df2e --- /dev/null +++ b/include/linux/list_sort.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_LIST_SORT_H +#define _LINUX_LIST_SORT_H + +#include + +struct list_head; + +void list_sort(void *priv, struct list_head *head, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b)); +#endif diff --git a/include/linux/log2.h b/include/linux/log2.h new file mode 100644 index 0000000..d4e32ec --- /dev/null +++ b/include/linux/log2.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Integer base 2 logarithm calculation + * + * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_LOG2_H +#define _LINUX_LOG2_H + +#include +#include + +/* + * non-constant log of base 2 calculators + * - the arch may override these in asm/bitops.h if they can be implemented + * more efficiently than using fls() and fls64() + * - the arch is not required to handle n==0 if implementing the fallback + */ +#ifndef CONFIG_ARCH_HAS_ILOG2_U32 +static inline __attribute__((const)) +int __ilog2_u32(u32 n) +{ + return fls(n) - 1; +} +#endif + +#ifndef CONFIG_ARCH_HAS_ILOG2_U64 +static inline __attribute__((const)) +int __ilog2_u64(u64 n) +{ + return fls64(n) - 1; +} +#endif + +/** + * is_power_of_2() - check if a value is a power of two + * @n: the value to check + * + * Determine whether some value is a power of two, where zero is + * *not* considered a power of two. + * Return: true if @n is a power of 2, otherwise false. + */ +static inline __attribute__((const)) +bool is_power_of_2(unsigned long n) +{ + return (n != 0 && ((n & (n - 1)) == 0)); +} + +/** + * __roundup_pow_of_two() - round up to nearest power of two + * @n: value to round up + */ +static inline __attribute__((const)) +unsigned long __roundup_pow_of_two(unsigned long n) +{ + return 1UL << fls_long(n - 1); +} + +/** + * __rounddown_pow_of_two() - round down to nearest power of two + * @n: value to round down + */ +static inline __attribute__((const)) +unsigned long __rounddown_pow_of_two(unsigned long n) +{ + return 1UL << (fls_long(n) - 1); +} + +/** + * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value + * @n: parameter + * + * constant-capable log of base 2 calculation + * - this can be used to initialise global variables from constant data, hence + * the massive ternary operator construction + * + * selects the appropriately-sized optimised version depending on sizeof(n) + */ +#define ilog2(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (n) < 2 ? 0 : \ + (n) & (1ULL << 63) ? 63 : \ + (n) & (1ULL << 62) ? 62 : \ + (n) & (1ULL << 61) ? 61 : \ + (n) & (1ULL << 60) ? 60 : \ + (n) & (1ULL << 59) ? 59 : \ + (n) & (1ULL << 58) ? 58 : \ + (n) & (1ULL << 57) ? 57 : \ + (n) & (1ULL << 56) ? 
56 : \ + (n) & (1ULL << 55) ? 55 : \ + (n) & (1ULL << 54) ? 54 : \ + (n) & (1ULL << 53) ? 53 : \ + (n) & (1ULL << 52) ? 52 : \ + (n) & (1ULL << 51) ? 51 : \ + (n) & (1ULL << 50) ? 50 : \ + (n) & (1ULL << 49) ? 49 : \ + (n) & (1ULL << 48) ? 48 : \ + (n) & (1ULL << 47) ? 47 : \ + (n) & (1ULL << 46) ? 46 : \ + (n) & (1ULL << 45) ? 45 : \ + (n) & (1ULL << 44) ? 44 : \ + (n) & (1ULL << 43) ? 43 : \ + (n) & (1ULL << 42) ? 42 : \ + (n) & (1ULL << 41) ? 41 : \ + (n) & (1ULL << 40) ? 40 : \ + (n) & (1ULL << 39) ? 39 : \ + (n) & (1ULL << 38) ? 38 : \ + (n) & (1ULL << 37) ? 37 : \ + (n) & (1ULL << 36) ? 36 : \ + (n) & (1ULL << 35) ? 35 : \ + (n) & (1ULL << 34) ? 34 : \ + (n) & (1ULL << 33) ? 33 : \ + (n) & (1ULL << 32) ? 32 : \ + (n) & (1ULL << 31) ? 31 : \ + (n) & (1ULL << 30) ? 30 : \ + (n) & (1ULL << 29) ? 29 : \ + (n) & (1ULL << 28) ? 28 : \ + (n) & (1ULL << 27) ? 27 : \ + (n) & (1ULL << 26) ? 26 : \ + (n) & (1ULL << 25) ? 25 : \ + (n) & (1ULL << 24) ? 24 : \ + (n) & (1ULL << 23) ? 23 : \ + (n) & (1ULL << 22) ? 22 : \ + (n) & (1ULL << 21) ? 21 : \ + (n) & (1ULL << 20) ? 20 : \ + (n) & (1ULL << 19) ? 19 : \ + (n) & (1ULL << 18) ? 18 : \ + (n) & (1ULL << 17) ? 17 : \ + (n) & (1ULL << 16) ? 16 : \ + (n) & (1ULL << 15) ? 15 : \ + (n) & (1ULL << 14) ? 14 : \ + (n) & (1ULL << 13) ? 13 : \ + (n) & (1ULL << 12) ? 12 : \ + (n) & (1ULL << 11) ? 11 : \ + (n) & (1ULL << 10) ? 10 : \ + (n) & (1ULL << 9) ? 9 : \ + (n) & (1ULL << 8) ? 8 : \ + (n) & (1ULL << 7) ? 7 : \ + (n) & (1ULL << 6) ? 6 : \ + (n) & (1ULL << 5) ? 5 : \ + (n) & (1ULL << 4) ? 4 : \ + (n) & (1ULL << 3) ? 3 : \ + (n) & (1ULL << 2) ? 2 : \ + 1) : \ + (sizeof(n) <= 4) ? \ + __ilog2_u32(n) : \ + __ilog2_u64(n) \ + ) + +/** + * roundup_pow_of_two - round the given value up to nearest power of two + * @n: parameter + * + * round the given value up to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define roundup_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (n == 1) ? 1 : \ + (1UL << (ilog2((n) - 1) + 1)) \ + ) : \ + __roundup_pow_of_two(n) \ + ) + +/** + * rounddown_pow_of_two - round the given value down to nearest power of two + * @n: parameter + * + * round the given value down to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define rounddown_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (1UL << ilog2(n))) : \ + __rounddown_pow_of_two(n) \ + ) + +static inline __attribute_const__ +int __order_base_2(unsigned long n) +{ + return n > 1 ? ilog2(n - 1) + 1 : 0; +} + +/** + * order_base_2 - calculate the (rounded up) base 2 order of the argument + * @n: parameter + * + * The first few values calculated by this routine: + * ob2(0) = 0 + * ob2(1) = 0 + * ob2(2) = 1 + * ob2(3) = 2 + * ob2(4) = 2 + * ob2(5) = 3 + * ... and so on. + */ +#define order_base_2(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0 || (n) == 1) ? 0 : \ + ilog2((n) - 1) + 1) : \ + __order_base_2(n) \ +) +#endif /* _LINUX_LOG2_H */ diff --git a/include/linux/lzo.h b/include/linux/lzo.h new file mode 100644 index 0000000..8981d04 --- /dev/null +++ b/include/linux/lzo.h @@ -0,0 +1,51 @@ +#ifndef __LZO_H__ +#define __LZO_H__ +/* + * LZO Public Kernel Interface + * A mini subset of the LZO real-time data compression library + * + * Copyright (C) 1996-2005 Markus F.X.J. 
Oberhumer + * + * The full LZO package can be found at: + * http://www.oberhumer.com/opensource/lzo/ + * + * Changed for kernel use by: + * Nitin Gupta + * Richard Purdie + */ + +#define LZO1X_MEM_COMPRESS (16384 * sizeof(unsigned char *)) +#define LZO1X_1_MEM_COMPRESS LZO1X_MEM_COMPRESS + +#define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3) + +/* This requires 'workmem' of size LZO1X_1_MEM_COMPRESS */ +int lzo1x_1_compress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len, void *wrkmem); + +/* safe decompression with overrun testing */ +int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len); + +/* decompress lzop format */ +int lzop_decompress(const unsigned char *src, size_t src_len, + unsigned char *dst, size_t *dst_len); + +/* check if the header is valid (based on magic numbers) */ +bool lzop_is_valid_header(const unsigned char *src); + +/* + * Return values (< 0 = Error) + */ +#define LZO_E_OK 0 +#define LZO_E_ERROR (-1) +#define LZO_E_OUT_OF_MEMORY (-2) +#define LZO_E_NOT_COMPRESSIBLE (-3) +#define LZO_E_INPUT_OVERRUN (-4) +#define LZO_E_OUTPUT_OVERRUN (-5) +#define LZO_E_LOOKBEHIND_OVERRUN (-6) +#define LZO_E_EOF_NOT_FOUND (-7) +#define LZO_E_INPUT_NOT_CONSUMED (-8) +#define LZO_E_NOT_YET_IMPLEMENTED (-9) + +#endif diff --git a/include/linux/math64.h b/include/linux/math64.h new file mode 100644 index 0000000..08584c8 --- /dev/null +++ b/include/linux/math64.h @@ -0,0 +1,257 @@ +#ifndef _LINUX_MATH64_H +#define _LINUX_MATH64_H + +#include +#include +#include + +#if BITS_PER_LONG == 64 + +#define div64_long(x, y) div64_s64((x), (y)) +#define div64_ul(x, y) div64_u64((x), (y)) + +/** + * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder + * + * This is commonly provided by 32bit archs to provide an optimized 64bit + * divide. 
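/*
 * Editor's note: a compress/decompress round-trip sketch (not part of the
 * imported header).  Buffer sizing follows the rules above: the caller
 * supplies LZO1X_1_MEM_COMPRESS bytes of scratch memory and sizes the
 * output with lzo1x_worst_compress().  malloc()/free() are stand-ins for
 * whatever allocator the caller actually has.
 */
static int demo_lzo_roundtrip(const unsigned char *in, size_t in_len,
			      unsigned char *back, size_t back_len)
{
	size_t out_len = lzo1x_worst_compress(in_len);
	unsigned char *out = malloc(out_len);
	void *wrkmem = malloc(LZO1X_1_MEM_COMPRESS);
	int ret = LZO_E_OUT_OF_MEMORY;

	if (out && wrkmem) {
		ret = lzo1x_1_compress(in, in_len, out, &out_len, wrkmem);
		if (ret == LZO_E_OK)
			/* the _safe variant bounds-checks against back_len */
			ret = lzo1x_decompress_safe(out, out_len, back, &back_len);
	}
	free(wrkmem);
	free(out);
	return ret;	/* LZO_E_OK on success, negative LZO_E_* otherwise */
}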
+ */ +static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +/** + * div_s64_rem - signed 64bit divide with 32bit divisor with remainder + */ +static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +/** + * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder + */ +static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder) +{ + *remainder = dividend % divisor; + return dividend / divisor; +} + +/** + * div64_u64 - unsigned 64bit divide with 64bit divisor + */ +static inline u64 div64_u64(u64 dividend, u64 divisor) +{ + return dividend / divisor; +} + +/** + * div64_s64 - signed 64bit divide with 64bit divisor + */ +static inline s64 div64_s64(s64 dividend, s64 divisor) +{ + return dividend / divisor; +} + +#elif BITS_PER_LONG == 32 + +#define div64_long(x, y) div_s64((x), (y)) +#define div64_ul(x, y) div_u64((x), (y)) + +#ifndef div_u64_rem +static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) +{ + *remainder = do_div(dividend, divisor); + return dividend; +} +#endif + +#ifndef div_s64_rem +extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder); +#endif + +#ifndef div64_u64_rem +extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder); +#endif + +#ifndef div64_u64 +extern u64 div64_u64(u64 dividend, u64 divisor); +#endif + +#ifndef div64_s64 +extern s64 div64_s64(s64 dividend, s64 divisor); +#endif + +#endif /* BITS_PER_LONG */ + +/** + * div_u64 - unsigned 64bit divide with 32bit divisor + * + * This is the most common 64bit divide and should be used if possible, + * as many 32bit archs can optimize this variant better than a full 64bit + * divide. + */ +#ifndef div_u64 +static inline u64 div_u64(u64 dividend, u32 divisor) +{ + u32 remainder; + return div_u64_rem(dividend, divisor, &remainder); +} +#endif + +/** + * div_s64 - signed 64bit divide with 32bit divisor + */ +#ifndef div_s64 +static inline s64 div_s64(s64 dividend, s32 divisor) +{ + s32 remainder; + return div_s64_rem(dividend, divisor, &remainder); +} +#endif + +u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); + +static __always_inline u32 +__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) +{ + u32 ret = 0; + + while (dividend >= divisor) { + /* The following asm() prevents the compiler from + optimising this loop into a modulo operation. 
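A usage sketch of the divide helpers: keeping the divisor 32-bit is exactly the case these wrappers optimise on 32-bit targets, where a plain '/' on two u64 values would pull in a full 64/64 libgcc division. The helper names below are made up for illustration:

	#include <linux/math64.h>
	#include <linux/types.h>

	/* ns -> whole seconds, leftover nanoseconds returned via *rem_ns */
	static u64 ns_to_sec_rem(u64 ns, u32 *rem_ns)
	{
		return div_u64_rem(ns, 1000000000U, rem_ns);
	}

	/* Average bytes per interval, staying on the fast 64/32 path. */
	static u64 bytes_per_interval(u64 total_bytes, u32 intervals)
	{
		return div_u64(total_bytes, intervals);
	}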
*/ + asm("" : "+rm"(dividend)); + + dividend -= divisor; + ret++; + } + + *remainder = dividend; + + return ret; +} + +#ifndef mul_u32_u32 +/* + * Many a GCC version messes this up and generates a 64x64 mult :-( + */ +static inline u64 mul_u32_u32(u32 a, u32 b) +{ + return (u64)a * b; +} +#endif + +#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__) + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +#endif /* mul_u64_u32_shr */ + +#ifndef mul_u64_u64_shr +static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift) +{ + return (u64)(((unsigned __int128)a * mul) >> shift); +} +#endif /* mul_u64_u64_shr */ + +#else + +#ifndef mul_u64_u32_shr +static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift) +{ + u32 ah, al; + u64 ret; + + al = a; + ah = a >> 32; + + ret = mul_u32_u32(al, mul) >> shift; + if (ah) + ret += mul_u32_u32(ah, mul) << (32 - shift); + + return ret; +} +#endif /* mul_u64_u32_shr */ + +#ifndef mul_u64_u64_shr +static inline u64 mul_u64_u64_shr(u64 a, u64 b, unsigned int shift) +{ + union { + u64 ll; + struct { +#ifdef __BIG_ENDIAN + u32 high, low; +#else + u32 low, high; +#endif + } l; + } rl, rm, rn, rh, a0, b0; + u64 c; + + a0.ll = a; + b0.ll = b; + + rl.ll = mul_u32_u32(a0.l.low, b0.l.low); + rm.ll = mul_u32_u32(a0.l.low, b0.l.high); + rn.ll = mul_u32_u32(a0.l.high, b0.l.low); + rh.ll = mul_u32_u32(a0.l.high, b0.l.high); + + /* + * Each of these lines computes a 64-bit intermediate result into "c", + * starting at bits 32-95. The low 32-bits go into the result of the + * multiplication, the high 32-bits are carried into the next step. + */ + rl.l.high = c = (u64)rl.l.high + rm.l.low + rn.l.low; + rh.l.low = c = (c >> 32) + rm.l.high + rn.l.high + rh.l.low; + rh.l.high = (c >> 32) + rh.l.high; + + /* + * The 128-bit result of the multiplication is in rl.ll and rh.ll, + * shift it right and throw away the high part of the result. + */ + if (shift == 0) + return rl.ll; + if (shift < 64) + return (rl.ll >> shift) | (rh.ll << (64 - shift)); + return rh.ll >> (shift & 63); +} +#endif /* mul_u64_u64_shr */ + +#endif + +#ifndef mul_u64_u32_div +static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) +{ + union { + u64 ll; + struct { +#ifdef __BIG_ENDIAN + u32 high, low; +#else + u32 low, high; +#endif + } l; + } u, rl, rh; + + u.ll = a; + rl.ll = mul_u32_u32(u.l.low, mul); + rh.ll = mul_u32_u32(u.l.high, mul) + rl.l.high; + + /* Bits 32-63 of the result will be in rh.l.low. */ + rl.l.high = do_div(rh.ll, divisor); + + /* Bits 0-31 of the result will be in rl.l.low. */ + do_div(rl.ll, divisor); + + rl.l.high = rh.l.low; + return rl.ll; +} +#endif /* mul_u64_u32_div */ + +#endif /* _LINUX_MATH64_H */ diff --git a/include/linux/mbus.h b/include/linux/mbus.h new file mode 100644 index 0000000..717cbea --- /dev/null +++ b/include/linux/mbus.h @@ -0,0 +1,73 @@ +/* + * Marvell MBUS common definitions. + * + * Copyright (C) 2008 Marvell Semiconductor + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + */ + +#ifndef __LINUX_MBUS_H +#define __LINUX_MBUS_H + +struct resource; + +struct mbus_dram_target_info { + /* + * The 4-bit MBUS target ID of the DRAM controller. 
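The multiply-shift helpers below are the usual way to apply a fixed-point scale factor without overflowing 64 bits. A sketch converting a free-running counter to nanoseconds; the mult/shift pair is a made-up example for a 25 MHz counter (40 ns per tick):

	#include <linux/math64.h>
	#include <linux/types.h>

	static u64 ticks_to_ns(u64 ticks)
	{
		const u32 mult = 83886080U;	/* 40 ns/tick << 21 */
		const unsigned int shift = 21;

		/* (ticks * mult) >> shift, computed with a wide intermediate */
		return mul_u64_u32_shr(ticks, mult, shift);
	}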
+ */ + u8 mbus_dram_target_id; + + /* + * The base address, size, and MBUS attribute ID for each + * of the possible DRAM chip selects. Peripherals are + * required to support at least 4 decode windows. + */ + int num_cs; + struct mbus_dram_window { + u8 cs_index; + u8 mbus_attr; + u32 base; + u32 size; + } cs[4]; +}; + +struct mvebu_mbus_state { + void __iomem *mbuswins_base; + void __iomem *sdramwins_base; + struct dentry *debugfs_root; + struct dentry *debugfs_sdram; + struct dentry *debugfs_devs; + const struct mvebu_mbus_soc_data *soc; + int hw_io_coherency; +}; + +/* Flags for PCI/PCIe address decoding regions */ +#define MVEBU_MBUS_PCI_IO 0x1 +#define MVEBU_MBUS_PCI_MEM 0x2 +#define MVEBU_MBUS_PCI_WA 0x3 + +/* + * Magic value that explicits that we don't need a remapping-capable + * address decoding window. + */ +#define MVEBU_MBUS_NO_REMAP (0xffffffff) + +/* Maximum size of a mbus window name */ +#define MVEBU_MBUS_MAX_WINNAME_SZ 32 + +const struct mbus_dram_target_info *mvebu_mbus_dram_info(void); +void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); +void mvebu_mbus_get_pcie_io_aperture(struct resource *res); +int mvebu_mbus_add_window_remap_by_id(unsigned int target, + unsigned int attribute, + phys_addr_t base, size_t size, + phys_addr_t remap); +int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute, + phys_addr_t base, size_t size); +int mvebu_mbus_del_window(phys_addr_t base, size_t size); +int mbus_dt_setup_win(struct mvebu_mbus_state *mbus, + u32 base, u32 size, u8 target, u8 attr); + +#endif /* __LINUX_MBUS_H */ diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h new file mode 100644 index 0000000..0644d92 --- /dev/null +++ b/include/linux/mc146818rtc.h @@ -0,0 +1,86 @@ +/* mc146818rtc.h - register definitions for the Real-Time-Clock / CMOS RAM + * Copyright Torsten Duwe 1993 + * derived from Data Sheet, Copyright Motorola 1984 (!). + * It was written to be part of the Linux operating system. + */ +/* permission is hereby granted to copy, modify and redistribute this code + * in terms of the GNU Library General Public License, Version 2 or later, + * at your option. + */ + +#ifndef _MC146818RTC_H +#define _MC146818RTC_H + +#include +#include /* get the user-level API */ +#include /* register access macros */ + +/********************************************************************** + * register summary + **********************************************************************/ +#define RTC_SECONDS 0 +#define RTC_SECONDS_ALARM 1 +#define RTC_MINUTES 2 +#define RTC_MINUTES_ALARM 3 +#define RTC_HOURS 4 +#define RTC_HOURS_ALARM 5 +/* RTC_*_alarm is always true if 2 MSBs are set */ +# define RTC_ALARM_DONT_CARE 0xC0 + +#define RTC_DAY_OF_WEEK 6 +#define RTC_DAY_OF_MONTH 7 +#define RTC_MONTH 8 +#define RTC_YEAR 9 + +/* control registers - Moto names + */ +#define RTC_REG_A 10 +#define RTC_REG_B 11 +#define RTC_REG_C 12 +#define RTC_REG_D 13 + +/********************************************************************** + * register details + **********************************************************************/ +#define RTC_FREQ_SELECT RTC_REG_A + +/* update-in-progress - set to "1" 244 microsecs before RTC goes off the bus, + * reset after update (may take 1.984ms @ 32768Hz RefClock) is complete, + * totalling to a max high interval of 2.228 ms. 
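A sketch of how a peripheral driver typically consumes the DRAM target information exported by mvebu_mbus_dram_info() before mirroring the windows into its own decode registers; here the windows are only summed, which is purely illustrative:

	#include <linux/mbus.h>
	#include <linux/types.h>

	static u32 dram_windows_total(void)
	{
		const struct mbus_dram_target_info *dram = mvebu_mbus_dram_info();
		u32 total = 0;
		int i;

		if (!dram)
			return 0;

		for (i = 0; i < dram->num_cs; i++)
			total += dram->cs[i].size;	/* one decode window per CS */

		return total;
	}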
+ */ +# define RTC_UIP 0x80 +# define RTC_DIV_CTL 0x70 + /* divider control: refclock values 4.194 / 1.049 MHz / 32.768 kHz */ +# define RTC_REF_CLCK_4MHZ 0x00 +# define RTC_REF_CLCK_1MHZ 0x10 +# define RTC_REF_CLCK_32KHZ 0x20 + /* 2 values for divider stage reset, others for "testing purposes only" */ +# define RTC_DIV_RESET1 0x60 +# define RTC_DIV_RESET2 0x70 + /* Periodic intr. / Square wave rate select. 0=none, 1=32.8kHz,... 15=2Hz */ +# define RTC_RATE_SELECT 0x0F + +/**********************************************************************/ +#define RTC_CONTROL RTC_REG_B +# define RTC_SET 0x80 /* disable updates for clock setting */ +# define RTC_PIE 0x40 /* periodic interrupt enable */ +# define RTC_AIE 0x20 /* alarm interrupt enable */ +# define RTC_UIE 0x10 /* update-finished interrupt enable */ +# define RTC_SQWE 0x08 /* enable square-wave output */ +# define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */ +# define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */ +# define RTC_DST_EN 0x01 /* auto switch DST - works f. USA only */ + +/**********************************************************************/ +#define RTC_INTR_FLAGS RTC_REG_C +/* caution - cleared by read */ +# define RTC_IRQF 0x80 /* any of the following 3 is active */ +# define RTC_PF 0x40 +# define RTC_AF 0x20 +# define RTC_UF 0x10 + +/**********************************************************************/ +#define RTC_VALID RTC_REG_D +# define RTC_VRT 0x80 /* valid RAM and time */ +/**********************************************************************/ +#endif /* _MC146818RTC_H */ diff --git a/include/linux/mdio.h b/include/linux/mdio.h new file mode 100644 index 0000000..6e821d9 --- /dev/null +++ b/include/linux/mdio.h @@ -0,0 +1,310 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * linux/mdio.h: definitions for MDIO (clause 45) transceivers + * Copyright 2006-2009 Solarflare Communications Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation, incorporated herein by reference. + */ + +#ifndef __LINUX_MDIO_H__ +#define __LINUX_MDIO_H__ + +#include + +/* MDIO Manageable Devices (MMDs). */ +#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment/ + * Physical Medium Dependent */ +#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */ +#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */ +#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */ +#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */ +#define MDIO_MMD_TC 6 /* Transmission Convergence */ +#define MDIO_MMD_AN 7 /* Auto-Negotiation */ +#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */ +#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */ +#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */ + +/* Generic MDIO registers. 
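A sketch of the classic read sequence these mc146818 bits imply: wait out RTC_UIP, read the register, then convert from BCD unless RTC_DM_BINARY is set. The CMOS_READ() accessor is assumed to come from the asm-level companion header and is not defined here:

	/* Assumes the platform provides CMOS_READ(reg). */
	static unsigned int rtc_read_seconds(void)
	{
		unsigned int sec;

		while (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
			;				/* wait out an update cycle */

		sec = CMOS_READ(RTC_SECONDS);
		if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY))
			sec = (sec & 0x0f) + (sec >> 4) * 10;	/* BCD -> binary */

		return sec;
	}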
*/ +#define MDIO_CTRL1 MII_BMCR +#define MDIO_STAT1 MII_BMSR +#define MDIO_DEVID1 MII_PHYSID1 +#define MDIO_DEVID2 MII_PHYSID2 +#define MDIO_SPEED 4 /* Speed ability */ +#define MDIO_DEVS1 5 /* Devices in package */ +#define MDIO_DEVS2 6 +#define MDIO_CTRL2 7 /* 10G control 2 */ +#define MDIO_STAT2 8 /* 10G status 2 */ +#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */ +#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */ +#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */ +#define MDIO_PKGID1 14 /* Package identifier */ +#define MDIO_PKGID2 15 +#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */ +#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */ +#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */ +#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */ +#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */ +#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */ +#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */ + +/* Media-dependent registers. */ +#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */ +#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */ +#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A. + * Lanes B-D are numbered 134-136. */ +#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */ +#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */ +#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */ +#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */ +#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */ + +/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */ +#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */ +#define MDIO_PMA_LASI_TXCTRL 0x9001 /* TX_ALARM control */ +#define MDIO_PMA_LASI_CTRL 0x9002 /* LASI control */ +#define MDIO_PMA_LASI_RXSTAT 0x9003 /* RX_ALARM status */ +#define MDIO_PMA_LASI_TXSTAT 0x9004 /* TX_ALARM status */ +#define MDIO_PMA_LASI_STAT 0x9005 /* LASI status */ + +/* Control register 1. */ +/* Enable extended speed selection */ +#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100) +/* All speed selection bits */ +#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c) +#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX +#define MDIO_CTRL1_LPOWER BMCR_PDOWN +#define MDIO_CTRL1_RESET BMCR_RESET +#define MDIO_PMA_CTRL1_LOOPBACK 0x0001 +#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000 +#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100 +#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK +#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART +#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE +#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */ +#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */ + +/* 10 Gb/s */ +#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00) +/* 10PASS-TS/2BASE-TL */ +#define MDIO_CTRL1_SPEED10P2B (MDIO_CTRL1_SPEEDSELEXT | 0x04) + +/* Status register 1. 
*/ +#define MDIO_STAT1_LPOWERABLE 0x0002 /* Low-power ability */ +#define MDIO_STAT1_LSTATUS BMSR_LSTATUS +#define MDIO_STAT1_FAULT 0x0080 /* Fault */ +#define MDIO_AN_STAT1_LPABLE 0x0001 /* Link partner AN ability */ +#define MDIO_AN_STAT1_ABLE BMSR_ANEGCAPABLE +#define MDIO_AN_STAT1_RFAULT BMSR_RFAULT +#define MDIO_AN_STAT1_COMPLETE BMSR_ANEGCOMPLETE +#define MDIO_AN_STAT1_PAGE 0x0040 /* Page received */ +#define MDIO_AN_STAT1_XNP 0x0080 /* Extended next page status */ + +/* Speed register. */ +#define MDIO_SPEED_10G 0x0001 /* 10G capable */ +#define MDIO_PMA_SPEED_2B 0x0002 /* 2BASE-TL capable */ +#define MDIO_PMA_SPEED_10P 0x0004 /* 10PASS-TS capable */ +#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */ +#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */ +#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */ +#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */ + +/* Device present registers. */ +#define MDIO_DEVS_PRESENT(devad) (1 << (devad)) +#define MDIO_DEVS_PMAPMD MDIO_DEVS_PRESENT(MDIO_MMD_PMAPMD) +#define MDIO_DEVS_WIS MDIO_DEVS_PRESENT(MDIO_MMD_WIS) +#define MDIO_DEVS_PCS MDIO_DEVS_PRESENT(MDIO_MMD_PCS) +#define MDIO_DEVS_PHYXS MDIO_DEVS_PRESENT(MDIO_MMD_PHYXS) +#define MDIO_DEVS_DTEXS MDIO_DEVS_PRESENT(MDIO_MMD_DTEXS) +#define MDIO_DEVS_TC MDIO_DEVS_PRESENT(MDIO_MMD_TC) +#define MDIO_DEVS_AN MDIO_DEVS_PRESENT(MDIO_MMD_AN) +#define MDIO_DEVS_C22EXT MDIO_DEVS_PRESENT(MDIO_MMD_C22EXT) +#define MDIO_DEVS_VEND1 MDIO_DEVS_PRESENT(MDIO_MMD_VEND1) +#define MDIO_DEVS_VEND2 MDIO_DEVS_PRESENT(MDIO_MMD_VEND2) + +#define MDIO_DEVS_LINK (MDIO_DEVS_PMAPMD | \ + MDIO_DEVS_WIS | \ + MDIO_DEVS_PCS | \ + MDIO_DEVS_PHYXS | \ + MDIO_DEVS_DTEXS | \ + MDIO_DEVS_AN) + +/* Control register 2. */ +#define MDIO_PMA_CTRL2_TYPE 0x000f /* PMA/PMD type selection */ +#define MDIO_PMA_CTRL2_10GBCX4 0x0000 /* 10GBASE-CX4 type */ +#define MDIO_PMA_CTRL2_10GBEW 0x0001 /* 10GBASE-EW type */ +#define MDIO_PMA_CTRL2_10GBLW 0x0002 /* 10GBASE-LW type */ +#define MDIO_PMA_CTRL2_10GBSW 0x0003 /* 10GBASE-SW type */ +#define MDIO_PMA_CTRL2_10GBLX4 0x0004 /* 10GBASE-LX4 type */ +#define MDIO_PMA_CTRL2_10GBER 0x0005 /* 10GBASE-ER type */ +#define MDIO_PMA_CTRL2_10GBLR 0x0006 /* 10GBASE-LR type */ +#define MDIO_PMA_CTRL2_10GBSR 0x0007 /* 10GBASE-SR type */ +#define MDIO_PMA_CTRL2_10GBLRM 0x0008 /* 10GBASE-LRM type */ +#define MDIO_PMA_CTRL2_10GBT 0x0009 /* 10GBASE-T type */ +#define MDIO_PMA_CTRL2_10GBKX4 0x000a /* 10GBASE-KX4 type */ +#define MDIO_PMA_CTRL2_10GBKR 0x000b /* 10GBASE-KR type */ +#define MDIO_PMA_CTRL2_1000BT 0x000c /* 1000BASE-T type */ +#define MDIO_PMA_CTRL2_1000BKX 0x000d /* 1000BASE-KX type */ +#define MDIO_PMA_CTRL2_100BTX 0x000e /* 100BASE-TX type */ +#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */ +#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */ +#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */ +#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */ +#define MDIO_PCS_CTRL2_10GBW 0x0002 /* 10GBASE-W type */ +#define MDIO_PCS_CTRL2_10GBT 0x0003 /* 10GBASE-T type */ + +/* Status register 2. 
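A small sketch of how the device-present masks above are meant to be used once a driver has read the MDIO_DEVS1 register from a clause 45 PHY; the register read itself is left to the MDIO bus driver and the "needed" set is an assumption of the example:

	#include <linux/mdio.h>
	#include <linux/types.h>

	/* True if the package advertises the MMDs needed for a basic link. */
	static int phy_package_usable(u16 devs1)
	{
		const u16 needed = MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS | MDIO_DEVS_AN;

		return (devs1 & needed) == needed;
	}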
*/ +#define MDIO_STAT2_RXFAULT 0x0400 /* Receive fault */ +#define MDIO_STAT2_TXFAULT 0x0800 /* Transmit fault */ +#define MDIO_STAT2_DEVPRST 0xc000 /* Device present */ +#define MDIO_STAT2_DEVPRST_VAL 0x8000 /* Device present value */ +#define MDIO_PMA_STAT2_LBABLE 0x0001 /* PMA loopback ability */ +#define MDIO_PMA_STAT2_10GBEW 0x0002 /* 10GBASE-EW ability */ +#define MDIO_PMA_STAT2_10GBLW 0x0004 /* 10GBASE-LW ability */ +#define MDIO_PMA_STAT2_10GBSW 0x0008 /* 10GBASE-SW ability */ +#define MDIO_PMA_STAT2_10GBLX4 0x0010 /* 10GBASE-LX4 ability */ +#define MDIO_PMA_STAT2_10GBER 0x0020 /* 10GBASE-ER ability */ +#define MDIO_PMA_STAT2_10GBLR 0x0040 /* 10GBASE-LR ability */ +#define MDIO_PMA_STAT2_10GBSR 0x0080 /* 10GBASE-SR ability */ +#define MDIO_PMD_STAT2_TXDISAB 0x0100 /* PMD TX disable ability */ +#define MDIO_PMA_STAT2_EXTABLE 0x0200 /* Extended abilities */ +#define MDIO_PMA_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */ +#define MDIO_PMA_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */ +#define MDIO_PCS_STAT2_10GBR 0x0001 /* 10GBASE-R capable */ +#define MDIO_PCS_STAT2_10GBX 0x0002 /* 10GBASE-X capable */ +#define MDIO_PCS_STAT2_10GBW 0x0004 /* 10GBASE-W capable */ +#define MDIO_PCS_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */ +#define MDIO_PCS_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */ + +/* Transmit disable register. */ +#define MDIO_PMD_TXDIS_GLOBAL 0x0001 /* Global PMD TX disable */ +#define MDIO_PMD_TXDIS_0 0x0002 /* PMD TX disable 0 */ +#define MDIO_PMD_TXDIS_1 0x0004 /* PMD TX disable 1 */ +#define MDIO_PMD_TXDIS_2 0x0008 /* PMD TX disable 2 */ +#define MDIO_PMD_TXDIS_3 0x0010 /* PMD TX disable 3 */ + +/* Receive signal detect register. */ +#define MDIO_PMD_RXDET_GLOBAL 0x0001 /* Global PMD RX signal detect */ +#define MDIO_PMD_RXDET_0 0x0002 /* PMD RX signal detect 0 */ +#define MDIO_PMD_RXDET_1 0x0004 /* PMD RX signal detect 1 */ +#define MDIO_PMD_RXDET_2 0x0008 /* PMD RX signal detect 2 */ +#define MDIO_PMD_RXDET_3 0x0010 /* PMD RX signal detect 3 */ + +/* Extended abilities register. */ +#define MDIO_PMA_EXTABLE_10GCX4 0x0001 /* 10GBASE-CX4 ability */ +#define MDIO_PMA_EXTABLE_10GBLRM 0x0002 /* 10GBASE-LRM ability */ +#define MDIO_PMA_EXTABLE_10GBT 0x0004 /* 10GBASE-T ability */ +#define MDIO_PMA_EXTABLE_10GBKX4 0x0008 /* 10GBASE-KX4 ability */ +#define MDIO_PMA_EXTABLE_10GBKR 0x0010 /* 10GBASE-KR ability */ +#define MDIO_PMA_EXTABLE_1000BT 0x0020 /* 1000BASE-T ability */ +#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */ +#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */ +#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */ + +/* PHY XGXS lane state register. */ +#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 +#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 +#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 +#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 +#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 + +/* PMA 10GBASE-T pair swap & polarity */ +#define MDIO_PMA_10GBT_SWAPPOL_ABNX 0x0001 /* Pair A/B uncrossed */ +#define MDIO_PMA_10GBT_SWAPPOL_CDNX 0x0002 /* Pair C/D uncrossed */ +#define MDIO_PMA_10GBT_SWAPPOL_AREV 0x0100 /* Pair A polarity reversed */ +#define MDIO_PMA_10GBT_SWAPPOL_BREV 0x0200 /* Pair B polarity reversed */ +#define MDIO_PMA_10GBT_SWAPPOL_CREV 0x0400 /* Pair C polarity reversed */ +#define MDIO_PMA_10GBT_SWAPPOL_DREV 0x0800 /* Pair D polarity reversed */ + +/* PMA 10GBASE-T TX power register. */ +#define MDIO_PMA_10GBT_TXPWR_SHORT 0x0001 /* Short-reach mode */ + +/* PMA 10GBASE-T SNR registers. 
*/ +/* Value is SNR margin in dB, clamped to range [-127, 127], plus 0x8000. */ +#define MDIO_PMA_10GBT_SNR_BIAS 0x8000 +#define MDIO_PMA_10GBT_SNR_MAX 127 + +/* PMA 10GBASE-R FEC ability register. */ +#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */ +#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */ + +/* PCS 10GBASE-R/-T status register 1. */ +#define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */ + +/* PCS 10GBASE-R/-T status register 2. */ +#define MDIO_PCS_10GBRT_STAT2_ERR 0x00ff +#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00 + +/* AN 10GBASE-T control register. */ +#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */ + +/* AN 10GBASE-T status register. */ +#define MDIO_AN_10GBT_STAT_LPTRR 0x0200 /* LP training reset req. */ +#define MDIO_AN_10GBT_STAT_LPLTABLE 0x0400 /* LP loop timing ability */ +#define MDIO_AN_10GBT_STAT_LP10G 0x0800 /* LP is 10GBT capable */ +#define MDIO_AN_10GBT_STAT_REMOK 0x1000 /* Remote OK */ +#define MDIO_AN_10GBT_STAT_LOCOK 0x2000 /* Local OK */ +#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */ +#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */ + +/* EEE Supported/Advertisement/LP Advertisement registers. + * + * EEE capability Register (3.20), Advertisement (7.60) and + * Link partner ability (7.61) registers have and can use the same identical + * bit masks. + */ +#define MDIO_AN_EEE_ADV_100TX 0x0002 /* Advertise 100TX EEE cap */ +#define MDIO_AN_EEE_ADV_1000T 0x0004 /* Advertise 1000T EEE cap */ +/* Note: the two defines above can be potentially used by the user-land + * and cannot remove them now. + * So, we define the new generic MDIO_EEE_100TX and MDIO_EEE_1000T macros + * using the previous ones (that can be considered obsolete). + */ +#define MDIO_EEE_100TX MDIO_AN_EEE_ADV_100TX /* 100TX EEE cap */ +#define MDIO_EEE_1000T MDIO_AN_EEE_ADV_1000T /* 1000T EEE cap */ +#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ +#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ +#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ +#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ + +/* LASI RX_ALARM control/status registers. */ +#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */ +#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */ +#define MDIO_PMA_LASI_RX_PMALFLT 0x0010 /* PMA/PMD RX local fault */ +#define MDIO_PMA_LASI_RX_OPTICPOWERFLT 0x0020 /* RX optical power fault */ +#define MDIO_PMA_LASI_RX_WISLFLT 0x0200 /* WIS local fault */ + +/* LASI TX_ALARM control/status registers. */ +#define MDIO_PMA_LASI_TX_PHYXSLFLT 0x0001 /* PHY XS TX local fault */ +#define MDIO_PMA_LASI_TX_PCSLFLT 0x0008 /* PCS TX local fault */ +#define MDIO_PMA_LASI_TX_PMALFLT 0x0010 /* PMA/PMD TX local fault */ +#define MDIO_PMA_LASI_TX_LASERPOWERFLT 0x0080 /* Laser output power fault */ +#define MDIO_PMA_LASI_TX_LASERTEMPFLT 0x0100 /* Laser temperature fault */ +#define MDIO_PMA_LASI_TX_LASERBICURRFLT 0x0200 /* Laser bias current fault */ + +/* LASI control/status registers. 
*/ +#define MDIO_PMA_LASI_LSALARM 0x0001 /* LS_ALARM enable/status */ +#define MDIO_PMA_LASI_TXALARM 0x0002 /* TX_ALARM enable/status */ +#define MDIO_PMA_LASI_RXALARM 0x0004 /* RX_ALARM enable/status */ + +/* Mapping between MDIO PRTAD/DEVAD and mii_ioctl_data::phy_id */ + +#define MDIO_PHY_ID_C45 0x8000 +#define MDIO_PHY_ID_PRTAD 0x03e0 +#define MDIO_PHY_ID_DEVAD 0x001f +#define MDIO_PHY_ID_C45_MASK \ + (MDIO_PHY_ID_C45 | MDIO_PHY_ID_PRTAD | MDIO_PHY_ID_DEVAD) + +#define MDIO_PRTAD_NONE (-1) +#define MDIO_DEVAD_NONE (-1) +#define MDIO_EMULATE_C22 4 + +static inline __u16 mdio_phy_id_c45(int prtad, int devad) +{ + return MDIO_PHY_ID_C45 | (prtad << 5) | devad; +} + +#endif /* __LINUX_MDIO_H__ */ diff --git a/include/linux/mii.h b/include/linux/mii.h new file mode 100644 index 0000000..21db032 --- /dev/null +++ b/include/linux/mii.h @@ -0,0 +1,228 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * linux/mii.h: definitions for MII-compatible transceivers + * Originally drivers/net/sunhme.h. + * + * Copyright (C) 1996, 1999, 2001 David S. Miller (davem@redhat.com) + */ + +#ifndef __LINUX_MII_H__ +#define __LINUX_MII_H__ + +/* Generic MII registers. */ +#define MII_BMCR 0x00 /* Basic mode control register */ +#define MII_BMSR 0x01 /* Basic mode status register */ +#define MII_PHYSID1 0x02 /* PHYS ID 1 */ +#define MII_PHYSID2 0x03 /* PHYS ID 2 */ +#define MII_ADVERTISE 0x04 /* Advertisement control reg */ +#define MII_LPA 0x05 /* Link partner ability reg */ +#define MII_EXPANSION 0x06 /* Expansion register */ +#define MII_CTRL1000 0x09 /* 1000BASE-T control */ +#define MII_STAT1000 0x0a /* 1000BASE-T status */ +#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */ +#define MII_MMD_DATA 0x0e /* MMD Access Data Register */ +#define MII_ESTATUS 0x0f /* Extended Status */ +#define MII_DCOUNTER 0x12 /* Disconnect counter */ +#define MII_FCSCOUNTER 0x13 /* False carrier counter */ +#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */ +#define MII_RERRCOUNTER 0x15 /* Receive error counter */ +#define MII_SREVISION 0x16 /* Silicon revision */ +#define MII_RESV1 0x17 /* Reserved... */ +#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */ +#define MII_PHYADDR 0x19 /* PHY address */ +#define MII_RESV2 0x1a /* Reserved... */ +#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */ +#define MII_NCONFIG 0x1c /* Network interface config */ + +/* Basic mode control register. */ +#define BMCR_RESV 0x003f /* Unused... */ +#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ +#define BMCR_CTST 0x0080 /* Collision test */ +#define BMCR_FULLDPLX 0x0100 /* Full duplex */ +#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */ +#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */ +#define BMCR_PDOWN 0x0800 /* Enable low power state */ +#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ +#define BMCR_SPEED100 0x2000 /* Select 100Mbps */ +#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */ +#define BMCR_RESET 0x8000 /* Reset to default state */ +#define BMCR_SPEED10 0x0000 /* Select 10Mbps */ + +/* Basic mode status register. */ +#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ +#define BMSR_JCD 0x0002 /* Jabber detected */ +#define BMSR_LSTATUS 0x0004 /* Link status */ +#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ +#define BMSR_RFAULT 0x0010 /* Remote fault detected */ +#define BMSR_ANEGCOMPLETE 0x0020 /* Auto-negotiation complete */ +#define BMSR_RESV 0x00c0 /* Unused... 
*/ +#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ +#define BMSR_100HALF2 0x0200 /* Can do 100BASE-T2 HDX */ +#define BMSR_100FULL2 0x0400 /* Can do 100BASE-T2 FDX */ +#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ +#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ +#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ +#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ +#define BMSR_100BASE4 0x8000 /* Can do 100mbps, 4k packets */ + +/* Advertisement control register. */ +#define ADVERTISE_SLCT 0x001f /* Selector bits */ +#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ +#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ +#define ADVERTISE_1000XFULL 0x0020 /* Try for 1000BASE-X full-duplex */ +#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ +#define ADVERTISE_1000XHALF 0x0040 /* Try for 1000BASE-X half-duplex */ +#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ +#define ADVERTISE_1000XPAUSE 0x0080 /* Try for 1000BASE-X pause */ +#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ +#define ADVERTISE_1000XPSE_ASYM 0x0100 /* Try for 1000BASE-X asym pause */ +#define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */ +#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ +#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymetric pause */ +#define ADVERTISE_RESV 0x1000 /* Unused... */ +#define ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */ +#define ADVERTISE_LPACK 0x4000 /* Ack link partners response */ +#define ADVERTISE_NPAGE 0x8000 /* Next page bit */ + +#define ADVERTISE_FULL (ADVERTISE_100FULL | ADVERTISE_10FULL | \ + ADVERTISE_CSMA) +#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ + ADVERTISE_100HALF | ADVERTISE_100FULL) + +/* Link partner ability register. */ +#define LPA_SLCT 0x001f /* Same as advertise selector */ +#define LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */ +#define LPA_1000XFULL 0x0020 /* Can do 1000BASE-X full-duplex */ +#define LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */ +#define LPA_1000XHALF 0x0040 /* Can do 1000BASE-X half-duplex */ +#define LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */ +#define LPA_1000XPAUSE 0x0080 /* Can do 1000BASE-X pause */ +#define LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */ +#define LPA_1000XPAUSE_ASYM 0x0100 /* Can do 1000BASE-X pause asym*/ +#define LPA_100BASE4 0x0200 /* Can do 100mbps 4k packets */ +#define LPA_PAUSE_CAP 0x0400 /* Can pause */ +#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ +#define LPA_RESV 0x1000 /* Unused... */ +#define LPA_RFAULT 0x2000 /* Link partner faulted */ +#define LPA_LPACK 0x4000 /* Link partner acked us */ +#define LPA_NPAGE 0x8000 /* Next page bit */ + +#define LPA_DUPLEX (LPA_10FULL | LPA_100FULL) +#define LPA_100 (LPA_100FULL | LPA_100HALF | LPA_100BASE4) + +/* Expansion register for auto-negotiation. */ +#define EXPANSION_NWAY 0x0001 /* Can do N-way auto-nego */ +#define EXPANSION_LCWP 0x0002 /* Got new RX page code word */ +#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ +#define EXPANSION_NPCAPABLE 0x0008 /* Link partner supports npage */ +#define EXPANSION_MFAULTS 0x0010 /* Multiple faults detected */ +#define EXPANSION_RESV 0xffe0 /* Unused... 
*/ + +#define ESTATUS_1000_XFULL 0x8000 /* Can do 1000BX Full */ +#define ESTATUS_1000_XHALF 0x4000 /* Can do 1000BX Half */ +#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ +#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ + +/* N-way test register. */ +#define NWAYTEST_RESV1 0x00ff /* Unused... */ +#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */ +#define NWAYTEST_RESV2 0xfe00 /* Unused... */ + +/* 1000BASE-T Control register */ +#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ +#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ +#define CTL1000_AS_MASTER 0x0800 +#define CTL1000_ENABLE_MASTER 0x1000 + +/* 1000BASE-T Status register */ +#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ +#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ +#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */ +#define LPA_1000HALF 0x0400 /* Link partner 1000BASE-T half duplex */ + +/* Flow control flags */ +#define FLOW_CTRL_TX 0x01 +#define FLOW_CTRL_RX 0x02 + +/* MMD Access Control register fields */ +#define MII_MMD_CTRL_DEVAD_MASK 0x1f /* Mask MMD DEVAD*/ +#define MII_MMD_CTRL_ADDR 0x0000 /* Address */ +#define MII_MMD_CTRL_NOINCR 0x4000 /* no post increment */ +#define MII_MMD_CTRL_INCR_RDWT 0x8000 /* post increment on reads & writes */ +#define MII_MMD_CTRL_INCR_ON_WT 0xC000 /* post increment on writes only */ + +/** + * mii_nway_result + * @negotiated: value of MII ANAR and'd with ANLPAR + * + * Given a set of MII abilities, check each bit and returns the + * currently supported media, in the priority order defined by + * IEEE 802.3u. We use LPA_xxx constants but note this is not the + * value of LPA solely, as described above. + * + * The one exception to IEEE 802.3u is that 100baseT4 is placed + * between 100T-full and 100T-half. If your phy does not support + * 100T4 this is fine. If your phy places 100T4 elsewhere in the + * priority order, you will need to roll your own function. + */ +static inline unsigned int mii_nway_result (unsigned int negotiated) +{ + unsigned int ret; + + if (negotiated & LPA_100FULL) + ret = LPA_100FULL; + else if (negotiated & LPA_100BASE4) + ret = LPA_100BASE4; + else if (negotiated & LPA_100HALF) + ret = LPA_100HALF; + else if (negotiated & LPA_10FULL) + ret = LPA_10FULL; + else + ret = LPA_10HALF; + + return ret; +} + +/** + * mii_duplex + * @duplex_lock: Non-zero if duplex is locked at full + * @negotiated: value of MII ANAR and'd with ANLPAR + * + * A small helper function for a common case. Returns one + * if the media is operating or locked at full duplex, and + * returns zero otherwise. 
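A sketch of the usual call site for mii_nway_result(): the driver reads MII_ADVERTISE and MII_LPA, ANDs them, and lets the helper pick the winning mode. The register reads are assumed to have happened elsewhere:

	#include <linux/mii.h>
	#include <linux/types.h>

	static void resolve_link(u16 advertise, u16 lpa, int *speed, int *duplex)
	{
		unsigned int neg = mii_nway_result(advertise & lpa);

		*speed  = (neg & LPA_100) ? 100 : 10;
		*duplex = (neg & LPA_DUPLEX) ? 1 : 0;	/* 1 = full duplex */
	}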
+ */ +static inline unsigned int mii_duplex (unsigned int duplex_lock, + unsigned int negotiated) +{ + if (duplex_lock) + return 1; + if (mii_nway_result(negotiated) & LPA_DUPLEX) + return 1; + return 0; +} + +/** + * mii_resolve_flowctrl_fdx + * @lcladv: value of MII ADVERTISE register + * @rmtadv: value of MII LPA register + * + * Resolve full duplex flow control as per IEEE 802.3-2005 table 28B-3 + */ +static inline u8 mii_resolve_flowctrl_fdx(u16 lcladv, u16 rmtadv) +{ + u8 cap = 0; + + if (lcladv & rmtadv & ADVERTISE_PAUSE_CAP) { + cap = FLOW_CTRL_TX | FLOW_CTRL_RX; + } else if (lcladv & rmtadv & ADVERTISE_PAUSE_ASYM) { + if (lcladv & ADVERTISE_PAUSE_CAP) + cap = FLOW_CTRL_RX; + else if (rmtadv & ADVERTISE_PAUSE_CAP) + cap = FLOW_CTRL_TX; + } + + return cap; +} + +#endif /* __LINUX_MII_H__ */ diff --git a/include/linux/mtd/bbm.h b/include/linux/mtd/bbm.h new file mode 100644 index 0000000..7239eb1 --- /dev/null +++ b/include/linux/mtd/bbm.h @@ -0,0 +1,161 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * linux/include/linux/mtd/bbm.h + * + * NAND family Bad Block Management (BBM) header file + * - Bad Block Table (BBT) implementation + * + * Copyright © 2005 Samsung Electronics + * Kyungmin Park + * + * Copyright © 2000-2005 + * Thomas Gleixner + * + */ +#ifndef __LINUX_MTD_BBM_H +#define __LINUX_MTD_BBM_H + +/* The maximum number of NAND chips in an array */ +#ifndef CONFIG_SYS_NAND_MAX_CHIPS +#define CONFIG_SYS_NAND_MAX_CHIPS 1 +#endif + +/** + * struct nand_bbt_descr - bad block table descriptor + * @options: options for this descriptor + * @pages: the page(s) where we find the bbt, used with option BBT_ABSPAGE + * when bbt is searched, then we store the found bbts pages here. + * Its an array and supports up to 8 chips now + * @offs: offset of the pattern in the oob area of the page + * @veroffs: offset of the bbt version counter in the oob are of the page + * @version: version read from the bbt page during scan + * @len: length of the pattern, if 0 no pattern check is performed + * @maxblocks: maximum number of blocks to search for a bbt. This number of + * blocks is reserved at the end of the device where the tables are + * written. + * @reserved_block_code: if non-0, this pattern denotes a reserved (rather than + * bad) block in the stored bbt + * @pattern: pattern to identify bad block table or factory marked good / + * bad blocks, can be NULL, if len = 0 + * + * Descriptor for the bad block table marker and the descriptor for the + * pattern which identifies good and bad blocks. The assumption is made + * that the pattern and the version count are always located in the oob area + * of the first block. 
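And the companion step for pause frames with mii_resolve_flowctrl_fdx(), again assuming the two advertisement registers have already been read; the result feeds the MAC's flow-control enables:

	#include <linux/mii.h>
	#include <linux/types.h>

	static void setup_pause(u16 lcladv, u16 rmtadv, int *tx_pause, int *rx_pause)
	{
		u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);

		*tx_pause = !!(cap & FLOW_CTRL_TX);
		*rx_pause = !!(cap & FLOW_CTRL_RX);
	}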
+ */ +struct nand_bbt_descr { + int options; + int pages[CONFIG_SYS_NAND_MAX_CHIPS]; + int offs; + int veroffs; + uint8_t version[CONFIG_SYS_NAND_MAX_CHIPS]; + int len; + int maxblocks; + int reserved_block_code; + uint8_t *pattern; +}; + +/* Options for the bad block table descriptors */ + +/* The number of bits used per block in the bbt on the device */ +#define NAND_BBT_NRBITS_MSK 0x0000000F +#define NAND_BBT_1BIT 0x00000001 +#define NAND_BBT_2BIT 0x00000002 +#define NAND_BBT_4BIT 0x00000004 +#define NAND_BBT_8BIT 0x00000008 +/* The bad block table is in the last good block of the device */ +#define NAND_BBT_LASTBLOCK 0x00000010 +/* The bbt is at the given page, else we must scan for the bbt */ +#define NAND_BBT_ABSPAGE 0x00000020 +/* bbt is stored per chip on multichip devices */ +#define NAND_BBT_PERCHIP 0x00000080 +/* bbt has a version counter at offset veroffs */ +#define NAND_BBT_VERSION 0x00000100 +/* Create a bbt if none exists */ +#define NAND_BBT_CREATE 0x00000200 +/* + * Create an empty BBT with no vendor information. Vendor's information may be + * unavailable, for example, if the NAND controller has a different data and OOB + * layout or if this information is already purged. Must be used in conjunction + * with NAND_BBT_CREATE. + */ +#define NAND_BBT_CREATE_EMPTY 0x00000400 +/* Write bbt if neccecary */ +#define NAND_BBT_WRITE 0x00002000 +/* Read and write back block contents when writing bbt */ +#define NAND_BBT_SAVECONTENT 0x00004000 +/* Search good / bad pattern on the first and the second page */ +#define NAND_BBT_SCAN2NDPAGE 0x00008000 +/* Search good / bad pattern on the last page of the eraseblock */ +#define NAND_BBT_SCANLASTPAGE 0x00010000 +/* + * Use a flash based bad block table. By default, OOB identifier is saved in + * OOB area. This option is passed to the default bad block table function. + */ +#define NAND_BBT_USE_FLASH 0x00020000 +/* + * Do not store flash based bad block table marker in the OOB area; store it + * in-band. + */ +#define NAND_BBT_NO_OOB 0x00040000 +/* + * Do not write new bad block markers to OOB; useful, e.g., when ECC covers + * entire spare area. Must be used with NAND_BBT_USE_FLASH. + */ +#define NAND_BBT_NO_OOB_BBM 0x00080000 + +/* + * Flag set by nand_create_default_bbt_descr(), marking that the nand_bbt_descr + * was allocated dynamicaly and must be freed in nand_release(). Has no meaning + * in nand_chip.bbt_options. 
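For concreteness, a descriptor in the style drivers usually feed to the BBT code: the table stored in one of the last good blocks, two bits per block, one table per chip, versioned, identified by a 4-byte pattern in OOB. The offsets and pattern bytes below are illustrative, not mandated by this header:

	#include <linux/mtd/bbm.h>
	#include <linux/types.h>

	static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };

	static struct nand_bbt_descr bbt_main_descr = {
		.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
			   NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
		.offs = 8,		/* pattern at OOB byte 8 */
		.len = 4,
		.veroffs = 12,		/* version counter right after it */
		.maxblocks = 4,		/* scan at most 4 blocks for the table */
		.pattern = bbt_pattern,
	};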
+ */ +#define NAND_BBT_DYNAMICSTRUCT 0x80000000 + +/* The maximum number of blocks to scan for a bbt */ +#define NAND_BBT_SCAN_MAXBLOCKS 4 + +/* + * Constants for oob configuration + */ +#define NAND_SMALL_BADBLOCK_POS 5 +#define NAND_LARGE_BADBLOCK_POS 0 +#define ONENAND_BADBLOCK_POS 0 + +/* + * Bad block scanning errors + */ +#define ONENAND_BBT_READ_ERROR 1 +#define ONENAND_BBT_READ_ECC_ERROR 2 +#define ONENAND_BBT_READ_FATAL_ERROR 4 + +/** + * struct bbm_info - [GENERIC] Bad Block Table data structure + * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @badblockpos: [INTERN] position of the bad block marker in the oob area + * @options: options for this descriptor + * @bbt: [INTERN] bad block table pointer + * @isbad_bbt: function to determine if a block is bad + * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for + * initial bad block scan + * @priv: [OPTIONAL] pointer to private bbm date + */ +struct bbm_info { + int bbt_erase_shift; + int badblockpos; + int options; + + uint8_t *bbt; + + int (*isbad_bbt)(struct mtd_info *mtd, loff_t ofs, int allowbbt); + + /* TODO Add more NAND specific fileds */ + struct nand_bbt_descr *badblock_pattern; + + void *priv; +}; + +/* OneNAND BBT interface */ +extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd); +extern int onenand_default_bbt(struct mtd_info *mtd); + +#endif /* __LINUX_MTD_BBM_H */ diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h new file mode 100644 index 0000000..3555518 --- /dev/null +++ b/include/linux/mtd/cfi.h @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2000-2010 David Woodhouse et al. + * + */ + +#ifndef __MTD_CFI_H__ +#define __MTD_CFI_H__ + +#define CFI_MFR_ANY 0xFFFF +#define CFI_ID_ANY 0xFFFF +#define CFI_MFR_CONTINUATION 0x007F + +#define CFI_MFR_AMD 0x0001 +#define CFI_MFR_AMIC 0x0037 +#define CFI_MFR_ATMEL 0x001F +#define CFI_MFR_EON 0x001C +#define CFI_MFR_FUJITSU 0x0004 +#define CFI_MFR_HYUNDAI 0x00AD +#define CFI_MFR_INTEL 0x0089 +#define CFI_MFR_MACRONIX 0x00C2 +#define CFI_MFR_NEC 0x0010 +#define CFI_MFR_PMC 0x009D +#define CFI_MFR_SAMSUNG 0x00EC +#define CFI_MFR_SHARP 0x00B0 +#define CFI_MFR_SST 0x00BF +#define CFI_MFR_ST 0x0020 /* STMicroelectronics */ +#define CFI_MFR_MICRON 0x002C /* Micron */ +#define CFI_MFR_TOSHIBA 0x0098 +#define CFI_MFR_WINBOND 0x00DA + +#endif /* __MTD_CFI_H__ */ diff --git a/include/linux/mtd/concat.h b/include/linux/mtd/concat.h new file mode 100644 index 0000000..c57e973 --- /dev/null +++ b/include/linux/mtd/concat.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * MTD device concatenation layer definitions + * + * Copyright © 2002 Robert Kaiser + * + */ + +#ifndef MTD_CONCAT_H +#define MTD_CONCAT_H + +struct mtd_info *mtd_concat_create( + struct mtd_info *subdev[], /* subdevices to concatenate */ + int num_devs, /* number of subdevices */ +#ifndef __UBOOT__ + const char *name); /* name for the new device */ +#else + char *name); /* name for the new device */ +#endif + +void mtd_concat_destroy(struct mtd_info *mtd); + +#endif diff --git a/include/linux/mtd/doc2000.h b/include/linux/mtd/doc2000.h new file mode 100644 index 0000000..d57f8da --- /dev/null +++ b/include/linux/mtd/doc2000.h @@ -0,0 +1,207 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Linux driver for Disk-On-Chip devices + * + * Copyright © 1999 Machine Vision Holdings, Inc. 
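A trivial consumer of the CFI manufacturer IDs, e.g. for probe-time logging; only a handful of the codes are mapped and the function is purely illustrative:

	#include <linux/mtd/cfi.h>
	#include <linux/types.h>

	static const char *cfi_vendor_name(u16 mfr)
	{
		switch (mfr) {
		case CFI_MFR_AMD:	return "AMD";
		case CFI_MFR_INTEL:	return "Intel";
		case CFI_MFR_MACRONIX:	return "Macronix";
		case CFI_MFR_ST:	return "STMicroelectronics";
		case CFI_MFR_WINBOND:	return "Winbond";
		default:		return "unknown";
		}
	}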
+ * Copyright © 1999-2010 David Woodhouse + * Copyright © 2002-2003 Greg Ungerer + * Copyright © 2002-2003 SnapGear Inc + * + */ + +#ifndef __MTD_DOC2000_H__ +#define __MTD_DOC2000_H__ + +#include +#if 0 +#include +#endif + +#define DoC_Sig1 0 +#define DoC_Sig2 1 + +#define DoC_ChipID 0x1000 +#define DoC_DOCStatus 0x1001 +#define DoC_DOCControl 0x1002 +#define DoC_FloorSelect 0x1003 +#define DoC_CDSNControl 0x1004 +#define DoC_CDSNDeviceSelect 0x1005 +#define DoC_ECCConf 0x1006 +#define DoC_2k_ECCStatus 0x1007 + +#define DoC_CDSNSlowIO 0x100d +#define DoC_ECCSyndrome0 0x1010 +#define DoC_ECCSyndrome1 0x1011 +#define DoC_ECCSyndrome2 0x1012 +#define DoC_ECCSyndrome3 0x1013 +#define DoC_ECCSyndrome4 0x1014 +#define DoC_ECCSyndrome5 0x1015 +#define DoC_AliasResolution 0x101b +#define DoC_ConfigInput 0x101c +#define DoC_ReadPipeInit 0x101d +#define DoC_WritePipeTerm 0x101e +#define DoC_LastDataRead 0x101f +#define DoC_NOP 0x1020 + +#define DoC_Mil_CDSN_IO 0x0800 +#define DoC_2k_CDSN_IO 0x1800 + +#define DoC_Mplus_NOP 0x1002 +#define DoC_Mplus_AliasResolution 0x1004 +#define DoC_Mplus_DOCControl 0x1006 +#define DoC_Mplus_AccessStatus 0x1008 +#define DoC_Mplus_DeviceSelect 0x1008 +#define DoC_Mplus_Configuration 0x100a +#define DoC_Mplus_OutputControl 0x100c +#define DoC_Mplus_FlashControl 0x1020 +#define DoC_Mplus_FlashSelect 0x1022 +#define DoC_Mplus_FlashCmd 0x1024 +#define DoC_Mplus_FlashAddress 0x1026 +#define DoC_Mplus_FlashData0 0x1028 +#define DoC_Mplus_FlashData1 0x1029 +#define DoC_Mplus_ReadPipeInit 0x102a +#define DoC_Mplus_LastDataRead 0x102c +#define DoC_Mplus_LastDataRead1 0x102d +#define DoC_Mplus_WritePipeTerm 0x102e +#define DoC_Mplus_ECCSyndrome0 0x1040 +#define DoC_Mplus_ECCSyndrome1 0x1041 +#define DoC_Mplus_ECCSyndrome2 0x1042 +#define DoC_Mplus_ECCSyndrome3 0x1043 +#define DoC_Mplus_ECCSyndrome4 0x1044 +#define DoC_Mplus_ECCSyndrome5 0x1045 +#define DoC_Mplus_ECCConf 0x1046 +#define DoC_Mplus_Toggle 0x1046 +#define DoC_Mplus_DownloadStatus 0x1074 +#define DoC_Mplus_CtrlConfirm 0x1076 +#define DoC_Mplus_Power 0x1fff + +/* How to access the device? + * On ARM, it'll be mmap'd directly with 32-bit wide accesses. + * On PPC, it's mmap'd and 16-bit wide. 
+ * Others use readb/writeb + */ +#if defined(__arm__) +#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)))) +#define WriteDOC_(d, adr, reg) do{ *(volatile __u32 *)(((unsigned long)adr)+((reg)<<2)) = (__u32)d; wmb();} while(0) +#define DOC_IOREMAP_LEN 0x8000 +#elif defined(__ppc__) +#define ReadDOC_(adr, reg) ((unsigned char)(*(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)))) +#define WriteDOC_(d, adr, reg) do{ *(volatile __u16 *)(((unsigned long)adr)+((reg)<<1)) = (__u16)d; wmb();} while(0) +#define DOC_IOREMAP_LEN 0x4000 +#else +#define ReadDOC_(adr, reg) readb((void __iomem *)(adr) + (reg)) +#define WriteDOC_(d, adr, reg) writeb(d, (void __iomem *)(adr) + (reg)) +#define DOC_IOREMAP_LEN 0x2000 + +#endif + +#if defined(__i386__) || defined(__x86_64__) +#define USE_MEMCPY +#endif + +/* These are provided to directly use the DoC_xxx defines */ +#define ReadDOC(adr, reg) ReadDOC_(adr,DoC_##reg) +#define WriteDOC(d, adr, reg) WriteDOC_(d,adr,DoC_##reg) + +#define DOC_MODE_RESET 0 +#define DOC_MODE_NORMAL 1 +#define DOC_MODE_RESERVED1 2 +#define DOC_MODE_RESERVED2 3 + +#define DOC_MODE_CLR_ERR 0x80 +#define DOC_MODE_RST_LAT 0x10 +#define DOC_MODE_BDECT 0x08 +#define DOC_MODE_MDWREN 0x04 + +#define DOC_ChipID_Doc2k 0x20 +#define DOC_ChipID_Doc2kTSOP 0x21 /* internal number for MTD */ +#define DOC_ChipID_DocMil 0x30 +#define DOC_ChipID_DocMilPlus32 0x40 +#define DOC_ChipID_DocMilPlus16 0x41 + +#define CDSN_CTRL_FR_B 0x80 +#define CDSN_CTRL_FR_B0 0x40 +#define CDSN_CTRL_FR_B1 0x80 + +#define CDSN_CTRL_ECC_IO 0x20 +#define CDSN_CTRL_FLASH_IO 0x10 +#define CDSN_CTRL_WP 0x08 +#define CDSN_CTRL_ALE 0x04 +#define CDSN_CTRL_CLE 0x02 +#define CDSN_CTRL_CE 0x01 + +#define DOC_ECC_RESET 0 +#define DOC_ECC_ERROR 0x80 +#define DOC_ECC_RW 0x20 +#define DOC_ECC__EN 0x08 +#define DOC_TOGGLE_BIT 0x04 +#define DOC_ECC_RESV 0x02 +#define DOC_ECC_IGNORE 0x01 + +#define DOC_FLASH_CE 0x80 +#define DOC_FLASH_WP 0x40 +#define DOC_FLASH_BANK 0x02 + +/* We have to also set the reserved bit 1 for enable */ +#define DOC_ECC_EN (DOC_ECC__EN | DOC_ECC_RESV) +#define DOC_ECC_DIS (DOC_ECC_RESV) + +struct Nand { + char floor, chip; + unsigned long curadr; + unsigned char curmode; + /* Also some erase/write/pipeline info when we get that far */ +}; + +#define MAX_FLOORS 4 +#define MAX_CHIPS 4 + +#define MAX_FLOORS_MIL 1 +#define MAX_CHIPS_MIL 1 + +#define MAX_FLOORS_MPLUS 2 +#define MAX_CHIPS_MPLUS 1 + +#define ADDR_COLUMN 1 +#define ADDR_PAGE 2 +#define ADDR_COLUMN_PAGE 3 + +struct DiskOnChip { + unsigned long physadr; + void __iomem *virtadr; + unsigned long totlen; + unsigned char ChipID; /* Type of DiskOnChip */ + int ioreg; + + unsigned long mfr; /* Flash IDs - only one type of flash per device */ + unsigned long id; + int chipshift; + char page256; + char pageadrlen; + char interleave; /* Internal interleaving - Millennium Plus style */ + unsigned long erasesize; + + int curfloor; + int curchip; + + int numchips; + struct Nand *chips; + struct mtd_info *nextdoc; +/* XXX U-BOOT XXX */ +#if 0 + struct mutex lock; +#endif +}; + +int doc_decode_ecc(unsigned char sector[512], unsigned char ecc1[6]); + +/* XXX U-BOOT XXX */ +#if 1 +/* + * NAND Flash Manufacturer ID Codes + */ +#define NAND_MFR_TOSHIBA 0x98 +#define NAND_MFR_SAMSUNG 0xec +#endif + +#endif /* __MTD_DOC2000_H__ */ diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h new file mode 100644 index 0000000..666480d --- /dev/null +++ b/include/linux/mtd/flashchip.h @@ -0,0 +1,103 @@ +/* 
SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright © 2000 Red Hat UK Limited + * Copyright © 2000-2010 David Woodhouse + * + */ + +#ifndef __MTD_FLASHCHIP_H__ +#define __MTD_FLASHCHIP_H__ + +#ifndef __UBOOT__ +/* For spinlocks. sched.h includes spinlock.h from whichever directory it + * happens to be in - so we don't have to care whether we're on 2.2, which + * has asm/spinlock.h, or 2.4, which has linux/spinlock.h + */ +#include +#include +#endif + +typedef enum { + FL_READY, + FL_STATUS, + FL_CFI_QUERY, + FL_JEDEC_QUERY, + FL_ERASING, + FL_ERASE_SUSPENDING, + FL_ERASE_SUSPENDED, + FL_WRITING, + FL_WRITING_TO_BUFFER, + FL_OTP_WRITE, + FL_WRITE_SUSPENDING, + FL_WRITE_SUSPENDED, + FL_PM_SUSPENDED, + FL_SYNCING, + FL_UNLOADING, + FL_LOCKING, + FL_UNLOCKING, + FL_POINT, + FL_XIP_WHILE_ERASING, + FL_XIP_WHILE_WRITING, + FL_SHUTDOWN, + /* These 2 come from nand_state_t, which has been unified here */ + FL_READING, + FL_CACHEDPRG, + /* These 4 come from onenand_state_t, which has been unified here */ + FL_RESETING, + FL_OTPING, + FL_PREPARING_ERASE, + FL_VERIFYING_ERASE, + + FL_UNKNOWN +} flstate_t; + + + +/* NOTE: confusingly, this can be used to refer to more than one chip at a time, + if they're interleaved. This can even refer to individual partitions on + the same physical chip when present. */ + +struct flchip { + unsigned long start; /* Offset within the map */ + // unsigned long len; + /* We omit len for now, because when we group them together + we insist that they're all of the same size, and the chip size + is held in the next level up. If we get more versatile later, + it'll make it a damn sight harder to find which chip we want from + a given offset, and we'll want to add the per-chip length field + back in. + */ + int ref_point_counter; + flstate_t state; + flstate_t oldstate; + + unsigned int write_suspended:1; + unsigned int erase_suspended:1; + unsigned long in_progress_block_addr; + + struct mutex mutex; +#ifndef __UBOOT__ + wait_queue_head_t wq; /* Wait on here when we're waiting for the chip + to be ready */ +#endif + int word_write_time; + int buffer_write_time; + int erase_time; + + int word_write_time_max; + int buffer_write_time_max; + int erase_time_max; + + void *priv; +}; + +/* This is used to handle contention on write/erase operations + between partitions of the same physical chip. */ +struct flchip_shared { + struct mutex lock; + struct flchip *writing; + struct flchip *erasing; +}; + + +#endif /* __MTD_FLASHCHIP_H__ */ diff --git a/include/linux/mtd/fsl_upm.h b/include/linux/mtd/fsl_upm.h new file mode 100644 index 0000000..9999993 --- /dev/null +++ b/include/linux/mtd/fsl_upm.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * FSL UPM NAND driver + * + * Copyright (C) 2007 MontaVista Software, Inc. 
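A sketch of the state handshake struct flchip is built for: a read request arriving while an erase runs suspends the erase first. Real drivers also issue the chip's erase-suspend command and poll status, and hold chip->mutex around these transitions; only the bookkeeping is shown here:

	#include <linux/mtd/flashchip.h>

	/* Caller holds chip->mutex. */
	static int chip_ready_for_read(struct flchip *chip)
	{
		if (chip->state == FL_READY || chip->state == FL_STATUS)
			return 1;

		if (chip->state == FL_ERASING) {
			/* Issue erase-suspend to the chip here in a real driver. */
			chip->oldstate = FL_ERASING;
			chip->state = FL_ERASE_SUSPENDED;
			chip->erase_suspended = 1;
			return 1;
		}

		return 0;	/* busy with something we cannot interrupt */
	}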
+ * Anton Vorontsov + */ + +#ifndef __LINUX_MTD_NAND_FSL_UPM +#define __LINUX_MTD_NAND_FSL_UPM + +#include + +#define FSL_UPM_WAIT_RUN_PATTERN 0x1 +#define FSL_UPM_WAIT_WRITE_BYTE 0x2 +#define FSL_UPM_WAIT_WRITE_BUFFER 0x4 + +struct fsl_upm { + void __iomem *mdr; + void __iomem *mxmr; + void __iomem *mar; + void __iomem *io_addr; +}; + +struct fsl_upm_nand { + struct fsl_upm upm; + + int width; + int upm_cmd_offset; + int upm_addr_offset; + int upm_mar_chip_offset; + int wait_flags; + int (*dev_ready)(int chip_nr); + int chip_delay; + int chip_offset; + int chip_nr; + + /* no need to fill */ + int last_ctrl; +}; + +extern int fsl_upm_nand_init(struct nand_chip *chip, struct fsl_upm_nand *fun); + +#endif diff --git a/include/linux/mtd/fsmc_nand.h b/include/linux/mtd/fsmc_nand.h new file mode 100644 index 0000000..6079f9e --- /dev/null +++ b/include/linux/mtd/fsmc_nand.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * (C) Copyright 2010 + * Vipin Kumar, ST Micoelectronics, vipin.kumar@st.com. + */ + +#ifndef __FSMC_NAND_H__ +#define __FSMC_NAND_H__ + +#include + +struct fsmc_regs { + u32 ctrl; /* 0x00 */ + u8 reserved_1[0x40 - 0x04]; + u32 pc; /* 0x40 */ + u32 sts; /* 0x44 */ + u32 comm; /* 0x48 */ + u32 attrib; /* 0x4c */ + u32 ioata; /* 0x50 */ + u32 ecc1; /* 0x54 */ + u32 ecc2; /* 0x58 */ + u32 ecc3; /* 0x5c */ + u8 reserved_2[0xfe0 - 0x60]; + u32 peripid0; /* 0xfe0 */ + u32 peripid1; /* 0xfe4 */ + u32 peripid2; /* 0xfe8 */ + u32 peripid3; /* 0xfec */ + u32 pcellid0; /* 0xff0 */ + u32 pcellid1; /* 0xff4 */ + u32 pcellid2; /* 0xff8 */ + u32 pcellid3; /* 0xffc */ +}; + +/* ctrl register definitions */ +#define FSMC_WP (1 << 7) + +/* pc register definitions */ +#define FSMC_RESET (1 << 0) +#define FSMC_WAITON (1 << 1) +#define FSMC_ENABLE (1 << 2) +#define FSMC_DEVTYPE_NAND (1 << 3) +#define FSMC_DEVWID_8 (0 << 4) +#define FSMC_DEVWID_16 (1 << 4) +#define FSMC_ECCEN (1 << 6) +#define FSMC_ECCPLEN_512 (0 << 7) +#define FSMC_ECCPLEN_256 (1 << 7) +#define FSMC_TCLR_1 (1 << 9) +#define FSMC_TAR_1 (1 << 13) + +/* sts register definitions */ +#define FSMC_CODE_RDY (1 << 15) + +/* comm register definitions */ +#define FSMC_TSET_0 (0 << 0) +#define FSMC_TWAIT_6 (6 << 8) +#define FSMC_THOLD_4 (4 << 16) +#define FSMC_THIZ_1 (1 << 24) + +/* peripid2 register definitions */ +#define FSMC_REVISION_MSK (0xf) +#define FSMC_REVISION_SHFT (0x4) + +#define FSMC_VER8 0x8 + +/* + * There are 13 bytes of ecc for every 512 byte block and it has to be read + * consecutively and immediately after the 512 byte data block for hardware to + * generate the error bit offsets + * Managing the ecc bytes in the following way is easier. This way is similar to + * oobfree structure maintained already in u-boot nand driver + */ +#define FSMC_MAX_ECCPLACE_ENTRIES 32 + +struct fsmc_nand_eccplace { + u32 offset; + u32 length; +}; + +struct fsmc_eccplace { + struct fsmc_nand_eccplace eccplace[FSMC_MAX_ECCPLACE_ENTRIES]; +}; + +extern int fsmc_nand_init(struct nand_chip *nand); +#endif diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h new file mode 100644 index 0000000..e3549f0 --- /dev/null +++ b/include/linux/mtd/mtd.h @@ -0,0 +1,608 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright © 1999-2010 David Woodhouse et al. 
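A bring-up sketch for the FSMC NAND bank using the bit and timing macros above, assuming the controller's register block has already been mapped to "regs" and that readl()/writel() accessors are available; the exact timing choice is illustrative:

	#include <asm/io.h>
	#include <linux/mtd/fsmc_nand.h>

	static void fsmc_nand_bank_enable(struct fsmc_regs *regs)
	{
		/* 8-bit NAND device, controller enabled, wait feature on */
		writel(readl(&regs->pc) | FSMC_DEVWID_8 | FSMC_DEVTYPE_NAND |
		       FSMC_ENABLE | FSMC_WAITON, &regs->pc);
		writel(readl(&regs->pc) | FSMC_TCLR_1 | FSMC_TAR_1, &regs->pc);
		/* Common memory timings */
		writel(FSMC_THIZ_1 | FSMC_THOLD_4 | FSMC_TWAIT_6 | FSMC_TSET_0,
		       &regs->comm);
	}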
+ * + */ + +#ifndef __MTD_MTD_H__ +#define __MTD_MTD_H__ + +#ifndef __UBOOT__ +#include +#include +#include +#include + +#include + +#include +#else +#include +#include +#include +#include +#include +#if IS_ENABLED(CONFIG_DM) +#include +#endif + +#define MAX_MTD_DEVICES 32 +#endif + +#define MTD_ERASE_PENDING 0x01 +#define MTD_ERASING 0x02 +#define MTD_ERASE_SUSPEND 0x04 +#define MTD_ERASE_DONE 0x08 +#define MTD_ERASE_FAILED 0x10 + +#define MTD_FAIL_ADDR_UNKNOWN -1LL + +/* + * If the erase fails, fail_addr might indicate exactly which block failed. If + * fail_addr = MTD_FAIL_ADDR_UNKNOWN, the failure was not at the device level + * or was not specific to any particular block. + */ +struct erase_info { + struct mtd_info *mtd; + uint64_t addr; + uint64_t len; + uint64_t fail_addr; + u_long time; + u_long retries; + unsigned dev; + unsigned cell; + void (*callback) (struct erase_info *self); + u_long priv; + u_char state; + struct erase_info *next; + int scrub; +}; + +struct mtd_erase_region_info { + uint64_t offset; /* At which this region starts, from the beginning of the MTD */ + uint32_t erasesize; /* For this region */ + uint32_t numblocks; /* Number of blocks of erasesize in this region */ + unsigned long *lockmap; /* If keeping bitmap of locks */ +}; + +/** + * struct mtd_oob_ops - oob operation operands + * @mode: operation mode + * + * @len: number of data bytes to write/read + * + * @retlen: number of data bytes written/read + * + * @ooblen: number of oob bytes to write/read + * @oobretlen: number of oob bytes written/read + * @ooboffs: offset of oob data in the oob area (only relevant when + * mode = MTD_OPS_PLACE_OOB or MTD_OPS_RAW) + * @datbuf: data buffer - if NULL only oob data are read/written + * @oobbuf: oob data buffer + */ +struct mtd_oob_ops { + unsigned int mode; + size_t len; + size_t retlen; + size_t ooblen; + size_t oobretlen; + uint32_t ooboffs; + uint8_t *datbuf; + uint8_t *oobbuf; +}; + +#ifdef CONFIG_SYS_NAND_MAX_OOBFREE +#define MTD_MAX_OOBFREE_ENTRIES_LARGE CONFIG_SYS_NAND_MAX_OOBFREE +#else +#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32 +#endif + +#ifdef CONFIG_SYS_NAND_MAX_ECCPOS +#define MTD_MAX_ECCPOS_ENTRIES_LARGE CONFIG_SYS_NAND_MAX_ECCPOS +#else +#define MTD_MAX_ECCPOS_ENTRIES_LARGE 680 +#endif +/** + * struct mtd_oob_region - oob region definition + * @offset: region offset + * @length: region length + * + * This structure describes a region of the OOB area, and is used + * to retrieve ECC or free bytes sections. + * Each section is defined by an offset within the OOB area and a + * length. + */ +struct mtd_oob_region { + u32 offset; + u32 length; +}; + +/* + * struct mtd_ooblayout_ops - NAND OOB layout operations + * @ecc: function returning an ECC region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * ECC sections. + * @free: function returning a free region in the OOB area. + * Should return -ERANGE if %section exceeds the total number of + * free sections. + */ +struct mtd_ooblayout_ops { + int (*ecc)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); + int (*free)(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); +}; + +/* + * Internal ECC layout control structure. For historical reasons, there is a + * similar, smaller struct nand_ecclayout_user (in mtd-abi.h) that is retained + * for export to user-space via the ECCGETLAYOUT ioctl. + * nand_ecclayout should be expandable in the future simply by the above macros. 
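A toy implementation of the mtd_ooblayout_ops callback pair, for a hypothetical chip with a 64-byte spare area: 40 ECC bytes packed at the end, bytes 2-23 reported free, bytes 0-1 left for the bad-block marker. The numbers are assumptions of the example; -ERANGE comes from linux/errno.h:

	#include <linux/errno.h>
	#include <linux/mtd/mtd.h>

	static int toy_ooblayout_ecc(struct mtd_info *mtd, int section,
				     struct mtd_oob_region *oobecc)
	{
		if (section)
			return -ERANGE;

		oobecc->offset = 24;
		oobecc->length = 40;
		return 0;
	}

	static int toy_ooblayout_free(struct mtd_info *mtd, int section,
				      struct mtd_oob_region *oobfree)
	{
		if (section)
			return -ERANGE;

		oobfree->offset = 2;	/* bytes 0-1 hold the bad block marker */
		oobfree->length = 22;
		return 0;
	}

	static const struct mtd_ooblayout_ops toy_ooblayout_ops = {
		.ecc	= toy_ooblayout_ecc,
		.free	= toy_ooblayout_free,
	};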
+ */ +struct nand_ecclayout { + __u32 eccbytes; + __u32 eccpos[MTD_MAX_ECCPOS_ENTRIES_LARGE]; + __u32 oobavail; + struct nand_oobfree oobfree[MTD_MAX_OOBFREE_ENTRIES_LARGE]; +}; + +struct module; /* only needed for owner field in mtd_info */ + +struct mtd_info { + u_char type; + uint32_t flags; + uint64_t size; // Total size of the MTD + + /* "Major" erase size for the device. Naïve users may take this + * to be the only erase size available, or may use the more detailed + * information below if they desire + */ + uint32_t erasesize; + /* Minimal writable flash unit size. In case of NOR flash it is 1 (even + * though individual bits can be cleared), in case of NAND flash it is + * one NAND page (or half, or one-fourths of it), in case of ECC-ed NOR + * it is of ECC block size, etc. It is illegal to have writesize = 0. + * Any driver registering a struct mtd_info must ensure a writesize of + * 1 or larger. + */ + uint32_t writesize; + + /* + * Size of the write buffer used by the MTD. MTD devices having a write + * buffer can write multiple writesize chunks at a time. E.g. while + * writing 4 * writesize bytes to a device with 2 * writesize bytes + * buffer the MTD driver can (but doesn't have to) do 2 writesize + * operations, but not 4. Currently, all NANDs have writebufsize + * equivalent to writesize (NAND page size). Some NOR flashes do have + * writebufsize greater than writesize. + */ + uint32_t writebufsize; + + uint32_t oobsize; // Amount of OOB data per block (e.g. 16) + uint32_t oobavail; // Available OOB bytes per block + + /* + * If erasesize is a power of 2 then the shift is stored in + * erasesize_shift otherwise erasesize_shift is zero. Ditto writesize. + */ + unsigned int erasesize_shift; + unsigned int writesize_shift; + /* Masks based on erasesize_shift and writesize_shift */ + unsigned int erasesize_mask; + unsigned int writesize_mask; + + /* + * read ops return -EUCLEAN if max number of bitflips corrected on any + * one region comprising an ecc step equals or exceeds this value. + * Settable by driver, else defaults to ecc_strength. User can override + * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed; + * see Documentation/ABI/testing/sysfs-class-mtd for more detail. + */ + unsigned int bitflip_threshold; + + // Kernel-only stuff starts here. +#ifndef __UBOOT__ + const char *name; +#else + char *name; +#endif + int index; + + /* OOB layout description */ + const struct mtd_ooblayout_ops *ooblayout; + + /* ECC layout structure pointer - read only! */ + struct nand_ecclayout *ecclayout; + + /* the ecc step size. */ + unsigned int ecc_step_size; + + /* max number of correctible bit errors per ecc step */ + unsigned int ecc_strength; + + /* Data for variable erase regions. If numeraseregions is zero, + * it means that the whole device has erasesize as given above. + */ + int numeraseregions; + struct mtd_erase_region_info *eraseregions; + + /* + * Do not call via these pointers, use corresponding mtd_*() + * wrappers instead. 
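+ *
+ * Illustrative sketch (not from the original header; the offset and
+ * buffer size below are made up for the example). A caller reads
+ * through the wrapper, never through the hook:
+ *
+ *	size_t retlen;
+ *	u_char buf[64];
+ *	int ret = mtd_read(mtd, 0, sizeof(buf), &retlen, buf);
+ *
+ * mtd_read() adds the argument checking and bitflip-threshold
+ * accounting that a direct mtd->_read() call would bypass.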
+ */ + int (*_erase) (struct mtd_info *mtd, struct erase_info *instr); +#ifndef __UBOOT__ + int (*_point) (struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, void **virt, resource_size_t *phys); + int (*_unpoint) (struct mtd_info *mtd, loff_t from, size_t len); +#endif + unsigned long (*_get_unmapped_area) (struct mtd_info *mtd, + unsigned long len, + unsigned long offset, + unsigned long flags); + int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf); + int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf); + int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const u_char *buf); + int (*_read_oob) (struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops); + int (*_write_oob) (struct mtd_info *mtd, loff_t to, + struct mtd_oob_ops *ops); + int (*_get_fact_prot_info) (struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf); + int (*_read_fact_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len, size_t *retlen, u_char *buf); + int (*_get_user_prot_info) (struct mtd_info *mtd, size_t len, + size_t *retlen, struct otp_info *buf); + int (*_read_user_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len, size_t *retlen, u_char *buf); + int (*_write_user_prot_reg) (struct mtd_info *mtd, loff_t to, + size_t len, size_t *retlen, u_char *buf); + int (*_lock_user_prot_reg) (struct mtd_info *mtd, loff_t from, + size_t len); +#ifndef __UBOOT__ + int (*_writev) (struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t *retlen); +#endif + void (*_sync) (struct mtd_info *mtd); + int (*_lock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); + int (*_unlock) (struct mtd_info *mtd, loff_t ofs, uint64_t len); + int (*_is_locked) (struct mtd_info *mtd, loff_t ofs, uint64_t len); + int (*_block_isreserved) (struct mtd_info *mtd, loff_t ofs); + int (*_block_isbad) (struct mtd_info *mtd, loff_t ofs); + int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); +#ifndef __UBOOT__ + int (*_suspend) (struct mtd_info *mtd); + void (*_resume) (struct mtd_info *mtd); + void (*_reboot) (struct mtd_info *mtd); +#endif + /* + * If the driver is something smart, like UBI, it may need to maintain + * its own reference counting. The below functions are only for driver. + */ + int (*_get_device) (struct mtd_info *mtd); + void (*_put_device) (struct mtd_info *mtd); + +#ifndef __UBOOT__ + /* Backing device capabilities for this device + * - provides mmap capabilities + */ + struct backing_dev_info *backing_dev_info; + + struct notifier_block reboot_notifier; /* default mode before reboot */ +#endif + + /* ECC status information */ + struct mtd_ecc_stats ecc_stats; + /* Subpage shift (NAND) */ + int subpage_sft; + + void *priv; + + struct module *owner; +#ifndef __UBOOT__ + struct device dev; +#else + struct udevice *dev; +#endif + int usecount; + + /* MTD devices do not have any parent. MTD partitions do. */ + struct mtd_info *parent; + + /* + * Offset of the partition relatively to the parent offset. + * Is 0 for real MTD devices (ie. not partitions). + */ + u64 offset; + + /* + * List node used to add an MTD partition to the parent + * partition list. + */ + struct list_head node; + + /* + * List of partitions attached to this MTD device (the parent + * MTD device can itself be a partition). 
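+ *
+ * Illustrative sketch (assumption, not part of the original header):
+ * with the list helpers from linux/list.h the children can be walked
+ * through their @node member, do_something() being a placeholder:
+ *
+ *	struct mtd_info *slave;
+ *
+ *	list_for_each_entry(slave, &master->partitions, node)
+ *		do_something(slave);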
+ */ + struct list_head partitions; +}; + +#if IS_ENABLED(CONFIG_DM) +static inline void mtd_set_of_node(struct mtd_info *mtd, + const struct device_node *np) +{ + mtd->dev->node.np = np; +} + +static inline const struct device_node *mtd_get_of_node(struct mtd_info *mtd) +{ + return mtd->dev->node.np; +} +#else +struct device_node; + +static inline void mtd_set_of_node(struct mtd_info *mtd, + const struct device_node *np) +{ +} + +static inline const struct device_node *mtd_get_of_node(struct mtd_info *mtd) +{ + return NULL; +} +#endif + +static inline bool mtd_is_partition(const struct mtd_info *mtd) +{ + return mtd->parent; +} + +static inline bool mtd_has_partitions(const struct mtd_info *mtd) +{ + return !list_empty(&mtd->partitions); +} + +bool mtd_partitions_used(struct mtd_info *master); + +int mtd_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobecc); +int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte, + int *section, + struct mtd_oob_region *oobregion); +int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobfree); +int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf, + const u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf, + u8 *oobbuf, int start, int nbytes); +int mtd_ooblayout_count_freebytes(struct mtd_info *mtd); +int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd); + +static inline void mtd_set_ooblayout(struct mtd_info *mtd, + const struct mtd_ooblayout_ops *ooblayout) +{ + mtd->ooblayout = ooblayout; +} + +static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops) +{ + return ops->mode == MTD_OPS_AUTO_OOB ? 
mtd->oobavail : mtd->oobsize; +} + +int mtd_erase(struct mtd_info *mtd, struct erase_info *instr); +#ifndef __UBOOT__ +int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, + void **virt, resource_size_t *phys); +int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len); +#endif +unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len, + unsigned long offset, unsigned long flags); +int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, + u_char *buf); +int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, + const u_char *buf); +int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, + const u_char *buf); + +int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops); +int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops); + +int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, + struct otp_info *buf); +int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf); +int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, + struct otp_info *buf); +int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, u_char *buf); +int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, u_char *buf); +int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len); + +#ifndef __UBOOT__ +int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs, + unsigned long count, loff_t to, size_t *retlen); +#endif + +static inline void mtd_sync(struct mtd_info *mtd) +{ + if (mtd->_sync) + mtd->_sync(mtd); +} + +int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len); +int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len); +int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs); +int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs); +int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs); + +#ifndef __UBOOT__ +static inline int mtd_suspend(struct mtd_info *mtd) +{ + return mtd->_suspend ? 
mtd->_suspend(mtd) : 0; +} + +static inline void mtd_resume(struct mtd_info *mtd) +{ + if (mtd->_resume) + mtd->_resume(mtd); +} +#endif + +static inline uint32_t mtd_div_by_eb(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->erasesize_shift) + return sz >> mtd->erasesize_shift; + do_div(sz, mtd->erasesize); + return sz; +} + +static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->erasesize_shift) + return sz & mtd->erasesize_mask; + return do_div(sz, mtd->erasesize); +} + +static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->writesize_shift) + return sz >> mtd->writesize_shift; + do_div(sz, mtd->writesize); + return sz; +} + +static inline uint32_t mtd_mod_by_ws(uint64_t sz, struct mtd_info *mtd) +{ + if (mtd->writesize_shift) + return sz & mtd->writesize_mask; + return do_div(sz, mtd->writesize); +} + +static inline int mtd_has_oob(const struct mtd_info *mtd) +{ + return mtd->_read_oob && mtd->_write_oob; +} + +static inline int mtd_type_is_nand(const struct mtd_info *mtd) +{ + return mtd->type == MTD_NANDFLASH || mtd->type == MTD_MLCNANDFLASH; +} + +static inline int mtd_can_have_bb(const struct mtd_info *mtd) +{ + return !!mtd->_block_isbad; +} + + /* Kernel-side ioctl definitions */ + +struct mtd_partition; +struct mtd_part_parser_data; + +extern int mtd_device_parse_register(struct mtd_info *mtd, + const char * const *part_probe_types, + struct mtd_part_parser_data *parser_data, + const struct mtd_partition *defparts, + int defnr_parts); +#define mtd_device_register(master, parts, nr_parts) \ + mtd_device_parse_register(master, NULL, NULL, parts, nr_parts) +extern int mtd_device_unregister(struct mtd_info *master); +extern struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num); +extern int __get_mtd_device(struct mtd_info *mtd); +extern void __put_mtd_device(struct mtd_info *mtd); +extern struct mtd_info *get_mtd_device_nm(const char *name); +extern void put_mtd_device(struct mtd_info *mtd); + + +#ifndef __UBOOT__ +struct mtd_notifier { + void (*add)(struct mtd_info *mtd); + void (*remove)(struct mtd_info *mtd); + struct list_head list; +}; + + +extern void register_mtd_user (struct mtd_notifier *new); +extern int unregister_mtd_user (struct mtd_notifier *old); +#endif +void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size); + +#ifdef CONFIG_MTD_PARTITIONS +void mtd_erase_callback(struct erase_info *instr); +#else +static inline void mtd_erase_callback(struct erase_info *instr) +{ + if (instr->callback) + instr->callback(instr); +} +#endif + +static inline int mtd_is_bitflip(int err) { + return err == -EUCLEAN; +} + +static inline int mtd_is_eccerr(int err) { + return err == -EBADMSG; +} + +static inline int mtd_is_bitflip_or_eccerr(int err) { + return mtd_is_bitflip(err) || mtd_is_eccerr(err); +} + +unsigned mtd_mmap_capabilities(struct mtd_info *mtd); + +#ifdef __UBOOT__ +/* drivers/mtd/mtdcore.h */ +int add_mtd_device(struct mtd_info *mtd); +int del_mtd_device(struct mtd_info *mtd); + +#ifdef CONFIG_MTD_PARTITIONS +int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); +int del_mtd_partitions(struct mtd_info *); +#else +static inline int add_mtd_partitions(struct mtd_info *mtd, + const struct mtd_partition *parts, + int nparts) +{ + return 0; +} + +static inline int del_mtd_partitions(struct mtd_info *mtd) +{ + return 0; +} +#endif + +struct mtd_info *__mtd_next_device(int i); +#define mtd_for_each_device(mtd) \ + for ((mtd) = __mtd_next_device(0); \ + (mtd) != NULL; \ + (mtd) 
= __mtd_next_device(mtd->index + 1)) + +int mtd_arg_off(const char *arg, int *idx, loff_t *off, loff_t *size, + loff_t *maxsize, int devtype, uint64_t chipsize); +int mtd_arg_off_size(int argc, char *const argv[], int *idx, loff_t *off, + loff_t *size, loff_t *maxsize, int devtype, + uint64_t chipsize); + +/* drivers/mtd/mtdcore.c */ +void mtd_get_len_incl_bad(struct mtd_info *mtd, uint64_t offset, + const uint64_t length, uint64_t *len_incl_bad, + int *truncated); +bool mtd_dev_list_updated(void); + +/* drivers/mtd/mtd_uboot.c */ +int mtd_search_alternate_name(const char *mtdname, char *altname, + unsigned int max_len); + +#endif +#endif /* __MTD_MTD_H__ */ diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h new file mode 100644 index 0000000..13e8dd1 --- /dev/null +++ b/include/linux/mtd/nand.h @@ -0,0 +1,734 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2017 - Free Electrons + * + * Authors: + * Boris Brezillon + * Peter Pan + */ + +#ifndef __LINUX_MTD_NAND_H +#define __LINUX_MTD_NAND_H + +#include + +/** + * struct nand_memory_organization - Memory organization structure + * @bits_per_cell: number of bits per NAND cell + * @pagesize: page size + * @oobsize: OOB area size + * @pages_per_eraseblock: number of pages per eraseblock + * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number) + * @planes_per_lun: number of planes per LUN + * @luns_per_target: number of LUN per target (target is a synonym for die) + * @ntargets: total number of targets exposed by the NAND device + */ +struct nand_memory_organization { + unsigned int bits_per_cell; + unsigned int pagesize; + unsigned int oobsize; + unsigned int pages_per_eraseblock; + unsigned int eraseblocks_per_lun; + unsigned int planes_per_lun; + unsigned int luns_per_target; + unsigned int ntargets; +}; + +#define NAND_MEMORG(bpc, ps, os, ppe, epl, ppl, lpt, nt) \ + { \ + .bits_per_cell = (bpc), \ + .pagesize = (ps), \ + .oobsize = (os), \ + .pages_per_eraseblock = (ppe), \ + .eraseblocks_per_lun = (epl), \ + .planes_per_lun = (ppl), \ + .luns_per_target = (lpt), \ + .ntargets = (nt), \ + } + +/** + * struct nand_row_converter - Information needed to convert an absolute offset + * into a row address + * @lun_addr_shift: position of the LUN identifier in the row address + * @eraseblock_addr_shift: position of the eraseblock identifier in the row + * address + */ +struct nand_row_converter { + unsigned int lun_addr_shift; + unsigned int eraseblock_addr_shift; +}; + +/** + * struct nand_pos - NAND position object + * @target: the NAND target/die + * @lun: the LUN identifier + * @plane: the plane within the LUN + * @eraseblock: the eraseblock within the LUN + * @page: the page within the LUN + * + * These information are usually used by specific sub-layers to select the + * appropriate target/die and generate a row address to pass to the device. 
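+ *
+ * Illustrative sketch (not from the original header): a raw or SPI NAND
+ * layer typically resolves an absolute offset into a position first and
+ * then derives the row address that goes into the device address cycles:
+ *
+ *	struct nand_pos pos;
+ *	unsigned int row;
+ *
+ *	nanddev_offs_to_pos(nand, offs, &pos);
+ *	row = nanddev_pos_to_row(nand, &pos);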
+ */ +struct nand_pos { + unsigned int target; + unsigned int lun; + unsigned int plane; + unsigned int eraseblock; + unsigned int page; +}; + +/** + * struct nand_page_io_req - NAND I/O request object + * @pos: the position this I/O request is targeting + * @dataoffs: the offset within the page + * @datalen: number of data bytes to read from/write to this page + * @databuf: buffer to store data in or get data from + * @ooboffs: the OOB offset within the page + * @ooblen: the number of OOB bytes to read from/write to this page + * @oobbuf: buffer to store OOB data in or get OOB data from + * @mode: one of the %MTD_OPS_XXX mode + * + * This object is used to pass per-page I/O requests to NAND sub-layers. This + * way all useful information are already formatted in a useful way and + * specific NAND layers can focus on translating these information into + * specific commands/operations. + */ +struct nand_page_io_req { + struct nand_pos pos; + unsigned int dataoffs; + unsigned int datalen; + union { + const void *out; + void *in; + } databuf; + unsigned int ooboffs; + unsigned int ooblen; + union { + const void *out; + void *in; + } oobbuf; + int mode; +}; + +/** + * struct nand_ecc_req - NAND ECC requirements + * @strength: ECC strength + * @step_size: ECC step/block size + */ +struct nand_ecc_req { + unsigned int strength; + unsigned int step_size; +}; + +#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) } + +/** + * struct nand_bbt - bad block table object + * @cache: in memory BBT cache + */ +struct nand_bbt { + unsigned long *cache; +}; + +struct nand_device; + +/** + * struct nand_ops - NAND operations + * @erase: erase a specific block. No need to check if the block is bad before + * erasing, this has been taken care of by the generic NAND layer + * @markbad: mark a specific block bad. No need to check if the block is + * already marked bad, this has been taken care of by the generic + * NAND layer. This method should just write the BBM (Bad Block + * Marker) so that future call to struct_nand_ops->isbad() return + * true + * @isbad: check whether a block is bad or not. This method should just read + * the BBM and return whether the block is bad or not based on what it + * reads + * + * These are all low level operations that should be implemented by specialized + * NAND layers (SPI NAND, raw NAND, ...). + */ +struct nand_ops { + int (*erase)(struct nand_device *nand, const struct nand_pos *pos); + int (*markbad)(struct nand_device *nand, const struct nand_pos *pos); + bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos); +}; + +/** + * struct nand_device - NAND device + * @mtd: MTD instance attached to the NAND device + * @memorg: memory layout + * @eccreq: ECC requirements + * @rowconv: position to row address converter + * @bbt: bad block table info + * @ops: NAND operations attached to the NAND device + * + * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND) + * should declare their own NAND object embedding a nand_device struct (that's + * how inheritance is done). + * struct_nand_device->memorg and struct_nand_device->eccreq should be filled + * at device detection time to reflect the NAND device + * capabilities/requirements. Once this is done nanddev_init() can be called. + * It will take care of converting NAND information into MTD ones, which means + * the specialized NAND layers should never manually tweak + * struct_nand_device->mtd except for the ->_read/write() hooks. 
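+ *
+ * Illustrative sketch (hypothetical type name, not from the original
+ * source): a specialized layer embeds the generic object and gets back
+ * to its private structure with container_of():
+ *
+ *	struct my_spinand {
+ *		struct nand_device base;
+ *		void *priv;
+ *	};
+ *
+ *	static inline struct my_spinand *to_my_spinand(struct nand_device *nand)
+ *	{
+ *		return container_of(nand, struct my_spinand, base);
+ *	}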
+ */ +struct nand_device { + struct mtd_info *mtd; + struct nand_memory_organization memorg; + struct nand_ecc_req eccreq; + struct nand_row_converter rowconv; + struct nand_bbt bbt; + const struct nand_ops *ops; +}; + +/** + * struct nand_io_iter - NAND I/O iterator + * @req: current I/O request + * @oobbytes_per_page: maximum number of OOB bytes per page + * @dataleft: remaining number of data bytes to read/write + * @oobleft: remaining number of OOB bytes to read/write + * + * Can be used by specialized NAND layers to iterate over all pages covered + * by an MTD I/O request, which should greatly simplifies the boiler-plate + * code needed to read/write data from/to a NAND device. + */ +struct nand_io_iter { + struct nand_page_io_req req; + unsigned int oobbytes_per_page; + unsigned int dataleft; + unsigned int oobleft; +}; + +/** + * mtd_to_nanddev() - Get the NAND device attached to the MTD instance + * @mtd: MTD instance + * + * Return: the NAND device embedding @mtd. + */ +static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd) +{ + return mtd->priv; +} + +/** + * nanddev_to_mtd() - Get the MTD device attached to a NAND device + * @nand: NAND device + * + * Return: the MTD device embedded in @nand. + */ +static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand) +{ + return nand->mtd; +} + +/* + * nanddev_bits_per_cell() - Get the number of bits per cell + * @nand: NAND device + * + * Return: the number of bits per cell. + */ +static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand) +{ + return nand->memorg.bits_per_cell; +} + +/** + * nanddev_page_size() - Get NAND page size + * @nand: NAND device + * + * Return: the page size. + */ +static inline size_t nanddev_page_size(const struct nand_device *nand) +{ + return nand->memorg.pagesize; +} + +/** + * nanddev_per_page_oobsize() - Get NAND OOB size + * @nand: NAND device + * + * Return: the OOB size. + */ +static inline unsigned int +nanddev_per_page_oobsize(const struct nand_device *nand) +{ + return nand->memorg.oobsize; +} + +/** + * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock + * @nand: NAND device + * + * Return: the number of pages per eraseblock. + */ +static inline unsigned int +nanddev_pages_per_eraseblock(const struct nand_device *nand) +{ + return nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_per_page_oobsize() - Get NAND erase block size + * @nand: NAND device + * + * Return: the eraseblock size. + */ +static inline size_t nanddev_eraseblock_size(const struct nand_device *nand) +{ + return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN + * @nand: NAND device + * + * Return: the number of eraseblocks per LUN. + */ +static inline unsigned int +nanddev_eraseblocks_per_lun(const struct nand_device *nand) +{ + return nand->memorg.eraseblocks_per_lun; +} + +/** + * nanddev_target_size() - Get the total size provided by a single target/die + * @nand: NAND device + * + * Return: the total size exposed by a single target/die in bytes. + */ +static inline u64 nanddev_target_size(const struct nand_device *nand) +{ + return (u64)nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun * + nand->memorg.pages_per_eraseblock * + nand->memorg.pagesize; +} + +/** + * nanddev_ntarget() - Get the total of targets + * @nand: NAND device + * + * Return: the number of targets/dies exposed by @nand. 
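+ *
+ * Illustrative note (not from the original source): combined with
+ * nanddev_target_size() this yields the whole device size, i.e.
+ * nanddev_size(nand) == nanddev_ntargets(nand) * nanddev_target_size(nand).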
+ */ +static inline unsigned int nanddev_ntargets(const struct nand_device *nand) +{ + return nand->memorg.ntargets; +} + +/** + * nanddev_neraseblocks() - Get the total number of erasablocks + * @nand: NAND device + * + * Return: the total number of eraseblocks exposed by @nand. + */ +static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand) +{ + return (u64)nand->memorg.luns_per_target * + nand->memorg.eraseblocks_per_lun * + nand->memorg.pages_per_eraseblock; +} + +/** + * nanddev_size() - Get NAND size + * @nand: NAND device + * + * Return: the total size (in bytes) exposed by @nand. + */ +static inline u64 nanddev_size(const struct nand_device *nand) +{ + return nanddev_target_size(nand) * nanddev_ntargets(nand); +} + +/** + * nanddev_get_memorg() - Extract memory organization info from a NAND device + * @nand: NAND device + * + * This can be used by the upper layer to fill the memorg info before calling + * nanddev_init(). + * + * Return: the memorg object embedded in the NAND device. + */ +static inline struct nand_memory_organization * +nanddev_get_memorg(struct nand_device *nand) +{ + return &nand->memorg; +} + +int nanddev_init(struct nand_device *nand, const struct nand_ops *ops, + struct module *owner); +void nanddev_cleanup(struct nand_device *nand); + +/** + * nanddev_register() - Register a NAND device + * @nand: NAND device + * + * Register a NAND device. + * This function is just a wrapper around mtd_device_register() + * registering the MTD device embedded in @nand. + * + * Return: 0 in case of success, a negative error code otherwise. + */ +static inline int nanddev_register(struct nand_device *nand) +{ + return mtd_device_register(nand->mtd, NULL, 0); +} + +/** + * nanddev_unregister() - Unregister a NAND device + * @nand: NAND device + * + * Unregister a NAND device. + * This function is just a wrapper around mtd_device_unregister() + * unregistering the MTD device embedded in @nand. + * + * Return: 0 in case of success, a negative error code otherwise. + */ +static inline int nanddev_unregister(struct nand_device *nand) +{ + return mtd_device_unregister(nand->mtd); +} + +/** + * nanddev_set_of_node() - Attach a DT node to a NAND device + * @nand: NAND device + * @np: DT node + * + * Attach a DT node to a NAND device. + */ +static inline void nanddev_set_of_node(struct nand_device *nand, + const struct device_node *np) +{ + mtd_set_of_node(nand->mtd, np); +} + +/** + * nanddev_get_of_node() - Retrieve the DT node attached to a NAND device + * @nand: NAND device + * + * Return: the DT node attached to @nand. + */ +static inline const struct device_node *nanddev_get_of_node(struct nand_device *nand) +{ + return mtd_get_of_node(nand->mtd); +} + +/** + * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position + * @nand: NAND device + * @offs: absolute NAND offset (usually passed by the MTD layer) + * @pos: a NAND position object to fill in + * + * Converts @offs into a nand_pos representation. + * + * Return: the offset within the NAND page pointed by @pos. 
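+ *
+ * Worked example (illustrative figures, not from the original source):
+ * with a 2048-byte page, 64 pages per eraseblock and a single LUN and
+ * target, offs = 0x21000 resolves to pos->eraseblock = 1, pos->page = 2
+ * and a returned in-page offset of 0, since 0x21000 = (1 * 64 + 2) * 2048.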
+ */ +static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand, + loff_t offs, + struct nand_pos *pos) +{ + unsigned int pageoffs; + u64 tmp = offs; + + pageoffs = do_div(tmp, nand->memorg.pagesize); + pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock); + pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun); + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; + pos->lun = do_div(tmp, nand->memorg.luns_per_target); + pos->target = tmp; + + return pageoffs; +} + +/** + * nanddev_pos_cmp() - Compare two NAND positions + * @a: First NAND position + * @b: Second NAND position + * + * Compares two NAND positions. + * + * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b. + */ +static inline int nanddev_pos_cmp(const struct nand_pos *a, + const struct nand_pos *b) +{ + if (a->target != b->target) + return a->target < b->target ? -1 : 1; + + if (a->lun != b->lun) + return a->lun < b->lun ? -1 : 1; + + if (a->eraseblock != b->eraseblock) + return a->eraseblock < b->eraseblock ? -1 : 1; + + if (a->page != b->page) + return a->page < b->page ? -1 : 1; + + return 0; +} + +/** + * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset + * @nand: NAND device + * @pos: the NAND position to convert + * + * Converts @pos NAND position into an absolute offset. + * + * Return: the absolute offset. Note that @pos points to the beginning of a + * page, if one wants to point to a specific offset within this page + * the returned offset has to be adjusted manually. + */ +static inline loff_t nanddev_pos_to_offs(struct nand_device *nand, + const struct nand_pos *pos) +{ + unsigned int npages; + + npages = pos->page + + ((pos->eraseblock + + (pos->lun + + (pos->target * nand->memorg.luns_per_target)) * + nand->memorg.eraseblocks_per_lun) * + nand->memorg.pages_per_eraseblock); + + return (loff_t)npages * nand->memorg.pagesize; +} + +/** + * nanddev_pos_to_row() - Extract a row address from a NAND position + * @nand: NAND device + * @pos: the position to convert + * + * Converts a NAND position into a row address that can then be passed to the + * device. + * + * Return: the row address extracted from @pos. + */ +static inline unsigned int nanddev_pos_to_row(struct nand_device *nand, + const struct nand_pos *pos) +{ + return (pos->lun << nand->rowconv.lun_addr_shift) | + (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) | + pos->page; +} + +/** + * nanddev_pos_next_target() - Move a position to the next target/die + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next target/die. Useful when you + * want to iterate over all targets/dies of a NAND device. + */ +static inline void nanddev_pos_next_target(struct nand_device *nand, + struct nand_pos *pos) +{ + pos->page = 0; + pos->plane = 0; + pos->eraseblock = 0; + pos->lun = 0; + pos->target++; +} + +/** + * nanddev_pos_next_lun() - Move a position to the next LUN + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next LUN. Useful when you want to + * iterate over all LUNs of a NAND device. 
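+ *
+ * Illustrative sketch (not from the original header; pr_debug() is
+ * assumed to be available from linux/printk.h): the nanddev_pos_next_*()
+ * helpers are typically combined to walk a whole device, e.g. to check
+ * every eraseblock:
+ *
+ *	struct nand_pos pos = { };
+ *	unsigned int i, nblocks = nanddev_neraseblocks(nand);
+ *
+ *	for (i = 0; i < nblocks; i++) {
+ *		if (nanddev_isbad(nand, &pos))
+ *			pr_debug("bad block\n");
+ *		nanddev_pos_next_eraseblock(nand, &pos);
+ *	}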
+ */ +static inline void nanddev_pos_next_lun(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->lun >= nand->memorg.luns_per_target - 1) + return nanddev_pos_next_target(nand, pos); + + pos->lun++; + pos->page = 0; + pos->plane = 0; + pos->eraseblock = 0; +} + +/** + * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next eraseblock. Useful when you + * want to iterate over all eraseblocks of a NAND device. + */ +static inline void nanddev_pos_next_eraseblock(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1) + return nanddev_pos_next_lun(nand, pos); + + pos->eraseblock++; + pos->page = 0; + pos->plane = pos->eraseblock % nand->memorg.planes_per_lun; +} + +/** + * nanddev_pos_next_eraseblock() - Move a position to the next page + * @nand: NAND device + * @pos: the position to update + * + * Updates @pos to point to the start of the next page. Useful when you want to + * iterate over all pages of a NAND device. + */ +static inline void nanddev_pos_next_page(struct nand_device *nand, + struct nand_pos *pos) +{ + if (pos->page >= nand->memorg.pages_per_eraseblock - 1) + return nanddev_pos_next_eraseblock(nand, pos); + + pos->page++; +} + +/** + * nand_io_iter_init - Initialize a NAND I/O iterator + * @nand: NAND device + * @offs: absolute offset + * @req: MTD request + * @iter: NAND I/O iterator + * + * Initializes a NAND iterator based on the information passed by the MTD + * layer. + */ +static inline void nanddev_io_iter_init(struct nand_device *nand, + loff_t offs, struct mtd_oob_ops *req, + struct nand_io_iter *iter) +{ + struct mtd_info *mtd = nanddev_to_mtd(nand); + + iter->req.mode = req->mode; + iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos); + iter->req.ooboffs = req->ooboffs; + iter->oobbytes_per_page = mtd_oobavail(mtd, req); + iter->dataleft = req->len; + iter->oobleft = req->ooblen; + iter->req.databuf.in = req->datbuf; + iter->req.datalen = min_t(unsigned int, + nand->memorg.pagesize - iter->req.dataoffs, + iter->dataleft); + iter->req.oobbuf.in = req->oobbuf; + iter->req.ooblen = min_t(unsigned int, + iter->oobbytes_per_page - iter->req.ooboffs, + iter->oobleft); +} + +/** + * nand_io_iter_next_page - Move to the next page + * @nand: NAND device + * @iter: NAND I/O iterator + * + * Updates the @iter to point to the next page. + */ +static inline void nanddev_io_iter_next_page(struct nand_device *nand, + struct nand_io_iter *iter) +{ + nanddev_pos_next_page(nand, &iter->req.pos); + iter->dataleft -= iter->req.datalen; + iter->req.databuf.in += iter->req.datalen; + iter->oobleft -= iter->req.ooblen; + iter->req.oobbuf.in += iter->req.ooblen; + iter->req.dataoffs = 0; + iter->req.ooboffs = 0; + iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize, + iter->dataleft); + iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page, + iter->oobleft); +} + +/** + * nand_io_iter_end - Should end iteration or not + * @nand: NAND device + * @iter: NAND I/O iterator + * + * Check whether @iter has reached the end of the NAND portion it was asked to + * iterate on or not. + * + * Return: true if @iter has reached the end of the iteration request, false + * otherwise. 
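+ *
+ * Illustrative sketch (not from the original header; read_one_page() is
+ * a placeholder for the layer's per-page I/O routine): specialized layers
+ * usually drive the iterator through the nanddev_io_for_each_page()
+ * macro defined below, e.g. in a read path:
+ *
+ *	struct nand_io_iter iter;
+ *	int ret;
+ *
+ *	nanddev_io_for_each_page(nand, from, ops, &iter) {
+ *		ret = read_one_page(nand, &iter.req);
+ *		if (ret)
+ *			break;
+ *		ops->retlen += iter.req.datalen;
+ *		ops->oobretlen += iter.req.ooblen;
+ *	}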
+ */ +static inline bool nanddev_io_iter_end(struct nand_device *nand, + const struct nand_io_iter *iter) +{ + if (iter->dataleft || iter->oobleft) + return false; + + return true; +} + +/** + * nand_io_for_each_page - Iterate over all NAND pages contained in an MTD I/O + * request + * @nand: NAND device + * @start: start address to read/write from + * @req: MTD I/O request + * @iter: NAND I/O iterator + * + * Should be used for iterate over pages that are contained in an MTD request. + */ +#define nanddev_io_for_each_page(nand, start, req, iter) \ + for (nanddev_io_iter_init(nand, start, req, iter); \ + !nanddev_io_iter_end(nand, iter); \ + nanddev_io_iter_next_page(nand, iter)) + +bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos); +bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos); +int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos); +int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); + +/* BBT related functions */ +enum nand_bbt_block_status { + NAND_BBT_BLOCK_STATUS_UNKNOWN, + NAND_BBT_BLOCK_GOOD, + NAND_BBT_BLOCK_WORN, + NAND_BBT_BLOCK_RESERVED, + NAND_BBT_BLOCK_FACTORY_BAD, + NAND_BBT_BLOCK_NUM_STATUS, +}; + +int nanddev_bbt_init(struct nand_device *nand); +void nanddev_bbt_cleanup(struct nand_device *nand); +int nanddev_bbt_update(struct nand_device *nand); +int nanddev_bbt_get_block_status(const struct nand_device *nand, + unsigned int entry); +int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry, + enum nand_bbt_block_status status); +int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block); + +/** + * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry + * @nand: NAND device + * @pos: the NAND position we want to get BBT entry for + * + * Return the BBT entry used to store information about the eraseblock pointed + * by @pos. + * + * Return: the BBT entry storing information about eraseblock pointed by @pos. + */ +static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand, + const struct nand_pos *pos) +{ + return pos->eraseblock + + ((pos->lun + (pos->target * nand->memorg.luns_per_target)) * + nand->memorg.eraseblocks_per_lun); +} + +/** + * nanddev_bbt_is_initialized() - Check if the BBT has been initialized + * @nand: NAND device + * + * Return: true if the BBT has been initialized, false otherwise. + */ +static inline bool nanddev_bbt_is_initialized(struct nand_device *nand) +{ + return !!nand->bbt.cache; +} + +/* MTD -> NAND helper functions. */ +int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo); + +#endif /* __LINUX_MTD_NAND_H */ diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h new file mode 100644 index 0000000..8ea6b04 --- /dev/null +++ b/include/linux/mtd/nand_bch.h @@ -0,0 +1,68 @@ +/* + * Copyright © 2011 Ivan Djelic + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This file is the header for the NAND BCH ECC implementation. 
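+ *
+ * Illustrative usage sketch (not part of the original header; dat and
+ * read_ecc stand for the page data buffer and the ECC bytes read back
+ * from OOB, and the ECC buffer size depends on the configured BCH
+ * strength and step size, so it is only indicative here):
+ *
+ *	struct nand_bch_control *nbc = nand_bch_init(mtd);
+ *	u_char calc_ecc[13];
+ *
+ *	if (nbc) {
+ *		nand_bch_calculate_ecc(mtd, dat, calc_ecc);
+ *		nand_bch_correct_data(mtd, dat, read_ecc, calc_ecc);
+ *		nand_bch_free(nbc);
+ *	}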
+ */ + +#ifndef __MTD_NAND_BCH_H__ +#define __MTD_NAND_BCH_H__ + +struct mtd_info; +struct nand_bch_control; + +#if defined(CONFIG_NAND_ECC_BCH) + +static inline int mtd_nand_has_bch(void) { return 1; } + +/* + * Calculate BCH ecc code + */ +int nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code); + +/* + * Detect and correct bit errors + */ +int nand_bch_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, + u_char *calc_ecc); +/* + * Initialize BCH encoder/decoder + */ +struct nand_bch_control *nand_bch_init(struct mtd_info *mtd); +/* + * Release BCH encoder/decoder resources + */ +void nand_bch_free(struct nand_bch_control *nbc); + +#else /* !CONFIG_NAND_ECC_BCH */ + +static inline int mtd_nand_has_bch(void) { return 0; } + +static inline int +nand_bch_calculate_ecc(struct mtd_info *mtd, const u_char *dat, + u_char *ecc_code) +{ + return -1; +} + +static inline int +nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf, + unsigned char *read_ecc, unsigned char *calc_ecc) +{ + return -ENOTSUPP; +} + +static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd) +{ + return NULL; +} + +static inline void nand_bch_free(struct nand_bch_control *nbc) {} + +#endif /* CONFIG_NAND_ECC_BCH */ + +#endif /* __MTD_NAND_BCH_H__ */ diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h new file mode 100644 index 0000000..3978363 --- /dev/null +++ b/include/linux/mtd/nand_ecc.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * drivers/mtd/nand_ecc.h + * + * Copyright (C) 2000-2010 Steven J. Hill + * David Woodhouse + * Thomas Gleixner + * + * This file is the header for the ECC algorithm. + */ + +#ifndef __MTD_NAND_ECC_H__ +#define __MTD_NAND_ECC_H__ + +struct mtd_info; + +/* + * Calculate 3 byte ECC code for 256 byte block + */ +int nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code); + +/* + * Detect and correct a 1 bit error for 256 byte block + */ +int nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); + +#endif /* __MTD_NAND_ECC_H__ */ diff --git a/include/linux/mtd/ndfc.h b/include/linux/mtd/ndfc.h new file mode 100644 index 0000000..d0558a9 --- /dev/null +++ b/include/linux/mtd/ndfc.h @@ -0,0 +1,67 @@ +/* + * linux/include/linux/mtd/ndfc.h + * + * Copyright (c) 2006 Thomas Gleixner + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Info: + * Contains defines, datastructures for ndfc nand controller + * + */ +#ifndef __LINUX_MTD_NDFC_H +#define __LINUX_MTD_NDFC_H + +/* NDFC Register definitions */ +#define NDFC_CMD 0x00 +#define NDFC_ALE 0x04 +#define NDFC_DATA 0x08 +#define NDFC_ECC 0x10 +#define NDFC_BCFG0 0x30 +#define NDFC_BCFG1 0x34 +#define NDFC_BCFG2 0x38 +#define NDFC_BCFG3 0x3c +#define NDFC_CCR 0x40 +#define NDFC_STAT 0x44 +#define NDFC_HWCTL 0x48 +#define NDFC_REVID 0x50 + +#define NDFC_STAT_IS_READY 0x01000000 + +#define NDFC_CCR_RESET_CE 0x80000000 /* CE Reset */ +#define NDFC_CCR_RESET_ECC 0x40000000 /* ECC Reset */ +#define NDFC_CCR_RIE 0x20000000 /* Interrupt Enable on Device Rdy */ +#define NDFC_CCR_REN 0x10000000 /* Enable wait for Rdy in LinearR */ +#define NDFC_CCR_ROMEN 0x08000000 /* Enable ROM In LinearR */ +#define NDFC_CCR_ARE 0x04000000 /* Auto-Read Enable */ +#define NDFC_CCR_BS(x) (((x) & 0x3) << 24) /* Select Bank on CE[x] */ +#define NDFC_CCR_BS_MASK 0x03000000 /* Select Bank */ +#define NDFC_CCR_ARAC0 0x00000000 /* 3 Addr, 1 Col 2 Row 512b page */ +#define NDFC_CCR_ARAC1 0x00001000 /* 4 Addr, 1 Col 3 Row 512b page */ +#define NDFC_CCR_ARAC2 0x00002000 /* 4 Addr, 2 Col 2 Row 2K page */ +#define NDFC_CCR_ARAC3 0x00003000 /* 5 Addr, 2 Col 3 Row 2K page */ +#define NDFC_CCR_ARAC_MASK 0x00003000 /* Auto-Read mode Addr Cycles */ +#define NDFC_CCR_RPG 0x0000C000 /* Auto-Read Page */ +#define NDFC_CCR_EBCC 0x00000004 /* EBC Configuration Completed */ +#define NDFC_CCR_DHC 0x00000002 /* Direct Hardware Control Enable */ + +#define NDFC_BxCFG_EN 0x80000000 /* Bank Enable */ +#define NDFC_BxCFG_CED 0x40000000 /* nCE Style */ +#define NDFC_BxCFG_SZ_MASK 0x08000000 /* Bank Size */ +#define NDFC_BxCFG_SZ_8BIT 0x00000000 /* 8bit */ +#define NDFC_BxCFG_SZ_16BIT 0x08000000 /* 16bit */ + +#define NDFC_MAX_BANKS 4 + +struct ndfc_controller_settings { + uint32_t ccr_settings; + uint64_t ndfc_erpn; +}; + +struct ndfc_chip_settings { + uint32_t bank_settings; +}; + +#endif diff --git a/include/linux/mtd/omap_elm.h b/include/linux/mtd/omap_elm.h new file mode 100644 index 0000000..f3db00d --- /dev/null +++ b/include/linux/mtd/omap_elm.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * (C) Copyright 2010-2011 Texas Instruments, + * Mansoor Ahamed + * + * Derived from work done by Rohit Choraria for omap3 + */ +#ifndef __ASM_ARCH_ELM_H +#define __ASM_ARCH_ELM_H +/* + * ELM Module Registers + */ + +/* ELM registers bit fields */ +#define ELM_SYSCONFIG_SOFTRESET_MASK (0x2) +#define ELM_SYSCONFIG_SOFTRESET (0x2) +#define ELM_SYSSTATUS_RESETDONE_MASK (0x1) +#define ELM_SYSSTATUS_RESETDONE (0x1) +#define ELM_LOCATION_CONFIG_ECC_BCH_LEVEL_MASK (0x3) +#define ELM_LOCATION_CONFIG_ECC_SIZE_MASK (0x7FF0000) +#define ELM_LOCATION_CONFIG_ECC_SIZE_POS (16) +#define ELM_SYNDROME_FRAGMENT_6_SYNDROME_VALID (0x00010000) +#define ELM_LOCATION_STATUS_ECC_CORRECTABLE_MASK (0x100) +#define ELM_LOCATION_STATUS_ECC_NB_ERRORS_MASK (0x1F) + +#define ELM_MAX_CHANNELS 8 +#define ELM_MAX_ERROR_COUNT 16 + +#ifndef __ASSEMBLY__ + +enum bch_level { + BCH_4_BIT = 0, + BCH_8_BIT, + BCH_16_BIT +}; + + +/* BCH syndrome registers */ +struct syndrome { + u32 syndrome_fragment_x[7]; /* 0x400, 0x404.... 0x418 */ + u8 res1[36]; /* 0x41c */ +}; + +/* BCH error status & location register */ +struct location { + u32 location_status; /* 0x800 */ + u8 res1[124]; /* 0x804 */ + u32 error_location_x[ELM_MAX_ERROR_COUNT]; /* 0x880, 0x980, .. 
*/ + u8 res2[64]; /* 0x8c0 */ +}; + +/* BCH ELM register map - do not try to allocate memmory for this structure. + * We have used plenty of reserved variables to fill the slots in the ELM + * register memory map. + * Directly initialize the struct pointer to ELM base address. + */ +struct elm { + u32 rev; /* 0x000 */ + u8 res1[12]; /* 0x004 */ + u32 sysconfig; /* 0x010 */ + u32 sysstatus; /* 0x014 */ + u32 irqstatus; /* 0x018 */ + u32 irqenable; /* 0x01c */ + u32 location_config; /* 0x020 */ + u8 res2[92]; /* 0x024 */ + u32 page_ctrl; /* 0x080 */ + u8 res3[892]; /* 0x084 */ + struct syndrome syndrome_fragments[ELM_MAX_CHANNELS]; /* 0x400,0x420 */ + u8 res4[512]; /* 0x600 */ + struct location error_location[ELM_MAX_CHANNELS]; /* 0x800,0x900 ... */ +}; + +int elm_check_error(u8 *syndrome, enum bch_level bch_type, u32 *error_count, + u32 *error_locations); +int elm_config(enum bch_level level); +void elm_reset(void); +void elm_init(void); +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_ARCH_ELM_H */ diff --git a/include/linux/mtd/omap_gpmc.h b/include/linux/mtd/omap_gpmc.h new file mode 100644 index 0000000..864b05e --- /dev/null +++ b/include/linux/mtd/omap_gpmc.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * (C) Copyright 2004-2008 Texas Instruments, + * Rohit Choraria + * + * (C) Copyright 2013 Andreas Bießmann + */ +#ifndef __ASM_OMAP_GPMC_H +#define __ASM_OMAP_GPMC_H + +#define GPMC_BUF_EMPTY 0 +#define GPMC_BUF_FULL 1 +#define GPMC_MAX_SECTORS 8 + +enum omap_ecc { + /* 1-bit ECC calculation by Software, Error detection by Software */ + OMAP_ECC_HAM1_CODE_SW = 1, /* avoid un-initialized int can be 0x0 */ + /* 1-bit ECC calculation by GPMC, Error detection by Software */ + /* ECC layout compatible to legacy ROMCODE. */ + OMAP_ECC_HAM1_CODE_HW, + /* 4-bit ECC calculation by GPMC, Error detection by Software */ + OMAP_ECC_BCH4_CODE_HW_DETECTION_SW, + /* 4-bit ECC calculation by GPMC, Error detection by ELM */ + OMAP_ECC_BCH4_CODE_HW, + /* 8-bit ECC calculation by GPMC, Error detection by Software */ + OMAP_ECC_BCH8_CODE_HW_DETECTION_SW, + /* 8-bit ECC calculation by GPMC, Error detection by ELM */ + OMAP_ECC_BCH8_CODE_HW, + /* 16-bit ECC calculation by GPMC, Error detection by ELM */ + OMAP_ECC_BCH16_CODE_HW, +}; + +struct gpmc_cs { + u32 config1; /* 0x00 */ + u32 config2; /* 0x04 */ + u32 config3; /* 0x08 */ + u32 config4; /* 0x0C */ + u32 config5; /* 0x10 */ + u32 config6; /* 0x14 */ + u32 config7; /* 0x18 */ + u32 nand_cmd; /* 0x1C */ + u32 nand_adr; /* 0x20 */ + u32 nand_dat; /* 0x24 */ + u8 res[8]; /* blow up to 0x30 byte */ +}; + +struct bch_res_0_3 { + u32 bch_result_x[4]; +}; + +struct bch_res_4_6 { + u32 bch_result_x[3]; +}; + +struct gpmc { + u8 res1[0x10]; + u32 sysconfig; /* 0x10 */ + u8 res2[0x4]; + u32 irqstatus; /* 0x18 */ + u32 irqenable; /* 0x1C */ + u8 res3[0x20]; + u32 timeout_control; /* 0x40 */ + u8 res4[0xC]; + u32 config; /* 0x50 */ + u32 status; /* 0x54 */ + u8 res5[0x8]; /* 0x58 */ + struct gpmc_cs cs[8]; /* 0x60, 0x90, .. 
*/ + u32 prefetch_config1; /* 0x1E0 */ + u32 prefetch_config2; /* 0x1E4 */ + u32 res6; /* 0x1E8 */ + u32 prefetch_control; /* 0x1EC */ + u32 prefetch_status; /* 0x1F0 */ + u32 ecc_config; /* 0x1F4 */ + u32 ecc_control; /* 0x1F8 */ + u32 ecc_size_config; /* 0x1FC */ + u32 ecc1_result; /* 0x200 */ + u32 ecc2_result; /* 0x204 */ + u32 ecc3_result; /* 0x208 */ + u32 ecc4_result; /* 0x20C */ + u32 ecc5_result; /* 0x210 */ + u32 ecc6_result; /* 0x214 */ + u32 ecc7_result; /* 0x218 */ + u32 ecc8_result; /* 0x21C */ + u32 ecc9_result; /* 0x220 */ + u8 res7[12]; /* 0x224 */ + u32 testmomde_ctrl; /* 0x230 */ + u8 res8[12]; /* 0x234 */ + struct bch_res_0_3 bch_result_0_3[GPMC_MAX_SECTORS]; /* 0x240,0x250, */ + u8 res9[16 * 4]; /* 0x2C0 - 0x2FF */ + struct bch_res_4_6 bch_result_4_6[GPMC_MAX_SECTORS]; /* 0x300,0x310, */ +}; + +/* Used for board specific gpmc initialization */ +extern const struct gpmc *gpmc_cfg; +extern char gpmc_cs0_flash; + +#endif /* __ASM_OMAP_GPMC_H */ diff --git a/include/linux/mtd/onenand.h b/include/linux/mtd/onenand.h new file mode 100644 index 0000000..e7b63dd --- /dev/null +++ b/include/linux/mtd/onenand.h @@ -0,0 +1,180 @@ +/* + * linux/include/linux/mtd/onenand.h + * + * Copyright (C) 2005-2007 Samsung Electronics + * Kyungmin Park + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __LINUX_MTD_ONENAND_H +#define __LINUX_MTD_ONENAND_H + +#include + +/* Note: The header order is impoertant */ +#include + +#include +#include + +#define MAX_DIES 2 +#define MAX_BUFFERRAM 2 +#define MAX_ONENAND_PAGESIZE (4096 + 128) + +/* Scan and identify a OneNAND device */ +extern int onenand_scan (struct mtd_info *mtd, int max_chips); +/* Free resources held by the OneNAND device */ +extern void onenand_release (struct mtd_info *mtd); + +/** + * struct onenand_bufferram - OneNAND BufferRAM Data + * @param blockpage block & page address in BufferRAM + */ +struct onenand_bufferram { + int blockpage; +}; + +/** + * struct onenand_chip - OneNAND Private Flash Chip Data + * @param base [BOARDSPECIFIC] address to access OneNAND + * @dies: [INTERN][FLEXONENAND] number of dies on chip + * @boundary: [INTERN][FLEXONENAND] Boundary of the dies + * @diesize: [INTERN][FLEXONENAND] Size of the dies + * @param chipsize [INTERN] the size of one chip for multichip arrays + * @param device_id [INTERN] device ID + * @param verstion_id [INTERN] version ID + * @technology [INTERN] describes the internal NAND array technology such as SLC or MLC. + * @density_mask: [INTERN] chip density, used for DDP devices + * @param options [BOARDSPECIFIC] various chip options. 
They can partly be set to inform onenand_scan about + * @param erase_shift [INTERN] number of address bits in a block + * @param page_shift [INTERN] number of address bits in a page + * @param ppb_shift [INTERN] number of address bits in a pages per block + * @param page_mask [INTERN] a page per block mask + * @param writesize [INTERN] a real page size + * @param bufferam_index [INTERN] BufferRAM index + * @param bufferam [INTERN] BufferRAM info + * @param readw [REPLACEABLE] hardware specific function for read short + * @param writew [REPLACEABLE] hardware specific function for write short + * @param command [REPLACEABLE] hardware specific function for writing commands to the chip + * @param wait [REPLACEABLE] hardware specific function for wait on ready + * @param read_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area + * @param write_bufferram [REPLACEABLE] hardware specific function for BufferRAM Area + * @param chip_lock [INTERN] spinlock used to protect access to this structure and the chip + * @param wq [INTERN] wait queue to sleep on if a OneNAND operation is in progress + * @param state [INTERN] the current state of the OneNAND device + * @param autooob [REPLACEABLE] the default (auto)placement scheme + * @param priv [OPTIONAL] pointer to private chip date + */ +struct onenand_chip { + void __iomem *base; + unsigned int dies; + unsigned int boundary[MAX_DIES]; + unsigned int diesize[MAX_DIES]; + unsigned int chipsize; + unsigned int device_id; + unsigned int version_id; + unsigned int technology; + unsigned int density_mask; + unsigned int options; + + unsigned int erase_shift; + unsigned int page_shift; + unsigned int ppb_shift; /* Pages per block shift */ + unsigned int page_mask; + unsigned int writesize; + + unsigned int bufferram_index; + struct onenand_bufferram bufferram[MAX_BUFFERRAM]; + + int (*command) (struct mtd_info *mtd, int cmd, loff_t address, + size_t len); + int (*wait) (struct mtd_info *mtd, int state); + int (*bbt_wait) (struct mtd_info *mtd, int state); + void (*unlock_all)(struct mtd_info *mtd); + int (*read_bufferram) (struct mtd_info *mtd, loff_t addr, int area, + unsigned char *buffer, int offset, size_t count); + int (*write_bufferram) (struct mtd_info *mtd, loff_t addr, int area, + const unsigned char *buffer, int offset, + size_t count); + unsigned short (*read_word) (void __iomem *addr); + void (*write_word) (unsigned short value, void __iomem *addr); + int (*chip_probe)(struct mtd_info *mtd); + void (*mmcontrol) (struct mtd_info *mtd, int sync_read); + int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); + int (*scan_bbt)(struct mtd_info *mtd); + + unsigned char *main_buf; + unsigned char *spare_buf; +#ifdef DONT_USE_UBOOT + spinlock_t chip_lock; + wait_queue_head_t wq; +#endif + int state; + unsigned char *page_buf; + unsigned char *oob_buf; + + struct nand_oobinfo *autooob; + int subpagesize; + struct nand_ecclayout *ecclayout; + + void *bbm; + + void *priv; +}; + +/* + * Helper macros + */ +#define ONENAND_CURRENT_BUFFERRAM(this) (this->bufferram_index) +#define ONENAND_NEXT_BUFFERRAM(this) (this->bufferram_index ^ 1) +#define ONENAND_SET_NEXT_BUFFERRAM(this) (this->bufferram_index ^= 1) +#define ONENAND_SET_PREV_BUFFERRAM(this) (this->bufferram_index ^= 1) +#define ONENAND_SET_BUFFERRAM0(this) (this->bufferram_index = 0) +#define ONENAND_SET_BUFFERRAM1(this) (this->bufferram_index = 1) + +#define FLEXONENAND(this) (this->device_id & DEVICE_IS_FLEXONENAND) +#define ONENAND_IS_MLC(this) (this->technology & 
ONENAND_TECHNOLOGY_IS_MLC) +#define ONENAND_IS_DDP(this) \ + (this->device_id & ONENAND_DEVICE_IS_DDP) + +#define ONENAND_IS_4KB_PAGE(this) \ + (this->options & ONENAND_HAS_4KB_PAGE) + +#define ONENAND_IS_2PLANE(this) (0) + +/* + * Options bits + */ +#define ONENAND_HAS_CONT_LOCK (0x0001) +#define ONENAND_HAS_UNLOCK_ALL (0x0002) +#define ONENAND_HAS_2PLANE (0x0004) +#define ONENAND_HAS_4KB_PAGE (0x0008) +#define ONENAND_RUNTIME_BADBLOCK_CHECK (0x0200) +#define ONENAND_PAGEBUF_ALLOC (0x1000) +#define ONENAND_OOBBUF_ALLOC (0x2000) + +/* + * OneNAND Flash Manufacturer ID Codes + */ +#define ONENAND_MFR_NUMONYX 0x20 +#define ONENAND_MFR_SAMSUNG 0xec + +/** + * struct nand_manufacturers - NAND Flash Manufacturer ID Structure + * @param name: Manufacturer name + * @param id: manufacturer ID code of device. +*/ +struct onenand_manufacturers { + int id; + char *name; +}; + +int onenand_bbt_read_oob(struct mtd_info *mtd, loff_t from, + struct mtd_oob_ops *ops); + +unsigned int onenand_block(struct onenand_chip *this, loff_t addr); +int flexonenand_region(struct mtd_info *mtd, loff_t addr); +#endif /* __LINUX_MTD_ONENAND_H */ diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h new file mode 100644 index 0000000..8449a3c --- /dev/null +++ b/include/linux/mtd/onenand_regs.h @@ -0,0 +1,208 @@ +/* + * linux/include/linux/mtd/onenand_regs.h + * + * OneNAND Register header file + * + * Copyright (C) 2005-2007 Samsung Electronics + * Kyungmin Park + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef __ONENAND_REG_H +#define __ONENAND_REG_H + +/* Memory Address Map Translation (Word order) */ +#define ONENAND_MEMORY_MAP(x) ((x) << 1) + +/* + * External BufferRAM area + */ +#define ONENAND_BOOTRAM ONENAND_MEMORY_MAP(0x0000) +#define ONENAND_DATARAM ONENAND_MEMORY_MAP(0x0200) +#define ONENAND_SPARERAM ONENAND_MEMORY_MAP(0x8010) + +/* + * OneNAND Registers + */ +#define ONENAND_REG_MANUFACTURER_ID ONENAND_MEMORY_MAP(0xF000) +#define ONENAND_REG_DEVICE_ID ONENAND_MEMORY_MAP(0xF001) +#define ONENAND_REG_VERSION_ID ONENAND_MEMORY_MAP(0xF002) +#define ONENAND_REG_DATA_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF003) +#define ONENAND_REG_BOOT_BUFFER_SIZE ONENAND_MEMORY_MAP(0xF004) +#define ONENAND_REG_NUM_BUFFERS ONENAND_MEMORY_MAP(0xF005) +#define ONENAND_REG_TECHNOLOGY ONENAND_MEMORY_MAP(0xF006) + +#define ONENAND_REG_START_ADDRESS1 ONENAND_MEMORY_MAP(0xF100) +#define ONENAND_REG_START_ADDRESS2 ONENAND_MEMORY_MAP(0xF101) +#define ONENAND_REG_START_ADDRESS3 ONENAND_MEMORY_MAP(0xF102) +#define ONENAND_REG_START_ADDRESS4 ONENAND_MEMORY_MAP(0xF103) +#define ONENAND_REG_START_ADDRESS5 ONENAND_MEMORY_MAP(0xF104) +#define ONENAND_REG_START_ADDRESS6 ONENAND_MEMORY_MAP(0xF105) +#define ONENAND_REG_START_ADDRESS7 ONENAND_MEMORY_MAP(0xF106) +#define ONENAND_REG_START_ADDRESS8 ONENAND_MEMORY_MAP(0xF107) + +#define ONENAND_REG_START_BUFFER ONENAND_MEMORY_MAP(0xF200) +#define ONENAND_REG_COMMAND ONENAND_MEMORY_MAP(0xF220) +#define ONENAND_REG_SYS_CFG1 ONENAND_MEMORY_MAP(0xF221) +#define ONENAND_REG_SYS_CFG2 ONENAND_MEMORY_MAP(0xF222) +#define ONENAND_REG_CTRL_STATUS ONENAND_MEMORY_MAP(0xF240) +#define ONENAND_REG_INTERRUPT ONENAND_MEMORY_MAP(0xF241) +#define ONENAND_REG_START_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24C) +#define ONENAND_REG_END_BLOCK_ADDRESS ONENAND_MEMORY_MAP(0xF24D) +#define ONENAND_REG_WP_STATUS ONENAND_MEMORY_MAP(0xF24E) + +#define 
ONENAND_REG_ECC_STATUS ONENAND_MEMORY_MAP(0xFF00) +#define ONENAND_REG_ECC_M0 ONENAND_MEMORY_MAP(0xFF01) +#define ONENAND_REG_ECC_S0 ONENAND_MEMORY_MAP(0xFF02) +#define ONENAND_REG_ECC_M1 ONENAND_MEMORY_MAP(0xFF03) +#define ONENAND_REG_ECC_S1 ONENAND_MEMORY_MAP(0xFF04) +#define ONENAND_REG_ECC_M2 ONENAND_MEMORY_MAP(0xFF05) +#define ONENAND_REG_ECC_S2 ONENAND_MEMORY_MAP(0xFF06) +#define ONENAND_REG_ECC_M3 ONENAND_MEMORY_MAP(0xFF07) +#define ONENAND_REG_ECC_S3 ONENAND_MEMORY_MAP(0xFF08) + +/* + * Device ID Register F001h (R) + */ +#define DEVICE_IS_FLEXONENAND (1 << 9) +#define FLEXONENAND_PI_MASK (0x3ff) +#define FLEXONENAND_PI_UNLOCK_SHIFT (14) +#define ONENAND_DEVICE_DENSITY_MASK (0xf) +#define ONENAND_DEVICE_DENSITY_SHIFT (4) +#define ONENAND_DEVICE_IS_DDP (1 << 3) +#define ONENAND_DEVICE_IS_DEMUX (1 << 2) +#define ONENAND_DEVICE_VCC_MASK (0x3) + +#define ONENAND_DEVICE_DENSITY_512Mb (0x002) +#define ONENAND_DEVICE_DENSITY_1Gb (0x003) +#define ONENAND_DEVICE_DENSITY_2Gb (0x004) +#define ONENAND_DEVICE_DENSITY_4Gb (0x005) + +/* + * Version ID Register F002h (R) + */ +#define ONENAND_VERSION_PROCESS_SHIFT (8) + +/* + * Technology Register F006h (R) + */ +#define ONENAND_TECHNOLOGY_IS_MLC (1 << 0) + +/* + * Start Address 1 F100h (R/W) + */ +#define ONENAND_DDP_SHIFT (15) +#define ONENAND_DDP_CHIP0 (0) +#define ONENAND_DDP_CHIP1 (1 << ONENAND_DDP_SHIFT) + +/* + * Start Address 8 F107h (R/W) + */ +#define ONENAND_FPA_MASK (0x7f) +#define ONENAND_FPA_SHIFT (2) +#define ONENAND_FSA_MASK (0x03) + +/* + * Start Buffer Register F200h (R/W) + */ +#define ONENAND_BSA_MASK (0x03) +#define ONENAND_BSA_SHIFT (8) +#define ONENAND_BSA_BOOTRAM (0 << 2) +#define ONENAND_BSA_DATARAM0 (2 << 2) +#define ONENAND_BSA_DATARAM1 (3 << 2) +#define ONENAND_BSC_MASK (0x07) + +/* + * Command Register F220h (R/W) + */ +#define ONENAND_CMD_READ (0x00) +#define ONENAND_CMD_READOOB (0x13) +#define ONENAND_CMD_PROG (0x80) +#define ONENAND_CMD_PROGOOB (0x1A) +#define ONENAND_CMD_2X_PROG (0x7D) +#define ONENAND_CMD_2X_CACHE_PROG (0x7F) +#define ONENAND_CMD_UNLOCK (0x23) +#define ONENAND_CMD_LOCK (0x2A) +#define ONENAND_CMD_LOCK_TIGHT (0x2C) +#define ONENAND_CMD_UNLOCK_ALL (0x27) +#define ONENAND_CMD_ERASE (0x94) +#define ONENAND_CMD_MULTIBLOCK_ERASE (0x95) +#define ONENAND_CMD_ERASE_VERIFY (0x71) +#define ONENAND_CMD_RESET (0xF0) +#define ONENAND_CMD_READID (0x90) +#define FLEXONENAND_CMD_RESET (0xF3) +#define FLEXONENAND_CMD_PI_UPDATE (0x05) +#define FLEXONENAND_CMD_PI_ACCESS (0x66) +#define FLEXONENAND_CMD_RECOVER_LSB (0x05) + +/* NOTE: Those are not *REAL* commands */ +#define ONENAND_CMD_BUFFERRAM (0x1978) +#define FLEXONENAND_CMD_READ_PI (0x1985) + +/* + * System Configuration 1 Register F221h (R, R/W) + */ +#define ONENAND_SYS_CFG1_SYNC_READ (1 << 15) +#define ONENAND_SYS_CFG1_BRL_7 (7 << 12) +#define ONENAND_SYS_CFG1_BRL_6 (6 << 12) +#define ONENAND_SYS_CFG1_BRL_5 (5 << 12) +#define ONENAND_SYS_CFG1_BRL_4 (4 << 12) +#define ONENAND_SYS_CFG1_BRL_3 (3 << 12) +#define ONENAND_SYS_CFG1_BRL_10 (2 << 12) +#define ONENAND_SYS_CFG1_BRL_9 (1 << 12) +#define ONENAND_SYS_CFG1_BRL_8 (0 << 12) +#define ONENAND_SYS_CFG1_BRL_SHIFT (12) +#define ONENAND_SYS_CFG1_BL_32 (4 << 9) +#define ONENAND_SYS_CFG1_BL_16 (3 << 9) +#define ONENAND_SYS_CFG1_BL_8 (2 << 9) +#define ONENAND_SYS_CFG1_BL_4 (1 << 9) +#define ONENAND_SYS_CFG1_BL_CONT (0 << 9) +#define ONENAND_SYS_CFG1_BL_SHIFT (9) +#define ONENAND_SYS_CFG1_NO_ECC (1 << 8) +#define ONENAND_SYS_CFG1_RDY (1 << 7) +#define ONENAND_SYS_CFG1_INT (1 << 6) +#define ONENAND_SYS_CFG1_IOBE (1 << 5) 
+#define ONENAND_SYS_CFG1_RDY_CONF (1 << 4) + +/* + * Controller Status Register F240h (R) + */ +#define ONENAND_CTRL_ONGO (1 << 15) +#define ONENAND_CTRL_LOCK (1 << 14) +#define ONENAND_CTRL_LOAD (1 << 13) +#define ONENAND_CTRL_PROGRAM (1 << 12) +#define ONENAND_CTRL_ERASE (1 << 11) +#define ONENAND_CTRL_ERROR (1 << 10) +#define ONENAND_CTRL_RSTB (1 << 7) + +/* + * Interrupt Status Register F241h (R) + */ +#define ONENAND_INT_MASTER (1 << 15) +#define ONENAND_INT_READ (1 << 7) +#define ONENAND_INT_WRITE (1 << 6) +#define ONENAND_INT_ERASE (1 << 5) +#define ONENAND_INT_RESET (1 << 4) +#define ONENAND_INT_CLEAR (0 << 0) + +/* + * NAND Flash Write Protection Status Register F24Eh (R) + */ +#define ONENAND_WP_US (1 << 2) +#define ONENAND_WP_LS (1 << 1) +#define ONENAND_WP_LTS (1 << 0) + +/* + * ECC Status Reigser FF00h (R) + */ +#define ONENAND_ECC_1BIT (1 << 0) +#define ONENAND_ECC_1BIT_ALL (0x5555) +#define ONENAND_ECC_2BIT (1 << 1) +#define ONENAND_ECC_2BIT_ALL (0xAAAA) +#define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010) +#define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010) + +#endif /* __ONENAND_REG_H */ diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h new file mode 100644 index 0000000..3822237 --- /dev/null +++ b/include/linux/mtd/partitions.h @@ -0,0 +1,110 @@ +/* + * MTD partitioning layer definitions + * + * (C) 2000 Nicolas Pitre + * + * This code is GPL + */ + +#ifndef MTD_PARTITIONS_H +#define MTD_PARTITIONS_H + +#include + + +/* + * Partition definition structure: + * + * An array of struct partition is passed along with a MTD object to + * mtd_device_register() to create them. + * + * For each partition, these fields are available: + * name: string that will be used to label the partition's MTD device. + * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition + * will extend to the end of the master MTD device. + * offset: absolute starting position within the master MTD device; if + * defined as MTDPART_OFS_APPEND, the partition will start where the + * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block; + * if MTDPART_OFS_RETAIN, consume as much as possible, leaving size + * after the end of partition. + * mask_flags: contains flags that have to be masked (removed) from the + * master MTD flag set for the corresponding MTD partition. + * For example, to force a read-only partition, simply adding + * MTD_WRITEABLE to the mask_flags will do the trick. + * + * Note: writeable partitions require their size and offset be + * erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK). + */ + +struct mtd_partition { + const char *name; /* identifier string */ + uint64_t size; /* partition size */ + uint64_t offset; /* offset within the master MTD space */ + uint32_t mask_flags; /* master MTD flags to mask out for this partition */ + struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only) */ +}; + +#define MTDPART_OFS_RETAIN (-3) +#define MTDPART_OFS_NXTBLK (-2) +#define MTDPART_OFS_APPEND (-1) +#define MTDPART_SIZ_FULL (0) + + +struct mtd_info; +struct device_node; + +#ifndef __UBOOT__ +/** + * struct mtd_part_parser_data - used to pass data to MTD partition parsers. 
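As an illustration of struct mtd_partition and the MTDPART_* constants documented above, a board could describe a simple flash layout as sketched below; the partition names and sizes are hypothetical, MTDPART_OFS_APPEND places each partition right after the previous one, MTDPART_SIZ_FULL extends the last one to the end of the master device, and masking MTD_WRITEABLE (defined with the other MTD flags elsewhere) makes the first partition read-only.

static struct mtd_partition board_nand_parts[] = {
	{
		.name		= "u-boot",		/* hypothetical boot area */
		.offset		= 0,
		.size		= 1 << 20,		/* 1 MiB */
		.mask_flags	= MTD_WRITEABLE,	/* mask out: read-only */
	},
	{
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,	/* directly after "u-boot" */
		.size		= 8 << 20,		/* 8 MiB */
	},
	{
		.name		= "rootfs",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,	/* rest of the master device */
	},
};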
+ * @origin: for RedBoot, start address of MTD device + * @of_node: for OF parsers, device node containing partitioning information + */ +struct mtd_part_parser_data { + unsigned long origin; + struct device_node *of_node; +}; + + +/* + * Functions dealing with the various ways of partitioning the space + */ + +struct mtd_part_parser { + struct list_head list; + struct module *owner; + const char *name; + int (*parse_fn)(struct mtd_info *, struct mtd_partition **, + struct mtd_part_parser_data *); +}; + +extern void register_mtd_parser(struct mtd_part_parser *parser); +extern void deregister_mtd_parser(struct mtd_part_parser *parser); +#endif + +int mtd_add_partition(struct mtd_info *master, const char *name, + long long offset, long long length); +int mtd_del_partition(struct mtd_info *master, int partno); +uint64_t mtd_get_device_size(const struct mtd_info *mtd); + +#if defined(CONFIG_MTD_PARTITIONS) +int mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts, + struct mtd_partition **_parts, int *_nparts); +void mtd_free_parsed_partitions(struct mtd_partition *parts, + unsigned int nparts); +#else +static inline int +mtd_parse_partitions(struct mtd_info *parent, const char **_mtdparts, + struct mtd_partition **_parts, int *_nparts) +{ + *_nparts = 0; + + return 0; +} +static inline void +mtd_free_parsed_partitions(struct mtd_partition *parts, unsigned int nparts) +{ + return; +} +#endif /* defined(MTD_PARTITIONS) */ + +#endif diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h new file mode 100644 index 0000000..bd373b9 --- /dev/null +++ b/include/linux/mtd/rawnand.h @@ -0,0 +1,1332 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright © 2000-2010 David Woodhouse + * Steven J. Hill + * Thomas Gleixner + * + * Info: + * Contains standard defines and IDs for NAND flash devices + * + * Changelog: + * See git changelog. + */ +#ifndef __LINUX_MTD_RAWNAND_H +#define __LINUX_MTD_RAWNAND_H + +#include + +#include +#include +#include +#include +#include +#include + +struct mtd_info; +struct nand_chip; +struct nand_flash_dev; +struct device_node; + +/* Get the flash and manufacturer id and lookup if the type is supported. */ +struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, + struct nand_chip *chip, + int *maf_id, int *dev_id, + struct nand_flash_dev *type); + +/* Scan and identify a NAND device */ +int nand_scan(struct mtd_info *mtd, int max_chips); +/* + * Separate phases of nand_scan(), allowing board driver to intervene + * and override command or ECC setup according to flash type. + */ +int nand_scan_ident(struct mtd_info *mtd, int max_chips, + struct nand_flash_dev *table); +int nand_scan_tail(struct mtd_info *mtd); + +/* Free resources held by the NAND device */ +void nand_release(struct mtd_info *mtd); + +/* Internal helper for board drivers which need to override command function */ +void nand_wait_ready(struct mtd_info *mtd); + +/* + * This constant declares the max. oobsize / page, which + * is supported now. If you add a chip with bigger oobsize/page + * adjust this accordingly. + */ +#define NAND_MAX_OOBSIZE 1664 +#define NAND_MAX_PAGESIZE 16384 + +/* + * Constants for hardware specific CLE/ALE/NCE function + * + * These are bits which can be or'ed to set/clear multiple + * bits in one go. 
+ */ +/* Select the chip by setting nCE to low */ +#define NAND_NCE 0x01 +/* Select the command latch by setting CLE to high */ +#define NAND_CLE 0x02 +/* Select the address latch by setting ALE to high */ +#define NAND_ALE 0x04 + +#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE) +#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE) +#define NAND_CTRL_CHANGE 0x80 + +/* + * Standard NAND flash commands + */ +#define NAND_CMD_READ0 0 +#define NAND_CMD_READ1 1 +#define NAND_CMD_RNDOUT 5 +#define NAND_CMD_PAGEPROG 0x10 +#define NAND_CMD_READOOB 0x50 +#define NAND_CMD_ERASE1 0x60 +#define NAND_CMD_STATUS 0x70 +#define NAND_CMD_SEQIN 0x80 +#define NAND_CMD_RNDIN 0x85 +#define NAND_CMD_READID 0x90 +#define NAND_CMD_ERASE2 0xd0 +#define NAND_CMD_PARAM 0xec +#define NAND_CMD_GET_FEATURES 0xee +#define NAND_CMD_SET_FEATURES 0xef +#define NAND_CMD_RESET 0xff + +#define NAND_CMD_LOCK 0x2a +#define NAND_CMD_UNLOCK1 0x23 +#define NAND_CMD_UNLOCK2 0x24 + +/* Extended commands for large page devices */ +#define NAND_CMD_READSTART 0x30 +#define NAND_CMD_RNDOUTSTART 0xE0 +#define NAND_CMD_CACHEDPROG 0x15 + +/* Extended commands for AG-AND device */ +/* + * Note: the command for NAND_CMD_DEPLETE1 is really 0x00 but + * there is no way to distinguish that from NAND_CMD_READ0 + * until the remaining sequence of commands has been completed + * so add a high order bit and mask it off in the command. + */ +#define NAND_CMD_DEPLETE1 0x100 +#define NAND_CMD_DEPLETE2 0x38 +#define NAND_CMD_STATUS_MULTI 0x71 +#define NAND_CMD_STATUS_ERROR 0x72 +/* multi-bank error status (banks 0-3) */ +#define NAND_CMD_STATUS_ERROR0 0x73 +#define NAND_CMD_STATUS_ERROR1 0x74 +#define NAND_CMD_STATUS_ERROR2 0x75 +#define NAND_CMD_STATUS_ERROR3 0x76 +#define NAND_CMD_STATUS_RESET 0x7f +#define NAND_CMD_STATUS_CLEAR 0xff + +#define NAND_CMD_NONE -1 + +/* Status bits */ +#define NAND_STATUS_FAIL 0x01 +#define NAND_STATUS_FAIL_N1 0x02 +#define NAND_STATUS_TRUE_READY 0x20 +#define NAND_STATUS_READY 0x40 +#define NAND_STATUS_WP 0x80 + +#define NAND_DATA_IFACE_CHECK_ONLY -1 + +/* + * Constants for ECC_MODES + */ +typedef enum { + NAND_ECC_NONE, + NAND_ECC_SOFT, + NAND_ECC_HW, + NAND_ECC_HW_SYNDROME, + NAND_ECC_HW_OOB_FIRST, + NAND_ECC_SOFT_BCH, +} nand_ecc_modes_t; + +enum nand_ecc_algo { + NAND_ECC_UNKNOWN, + NAND_ECC_HAMMING, + NAND_ECC_BCH, +}; + +/* + * Constants for Hardware ECC + */ +/* Reset Hardware ECC for read */ +#define NAND_ECC_READ 0 +/* Reset Hardware ECC for write */ +#define NAND_ECC_WRITE 1 +/* Enable Hardware ECC before syndrome is read back from flash */ +#define NAND_ECC_READSYN 2 + +/* + * Enable generic NAND 'page erased' check. This check is only done when + * ecc.correct() returns -EBADMSG. + * Set this flag if your implementation does not fix bitflips in erased + * pages and you want to rely on the default implementation. + */ +#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) +#define NAND_ECC_MAXIMIZE BIT(1) +/* + * If your controller already sends the required NAND commands when + * reading or writing a page, then the framework is not supposed to + * send READ0 and SEQIN/PAGEPROG respectively. + */ +#define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2) + +/* Bit mask for flags passed to do_nand_read_ecc */ +#define NAND_GET_DEVICE 0x80 + + +/* + * Option constants for bizarre disfunctionality and real + * features. 
+ */ +/* Buswidth is 16 bit */ +#define NAND_BUSWIDTH_16 0x00000002 +/* Device supports partial programming without padding */ +#define NAND_NO_PADDING 0x00000004 +/* Chip has cache program function */ +#define NAND_CACHEPRG 0x00000008 +/* Chip has copy back function */ +#define NAND_COPYBACK 0x00000010 +/* + * Chip requires ready check on read (for auto-incremented sequential read). + * True only for small page devices; large page devices do not support + * autoincrement. + */ +#define NAND_NEED_READRDY 0x00000100 + +/* Chip does not allow subpage writes */ +#define NAND_NO_SUBPAGE_WRITE 0x00000200 + +/* Device is one of 'new' xD cards that expose fake nand command set */ +#define NAND_BROKEN_XD 0x00000400 + +/* Device behaves just like nand, but is readonly */ +#define NAND_ROM 0x00000800 + +/* Device supports subpage reads */ +#define NAND_SUBPAGE_READ 0x00001000 + +/* + * Some MLC NANDs need data scrambling to limit bitflips caused by repeated + * patterns. + */ +#define NAND_NEED_SCRAMBLING 0x00002000 + +/* Device needs 3rd row address cycle */ +#define NAND_ROW_ADDR_3 0x00004000 + +/* Options valid for Samsung large page devices */ +#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG + +/* Macros to identify the above */ +#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) +#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ)) +#define NAND_HAS_SUBPAGE_WRITE(chip) !((chip)->options & NAND_NO_SUBPAGE_WRITE) + +/* Non chip related options */ +/* This option skips the bbt scan during initialization. */ +#define NAND_SKIP_BBTSCAN 0x00010000 +/* + * This option is defined if the board driver allocates its own buffers + * (e.g. because it needs them DMA-coherent). + */ +#define NAND_OWN_BUFFERS 0x00020000 +/* Chip may not exist, so silence any errors in scan */ +#define NAND_SCAN_SILENT_NODEV 0x00040000 +/* + * Autodetect nand buswidth with readid/onfi. + * This suppose the driver will configure the hardware in 8 bits mode + * when calling nand_scan_ident, and update its configuration + * before calling nand_scan_tail. + */ +#define NAND_BUSWIDTH_AUTO 0x00080000 +/* + * This option could be defined by controller drivers to protect against + * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers + */ +#define NAND_USE_BOUNCE_BUFFER 0x00100000 + +/* Options set by nand scan */ +/* bbt has already been read */ +#define NAND_BBT_SCANNED 0x40000000 +/* Nand scan has allocated controller struct */ +#define NAND_CONTROLLER_ALLOC 0x80000000 + +/* Cell info constants */ +#define NAND_CI_CHIPNR_MSK 0x03 +#define NAND_CI_CELLTYPE_MSK 0x0C +#define NAND_CI_CELLTYPE_SHIFT 2 + +/* ONFI features */ +#define ONFI_FEATURE_16_BIT_BUS (1 << 0) +#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7) + +/* ONFI timing mode, used in both asynchronous and synchronous mode */ +#define ONFI_TIMING_MODE_0 (1 << 0) +#define ONFI_TIMING_MODE_1 (1 << 1) +#define ONFI_TIMING_MODE_2 (1 << 2) +#define ONFI_TIMING_MODE_3 (1 << 3) +#define ONFI_TIMING_MODE_4 (1 << 4) +#define ONFI_TIMING_MODE_5 (1 << 5) +#define ONFI_TIMING_MODE_UNKNOWN (1 << 6) + +/* ONFI feature address */ +#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1 + +/* Vendor-specific feature address (Micron) */ +#define ONFI_FEATURE_ADDR_READ_RETRY 0x89 + +/* ONFI subfeature parameters length */ +#define ONFI_SUBFEATURE_PARAM_LEN 4 + +/* ONFI optional commands SET/GET FEATURES supported? 
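Since the ONFI_TIMING_MODE_* values above are individual bits, a chip that supports modes 0 through 3 advertises the mask 0x0f. A small sketch (the helper name is illustrative) of picking the fastest advertised mode from such a mask, for example the one returned by onfi_get_async_timing_mode() further down in this header:

static int onfi_fastest_timing_mode(int modes)
{
	int mode;

	/* bit n corresponds to ONFI_TIMING_MODE_n; prefer the highest one */
	for (mode = 5; mode >= 0; mode--)
		if (modes & (1 << mode))
			return mode;

	return -1;	/* no valid mode advertised */
}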
*/ +#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2) + +struct nand_onfi_params { + /* rev info and features block */ + /* 'O' 'N' 'F' 'I' */ + u8 sig[4]; + __le16 revision; + __le16 features; + __le16 opt_cmd; + u8 reserved0[2]; + __le16 ext_param_page_length; /* since ONFI 2.1 */ + u8 num_of_param_pages; /* since ONFI 2.1 */ + u8 reserved1[17]; + + /* manufacturer information block */ + char manufacturer[12]; + char model[20]; + u8 jedec_id; + __le16 date_code; + u8 reserved2[13]; + + /* memory organization block */ + __le32 byte_per_page; + __le16 spare_bytes_per_page; + __le32 data_bytes_per_ppage; + __le16 spare_bytes_per_ppage; + __le32 pages_per_block; + __le32 blocks_per_lun; + u8 lun_count; + u8 addr_cycles; + u8 bits_per_cell; + __le16 bb_per_lun; + __le16 block_endurance; + u8 guaranteed_good_blocks; + __le16 guaranteed_block_endurance; + u8 programs_per_page; + u8 ppage_attr; + u8 ecc_bits; + u8 interleaved_bits; + u8 interleaved_ops; + u8 reserved3[13]; + + /* electrical parameter block */ + u8 io_pin_capacitance_max; + __le16 async_timing_mode; + __le16 program_cache_timing_mode; + __le16 t_prog; + __le16 t_bers; + __le16 t_r; + __le16 t_ccs; + __le16 src_sync_timing_mode; + u8 src_ssync_features; + __le16 clk_pin_capacitance_typ; + __le16 io_pin_capacitance_typ; + __le16 input_pin_capacitance_typ; + u8 input_pin_capacitance_max; + u8 driver_strength_support; + __le16 t_int_r; + __le16 t_adl; + u8 reserved4[8]; + + /* vendor */ + __le16 vendor_revision; + u8 vendor[88]; + + __le16 crc; +} __packed; + +#define ONFI_CRC_BASE 0x4F4E + +/* Extended ECC information Block Definition (since ONFI 2.1) */ +struct onfi_ext_ecc_info { + u8 ecc_bits; + u8 codeword_size; + __le16 bb_per_lun; + __le16 block_endurance; + u8 reserved[2]; +} __packed; + +#define ONFI_SECTION_TYPE_0 0 /* Unused section. */ +#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */ +#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */ +struct onfi_ext_section { + u8 type; + u8 length; +} __packed; + +#define ONFI_EXT_SECTION_MAX 8 + +/* Extended Parameter Page Definition (since ONFI 2.1) */ +struct onfi_ext_param_page { + __le16 crc; + u8 sig[4]; /* 'E' 'P' 'P' 'S' */ + u8 reserved0[10]; + struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX]; + + /* + * The actual size of the Extended Parameter Page is in + * @ext_param_page_length of nand_onfi_params{}. + * The following are the variable length sections. + * So we do not add any fields below. Please see the ONFI spec. 
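ONFI_CRC_BASE above seeds the CRC-16 (polynomial 0x8005) that protects the 256-byte parameter page; the CRC covers bytes 0 to 253 and is stored little-endian in the last two bytes. A sketch of the usual integrity check (helper names are illustrative):

static u16 onfi_crc16(u16 crc, const u8 *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++ << 8;
		for (i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
	}

	return crc;
}

static bool onfi_params_crc_ok(const struct nand_onfi_params *p)
{
	/* the stored CRC itself (bytes 254/255) is not part of the checksum */
	return onfi_crc16(ONFI_CRC_BASE, (const u8 *)p, 254) ==
	       le16_to_cpu(p->crc);
}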
+ */ +} __packed; + +struct nand_onfi_vendor_micron { + u8 two_plane_read; + u8 read_cache; + u8 read_unique_id; + u8 dq_imped; + u8 dq_imped_num_settings; + u8 dq_imped_feat_addr; + u8 rb_pulldown_strength; + u8 rb_pulldown_strength_feat_addr; + u8 rb_pulldown_strength_num_settings; + u8 otp_mode; + u8 otp_page_start; + u8 otp_data_prot_addr; + u8 otp_num_pages; + u8 otp_feat_addr; + u8 read_retry_options; + u8 reserved[72]; + u8 param_revision; +} __packed; + +struct jedec_ecc_info { + u8 ecc_bits; + u8 codeword_size; + __le16 bb_per_lun; + __le16 block_endurance; + u8 reserved[2]; +} __packed; + +/* JEDEC features */ +#define JEDEC_FEATURE_16_BIT_BUS (1 << 0) + +struct nand_jedec_params { + /* rev info and features block */ + /* 'J' 'E' 'S' 'D' */ + u8 sig[4]; + __le16 revision; + __le16 features; + u8 opt_cmd[3]; + __le16 sec_cmd; + u8 num_of_param_pages; + u8 reserved0[18]; + + /* manufacturer information block */ + char manufacturer[12]; + char model[20]; + u8 jedec_id[6]; + u8 reserved1[10]; + + /* memory organization block */ + __le32 byte_per_page; + __le16 spare_bytes_per_page; + u8 reserved2[6]; + __le32 pages_per_block; + __le32 blocks_per_lun; + u8 lun_count; + u8 addr_cycles; + u8 bits_per_cell; + u8 programs_per_page; + u8 multi_plane_addr; + u8 multi_plane_op_attr; + u8 reserved3[38]; + + /* electrical parameter block */ + __le16 async_sdr_speed_grade; + __le16 toggle_ddr_speed_grade; + __le16 sync_ddr_speed_grade; + u8 async_sdr_features; + u8 toggle_ddr_features; + u8 sync_ddr_features; + __le16 t_prog; + __le16 t_bers; + __le16 t_r; + __le16 t_r_multi_plane; + __le16 t_ccs; + __le16 io_pin_capacitance_typ; + __le16 input_pin_capacitance_typ; + __le16 clk_pin_capacitance_typ; + u8 driver_strength_support; + __le16 t_adl; + u8 reserved4[36]; + + /* ECC and endurance block */ + u8 guaranteed_good_blocks; + __le16 guaranteed_block_endurance; + struct jedec_ecc_info ecc_info[4]; + u8 reserved5[29]; + + /* reserved */ + u8 reserved6[148]; + + /* vendor */ + __le16 vendor_rev_num; + u8 reserved7[88]; + + /* CRC for Parameter Page */ + __le16 crc; +} __packed; + +/** + * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices + * @lock: protection lock + * @active: the mtd device which holds the controller currently + * @wq: wait queue to sleep on if a NAND operation is in + * progress used instead of the per chip wait queue + * when a hw controller is available. + */ +struct nand_hw_control { + spinlock_t lock; + struct nand_chip *active; +}; + +static inline void nand_hw_control_init(struct nand_hw_control *nfc) +{ + nfc->active = NULL; + spin_lock_init(&nfc->lock); + init_waitqueue_head(&nfc->wq); +} + +/** + * struct nand_ecc_step_info - ECC step information of ECC engine + * @stepsize: data bytes per ECC step + * @strengths: array of supported strengths + * @nstrengths: number of supported strengths + */ +struct nand_ecc_step_info { + int stepsize; + const int *strengths; + int nstrengths; +}; + +/** + * struct nand_ecc_caps - capability of ECC engine + * @stepinfos: array of ECC step information + * @nstepinfos: number of ECC step information + * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step + */ +struct nand_ecc_caps { + const struct nand_ecc_step_info *stepinfos; + int nstepinfos; + int (*calc_ecc_bytes)(int step_size, int strength); +}; + +/* a shorthand to generate struct nand_ecc_caps with only one ECC stepsize */ +#define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...) 
\ +static const int __name##_strengths[] = { __VA_ARGS__ }; \ +static const struct nand_ecc_step_info __name##_stepinfo = { \ + .stepsize = __step, \ + .strengths = __name##_strengths, \ + .nstrengths = ARRAY_SIZE(__name##_strengths), \ +}; \ +static const struct nand_ecc_caps __name = { \ + .stepinfos = &__name##_stepinfo, \ + .nstepinfos = 1, \ + .calc_ecc_bytes = __calc, \ +} + +/** + * struct nand_ecc_ctrl - Control structure for ECC + * @mode: ECC mode + * @algo: ECC algorithm + * @steps: number of ECC steps per page + * @size: data bytes per ECC step + * @bytes: ECC bytes per step + * @strength: max number of correctible bits per ECC step + * @total: total number of ECC bytes per page + * @prepad: padding information for syndrome based ECC generators + * @postpad: padding information for syndrome based ECC generators + * @options: ECC specific options (see NAND_ECC_XXX flags defined above) + * @layout: ECC layout control struct pointer + * @priv: pointer to private ECC control data + * @hwctl: function to control hardware ECC generator. Must only + * be provided if an hardware ECC is available + * @calculate: function for ECC calculation or readback from ECC hardware + * @correct: function for ECC correction, matching to ECC generator (sw/hw). + * Should return a positive number representing the number of + * corrected bitflips, -EBADMSG if the number of bitflips exceed + * ECC strength, or any other error code if the error is not + * directly related to correction. + * If -EBADMSG is returned the input buffers should be left + * untouched. + * @read_page_raw: function to read a raw page without ECC. This function + * should hide the specific layout used by the ECC + * controller and always return contiguous in-band and + * out-of-band data even if they're not stored + * contiguously on the NAND chip (e.g. + * NAND_ECC_HW_SYNDROME interleaves in-band and + * out-of-band data). + * @write_page_raw: function to write a raw page without ECC. This function + * should hide the specific layout used by the ECC + * controller and consider the passed data as contiguous + * in-band and out-of-band data. ECC controller is + * responsible for doing the appropriate transformations + * to adapt to its specific layout (e.g. + * NAND_ECC_HW_SYNDROME interleaves in-band and + * out-of-band data). + * @read_page: function to read a page according to the ECC generator + * requirements; returns maximum number of bitflips corrected in + * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error + * @read_subpage: function to read parts of the page covered by ECC; + * returns same as read_page() + * @write_subpage: function to write parts of the page covered by ECC. + * @write_page: function to write a page according to the ECC generator + * requirements. 
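For a controller that supports a single ECC step size, the NAND_ECC_CAPS_SINGLE() helper above collapses the three structures into one declaration. A hypothetical driver offering 4-, 8- and 16-bit strength over 512-byte steps might write:

/* BCH over a 512-byte codeword needs 13 bits per bit of strength */
static int my_nfc_calc_ecc_bytes(int step_size, int strength)
{
	return DIV_ROUND_UP(13 * strength, 8);
}

NAND_ECC_CAPS_SINGLE(my_nfc_ecc_caps, my_nfc_calc_ecc_bytes, 512, 4, 8, 16);

The resulting my_nfc_ecc_caps can then be passed to the nand_check_ecc_caps(), nand_match_ecc_req() or nand_maximize_ecc() helpers declared near the end of this header.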
+ * @write_oob_raw: function to write chip OOB data without ECC + * @read_oob_raw: function to read chip OOB data without ECC + * @read_oob: function to read chip OOB data + * @write_oob: function to write chip OOB data + */ +struct nand_ecc_ctrl { + nand_ecc_modes_t mode; + enum nand_ecc_algo algo; + int steps; + int size; + int bytes; + int total; + int strength; + int prepad; + int postpad; + unsigned int options; + struct nand_ecclayout *layout; + void *priv; + void (*hwctl)(struct mtd_info *mtd, int mode); + int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, + uint8_t *ecc_code); + int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, + uint8_t *calc_ecc); + int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page); + int (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page); + int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page); + int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offs, uint32_t len, uint8_t *buf, int page); + int (*write_subpage)(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offset, uint32_t data_len, + const uint8_t *data_buf, int oob_required, int page); + int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page); + int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, + int page); + int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, + int page); + int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page); + int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip, + int page); +}; + +static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc) +{ + return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS); +} + +/** + * struct nand_buffers - buffer structure for read/write + * @ecccalc: buffer pointer for calculated ECC, size is oobsize. + * @ecccode: buffer pointer for ECC read from flash, size is oobsize. + * @databuf: buffer pointer for data, size is (page size + oobsize). + * + * Do not change the order of buffers. databuf and oobrbuf must be in + * consecutive order. + */ +struct nand_buffers { + uint8_t ecccalc[ALIGN(NAND_MAX_OOBSIZE, ARCH_DMA_MINALIGN)]; + uint8_t ecccode[ALIGN(NAND_MAX_OOBSIZE, ARCH_DMA_MINALIGN)]; + uint8_t databuf[ALIGN(NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE, + ARCH_DMA_MINALIGN)]; +}; + +/** + * struct nand_sdr_timings - SDR NAND chip timings + * + * This struct defines the timing requirements of a SDR NAND chip. + * These information can be found in every NAND datasheets and the timings + * meaning are described in the ONFI specifications: + * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing + * Parameters) + * + * All these timings are expressed in picoseconds. 
+ * + * @tBERS_max: Block erase time + * @tCCS_min: Change column setup time + * @tPROG_max: Page program time + * @tR_max: Page read time + * @tALH_min: ALE hold time + * @tADL_min: ALE to data loading time + * @tALS_min: ALE setup time + * @tAR_min: ALE to RE# delay + * @tCEA_max: CE# access time + * @tCEH_min: CE# high hold time + * @tCH_min: CE# hold time + * @tCHZ_max: CE# high to output hi-Z + * @tCLH_min: CLE hold time + * @tCLR_min: CLE to RE# delay + * @tCLS_min: CLE setup time + * @tCOH_min: CE# high to output hold + * @tCS_min: CE# setup time + * @tDH_min: Data hold time + * @tDS_min: Data setup time + * @tFEAT_max: Busy time for Set Features and Get Features + * @tIR_min: Output hi-Z to RE# low + * @tITC_max: Interface and Timing Mode Change time + * @tRC_min: RE# cycle time + * @tREA_max: RE# access time + * @tREH_min: RE# high hold time + * @tRHOH_min: RE# high to output hold + * @tRHW_min: RE# high to WE# low + * @tRHZ_max: RE# high to output hi-Z + * @tRLOH_min: RE# low to output hold + * @tRP_min: RE# pulse width + * @tRR_min: Ready to RE# low (data only) + * @tRST_max: Device reset time, measured from the falling edge of R/B# to the + * rising edge of R/B#. + * @tWB_max: WE# high to SR[6] low + * @tWC_min: WE# cycle time + * @tWH_min: WE# high hold time + * @tWHR_min: WE# high to RE# low + * @tWP_min: WE# pulse width + * @tWW_min: WP# transition to WE# low + */ +struct nand_sdr_timings { + u64 tBERS_max; + u32 tCCS_min; + u64 tPROG_max; + u64 tR_max; + u32 tALH_min; + u32 tADL_min; + u32 tALS_min; + u32 tAR_min; + u32 tCEA_max; + u32 tCEH_min; + u32 tCH_min; + u32 tCHZ_max; + u32 tCLH_min; + u32 tCLR_min; + u32 tCLS_min; + u32 tCOH_min; + u32 tCS_min; + u32 tDH_min; + u32 tDS_min; + u32 tFEAT_max; + u32 tIR_min; + u32 tITC_max; + u32 tRC_min; + u32 tREA_max; + u32 tREH_min; + u32 tRHOH_min; + u32 tRHW_min; + u32 tRHZ_max; + u32 tRLOH_min; + u32 tRP_min; + u32 tRR_min; + u64 tRST_max; + u32 tWB_max; + u32 tWC_min; + u32 tWH_min; + u32 tWHR_min; + u32 tWP_min; + u32 tWW_min; +}; + +/** + * enum nand_data_interface_type - NAND interface timing type + * @NAND_SDR_IFACE: Single Data Rate interface + */ +enum nand_data_interface_type { + NAND_SDR_IFACE, +}; + +/** + * struct nand_data_interface - NAND interface timing + * @type: type of the timing + * @timings: The timing, type according to @type + */ +struct nand_data_interface { + enum nand_data_interface_type type; + union { + struct nand_sdr_timings sdr; + } timings; +}; + +/** + * nand_get_sdr_timings - get SDR timing from data interface + * @conf: The data interface + */ +static inline const struct nand_sdr_timings * +nand_get_sdr_timings(const struct nand_data_interface *conf) +{ + if (conf->type != NAND_SDR_IFACE) + return ERR_PTR(-EINVAL); + + return &conf->timings.sdr; +} + +/** + * struct nand_chip - NAND Private Flash Chip Data + * @mtd: MTD device registered to the MTD framework + * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the + * flash device + * @IO_ADDR_W: [BOARDSPECIFIC] address to write the 8 I/O lines of the + * flash device. 
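As a sketch of how the timing structures above are consumed, a controller's setup_data_interface() hook (see the struct nand_chip documentation below) typically extracts the SDR timings first and honours the check-only chip number; the actual register programming is hardware specific and omitted here:

static int my_nfc_setup_data_interface(struct mtd_info *mtd, int chipnr,
				       const struct nand_data_interface *conf)
{
	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return 0;	/* validate only, do not touch the hardware */

	/* program e.g. sdr->tWP_min / sdr->tWH_min (picoseconds) into the NFC */
	return 0;
}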
+ * @flash_node: [BOARDSPECIFIC] device node describing this instance + * @read_byte: [REPLACEABLE] read one byte from the chip + * @read_word: [REPLACEABLE] read one word from the chip + * @write_byte: [REPLACEABLE] write a single byte to the chip on the + * low 8 I/O lines + * @write_buf: [REPLACEABLE] write data from the buffer to the chip + * @read_buf: [REPLACEABLE] read data from the chip into the buffer + * @select_chip: [REPLACEABLE] select chip nr + * @block_bad: [REPLACEABLE] check if a block is bad, using OOB markers + * @block_markbad: [REPLACEABLE] mark a block bad + * @cmd_ctrl: [BOARDSPECIFIC] hardwarespecific function for controlling + * ALE/CLE/nCE. Also used to write command and address + * @dev_ready: [BOARDSPECIFIC] hardwarespecific function for accessing + * device ready/busy line. If set to NULL no access to + * ready/busy is available and the ready/busy information + * is read from the chip status register. + * @cmdfunc: [REPLACEABLE] hardwarespecific function for writing + * commands to the chip. + * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on + * ready. + * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for + * setting the read-retry mode. Mostly needed for MLC NAND. + * @ecc: [BOARDSPECIFIC] ECC control structure + * @buffers: buffer structure for read/write + * @buf_align: minimum buffer alignment required by a platform + * @hwcontrol: platform-specific hardware control structure + * @erase: [REPLACEABLE] erase function + * @scan_bbt: [REPLACEABLE] function to scan bad block table + * @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring + * data from array to read regs (tR). + * @state: [INTERN] the current state of the NAND device + * @oob_poi: "poison value buffer," used for laying out OOB data + * before writing + * @page_shift: [INTERN] number of address bits in a page (column + * address bits). + * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock + * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry + * @chip_shift: [INTERN] number of address bits in one chip + * @options: [BOARDSPECIFIC] various chip options. They can partly + * be set to inform nand_scan about special functionality. + * See the defines for further explanation. + * @bbt_options: [INTERN] bad block specific options. All options used + * here must come from bbm.h. By default, these options + * will be copied to the appropriate nand_bbt_descr's. + * @badblockpos: [INTERN] position of the bad block marker in the oob + * area. + * @badblockbits: [INTERN] minimum number of set bits in a good block's + * bad block marker position; i.e., BBM == 11110111b is + * not bad when badblockbits == 7 + * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC. + * @ecc_strength_ds: [INTERN] ECC correctability from the datasheet. + * Minimum amount of bit errors per @ecc_step_ds guaranteed + * to be correctable. If unknown, set to zero. + * @ecc_step_ds: [INTERN] ECC step required by the @ecc_strength_ds, + * also from the datasheet. It is the recommended ECC step + * size, if known; if unknown, set to zero. + * @onfi_timing_mode_default: [INTERN] default ONFI timing mode. This field is + * set to the actually used ONFI mode if the chip is + * ONFI compliant or deduced from the datasheet if + * the NAND chip is not ONFI compliant. 
+ * @numchips: [INTERN] number of physical chips + * @chipsize: [INTERN] the size of one chip for multichip arrays + * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 + * @pagebuf: [INTERN] holds the pagenumber which is currently in + * data_buf. + * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is + * currently in data_buf. + * @subpagesize: [INTERN] holds the subpagesize + * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded), + * non 0 if ONFI supported. + * @jedec_version: [INTERN] holds the chip JEDEC version (BCD encoded), + * non 0 if JEDEC supported. + * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is + * supported, 0 otherwise. + * @jedec_params: [INTERN] holds the JEDEC parameter page when JEDEC is + * supported, 0 otherwise. + * @read_retries: [INTERN] the number of read retry modes supported + * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand + * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand + * @setup_data_interface: [OPTIONAL] setup the data interface and timing. If + * chipnr is set to %NAND_DATA_IFACE_CHECK_ONLY this + * means the configuration should not be applied but + * only checked. + * @bbt: [INTERN] bad block table pointer + * @bbt_td: [REPLACEABLE] bad block table descriptor for flash + * lookup. + * @bbt_md: [REPLACEABLE] bad block table mirror descriptor + * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial + * bad block scan. + * @controller: [REPLACEABLE] a pointer to a hardware controller + * structure which is shared among multiple independent + * devices. + * @priv: [OPTIONAL] pointer to private chip data + * @write_page: [REPLACEABLE] High-level page write function + */ + +struct nand_chip { + struct mtd_info mtd; + void __iomem *IO_ADDR_R; + void __iomem *IO_ADDR_W; + + int flash_node; + + uint8_t (*read_byte)(struct mtd_info *mtd); + u16 (*read_word)(struct mtd_info *mtd); + void (*write_byte)(struct mtd_info *mtd, uint8_t byte); + void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); + void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); + void (*select_chip)(struct mtd_info *mtd, int chip); + int (*block_bad)(struct mtd_info *mtd, loff_t ofs); + int (*block_markbad)(struct mtd_info *mtd, loff_t ofs); + void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); + int (*dev_ready)(struct mtd_info *mtd); + void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, + int page_addr); + int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); + int (*erase)(struct mtd_info *mtd, int page); + int (*scan_bbt)(struct mtd_info *mtd); + int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, + uint32_t offset, int data_len, const uint8_t *buf, + int oob_required, int page, int raw); + int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); + int (*onfi_get_features)(struct mtd_info *mtd, struct nand_chip *chip, + int feature_addr, uint8_t *subfeature_para); + int (*setup_read_retry)(struct mtd_info *mtd, int retry_mode); + int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, + const struct nand_data_interface *conf); + + + int chip_delay; + unsigned int options; + unsigned int bbt_options; + + int page_shift; + int phys_erase_shift; + int bbt_erase_shift; + int chip_shift; + int numchips; + uint64_t chipsize; + int pagemask; + int pagebuf; + unsigned int pagebuf_bitflips; + int subpagesize; + uint8_t 
bits_per_cell; + uint16_t ecc_strength_ds; + uint16_t ecc_step_ds; + int onfi_timing_mode_default; + int badblockpos; + int badblockbits; + + int onfi_version; + int jedec_version; + struct nand_onfi_params onfi_params; + struct nand_jedec_params jedec_params; + + struct nand_data_interface *data_interface; + + int read_retries; + + flstate_t state; + + uint8_t *oob_poi; + struct nand_hw_control *controller; + struct nand_ecclayout *ecclayout; + + struct nand_ecc_ctrl ecc; + struct nand_buffers *buffers; + unsigned long buf_align; + struct nand_hw_control hwcontrol; + + uint8_t *bbt; + struct nand_bbt_descr *bbt_td; + struct nand_bbt_descr *bbt_md; + + struct nand_bbt_descr *badblock_pattern; + + void *priv; +}; + +static inline void nand_set_flash_node(struct nand_chip *chip, + ofnode node) +{ + chip->flash_node = ofnode_to_offset(node); +} + +static inline ofnode nand_get_flash_node(struct nand_chip *chip) +{ + return offset_to_ofnode(chip->flash_node); +} + +static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd) +{ + return container_of(mtd, struct nand_chip, mtd); +} + +static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip) +{ + return &chip->mtd; +} + +static inline void *nand_get_controller_data(struct nand_chip *chip) +{ + return chip->priv; +} + +static inline void nand_set_controller_data(struct nand_chip *chip, void *priv) +{ + chip->priv = priv; +} + +/* + * NAND Flash Manufacturer ID Codes + */ +#define NAND_MFR_TOSHIBA 0x98 +#define NAND_MFR_SAMSUNG 0xec +#define NAND_MFR_FUJITSU 0x04 +#define NAND_MFR_NATIONAL 0x8f +#define NAND_MFR_RENESAS 0x07 +#define NAND_MFR_STMICRO 0x20 +#define NAND_MFR_HYNIX 0xad +#define NAND_MFR_MICRON 0x2c +#define NAND_MFR_AMD 0x01 +#define NAND_MFR_MACRONIX 0xc2 +#define NAND_MFR_EON 0x92 +#define NAND_MFR_SANDISK 0x45 +#define NAND_MFR_INTEL 0x89 +#define NAND_MFR_ATO 0x9b + +/* The maximum expected count of bytes in the NAND ID sequence */ +#define NAND_MAX_ID_LEN 8 + +/* + * A helper for defining older NAND chips where the second ID byte fully + * defined the chip, including the geometry (chip size, eraseblock size, page + * size). All these chips have 512 bytes NAND page size. + */ +#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \ + { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \ + .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) } + +/* + * A helper for defining newer chips which report their page size and + * eraseblock size via the extended ID bytes. + * + * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with + * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the + * device ID now only represented a particular total chip size (and voltage, + * buswidth), and the page size, eraseblock size, and OOB size could vary while + * using the same device ID. 
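Putting the pieces together, a minimal legacy (non driver-model) board port mostly consists of filling in the [BOARDSPECIFIC] hooks and letting nand_scan() do the rest; the addresses, the hook body and the probe function name below are purely hypothetical, and the integration point into the board framework is not shown.

#define BOARD_NAND_CMD	((void __iomem *)0x40000004)	/* hypothetical latches */
#define BOARD_NAND_ADDR	((void __iomem *)0x40000008)

static void board_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	if (cmd == NAND_CMD_NONE)
		return;

	if (ctrl & NAND_CLE)
		writeb(cmd, BOARD_NAND_CMD);	/* command cycle */
	else if (ctrl & NAND_ALE)
		writeb(cmd, BOARD_NAND_ADDR);	/* address cycle */
}

static struct nand_chip board_chip;

static int my_board_nand_probe(void)
{
	struct nand_chip *chip = &board_chip;

	chip->IO_ADDR_R = (void __iomem *)0x40000000;	/* hypothetical data port */
	chip->IO_ADDR_W = (void __iomem *)0x40000000;
	chip->cmd_ctrl = board_nand_cmd_ctrl;
	chip->chip_delay = 20;		/* us; used because dev_ready is NULL */
	chip->ecc.mode = NAND_ECC_SOFT;

	/* identify the chip, set up geometry and ECC, scan the bad block table */
	return nand_scan(nand_to_mtd(chip), 1);
}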
+ */ +#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \ + { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \ + .options = (opts) } + +#define NAND_ECC_INFO(_strength, _step) \ + { .strength_ds = (_strength), .step_ds = (_step) } +#define NAND_ECC_STRENGTH(type) ((type)->ecc.strength_ds) +#define NAND_ECC_STEP(type) ((type)->ecc.step_ds) + +/** + * struct nand_flash_dev - NAND Flash Device ID Structure + * @name: a human-readable name of the NAND chip + * @dev_id: the device ID (the second byte of the full chip ID array) + * @mfr_id: manufecturer ID part of the full chip ID array (refers the same + * memory address as @id[0]) + * @dev_id: device ID part of the full chip ID array (refers the same memory + * address as @id[1]) + * @id: full device ID array + * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as + * well as the eraseblock size) is determined from the extended NAND + * chip ID array) + * @chipsize: total chip size in MiB + * @erasesize: eraseblock size in bytes (determined from the extended ID if 0) + * @options: stores various chip bit options + * @id_len: The valid length of the @id. + * @oobsize: OOB size + * @ecc: ECC correctability and step information from the datasheet. + * @ecc.strength_ds: The ECC correctability from the datasheet, same as the + * @ecc_strength_ds in nand_chip{}. + * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the + * @ecc_step_ds in nand_chip{}, also from the datasheet. + * For example, the "4bit ECC for each 512Byte" can be set with + * NAND_ECC_INFO(4, 512). + * @onfi_timing_mode_default: the default ONFI timing mode entered after a NAND + * reset. Should be deduced from timings described + * in the datasheet. + * + */ +struct nand_flash_dev { + char *name; + union { + struct { + uint8_t mfr_id; + uint8_t dev_id; + }; + uint8_t id[NAND_MAX_ID_LEN]; + }; + unsigned int pagesize; + unsigned int chipsize; + unsigned int erasesize; + unsigned int options; + uint16_t id_len; + uint16_t oobsize; + struct { + uint16_t strength_ds; + uint16_t step_ds; + } ecc; + int onfi_timing_mode_default; +}; + +/** + * struct nand_manufacturers - NAND Flash Manufacturer ID Structure + * @name: Manufacturer name + * @id: manufacturer ID code of device. +*/ +struct nand_manufacturers { + int id; + char *name; +}; + +extern struct nand_flash_dev nand_flash_ids[]; +extern struct nand_manufacturers nand_manuf_ids[]; + +int nand_default_bbt(struct mtd_info *mtd); +int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs); +int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs); +int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt); +int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, + int allowbbt); +int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, uint8_t *buf); + +/* +* Constants for oob configuration +*/ +#define NAND_SMALL_BADBLOCK_POS 5 +#define NAND_LARGE_BADBLOCK_POS 0 + +/** + * struct platform_nand_chip - chip level device structure + * @nr_chips: max. number of chips to scan for + * @chip_offset: chip number offset + * @nr_partitions: number of partitions pointed to by partitions (or zero) + * @partitions: mtd partition list + * @chip_delay: R/B delay value in us + * @options: Option flags, e.g. 16bit buswidth + * @bbt_options: BBT option flags, e.g. 
NAND_BBT_USE_FLASH + * @part_probe_types: NULL-terminated array of probe types + */ +struct platform_nand_chip { + int nr_chips; + int chip_offset; + int nr_partitions; + struct mtd_partition *partitions; + int chip_delay; + unsigned int options; + unsigned int bbt_options; + const char **part_probe_types; +}; + +/* Keep gcc happy */ +struct platform_device; + +/** + * struct platform_nand_ctrl - controller level device structure + * @probe: platform specific function to probe/setup hardware + * @remove: platform specific function to remove/teardown hardware + * @hwcontrol: platform specific hardware control structure + * @dev_ready: platform specific function to read ready/busy pin + * @select_chip: platform specific chip select function + * @cmd_ctrl: platform specific function for controlling + * ALE/CLE/nCE. Also used to write command and address + * @write_buf: platform specific function for write buffer + * @read_buf: platform specific function for read buffer + * @read_byte: platform specific function to read one byte from chip + * @priv: private data to transport driver specific settings + * + * All fields are optional and depend on the hardware driver requirements + */ +struct platform_nand_ctrl { + int (*probe)(struct platform_device *pdev); + void (*remove)(struct platform_device *pdev); + void (*hwcontrol)(struct mtd_info *mtd, int cmd); + int (*dev_ready)(struct mtd_info *mtd); + void (*select_chip)(struct mtd_info *mtd, int chip); + void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); + void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); + void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); + unsigned char (*read_byte)(struct mtd_info *mtd); + void *priv; +}; + +/** + * struct platform_nand_data - container structure for platform-specific data + * @chip: chip level chip structure + * @ctrl: controller level device structure + */ +struct platform_nand_data { + struct platform_nand_chip chip; + struct platform_nand_ctrl ctrl; +}; + +#ifdef CONFIG_SYS_NAND_ONFI_DETECTION +/* return the supported features. */ +static inline int onfi_feature(struct nand_chip *chip) +{ + return chip->onfi_version ? le16_to_cpu(chip->onfi_params.features) : 0; +} + +/* return the supported asynchronous timing mode. */ +static inline int onfi_get_async_timing_mode(struct nand_chip *chip) +{ + if (!chip->onfi_version) + return ONFI_TIMING_MODE_UNKNOWN; + return le16_to_cpu(chip->onfi_params.async_timing_mode); +} + +/* return the supported synchronous timing mode. */ +static inline int onfi_get_sync_timing_mode(struct nand_chip *chip) +{ + if (!chip->onfi_version) + return ONFI_TIMING_MODE_UNKNOWN; + return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); +} +#else +static inline int onfi_feature(struct nand_chip *chip) +{ + return 0; +} + +static inline int onfi_get_async_timing_mode(struct nand_chip *chip) +{ + return ONFI_TIMING_MODE_UNKNOWN; +} + +static inline int onfi_get_sync_timing_mode(struct nand_chip *chip) +{ + return ONFI_TIMING_MODE_UNKNOWN; +} +#endif + +int onfi_init_data_interface(struct nand_chip *chip, + struct nand_data_interface *iface, + enum nand_data_interface_type type, + int timing_mode); + +/* + * Check if it is a SLC nand. + * The !nand_is_slc() can be used to check the MLC/TLC nand chips. + * We do not distinguish the MLC and TLC now. 
+ */ +static inline bool nand_is_slc(struct nand_chip *chip) +{ + return chip->bits_per_cell == 1; +} + +/** + * Check if the opcode's address should be sent only on the lower 8 bits + * @command: opcode to check + */ +static inline int nand_opcode_8bits(unsigned int command) +{ + switch (command) { + case NAND_CMD_READID: + case NAND_CMD_PARAM: + case NAND_CMD_GET_FEATURES: + case NAND_CMD_SET_FEATURES: + return 1; + default: + break; + } + return 0; +} + +/* return the supported JEDEC features. */ +static inline int jedec_feature(struct nand_chip *chip) +{ + return chip->jedec_version ? le16_to_cpu(chip->jedec_params.features) + : 0; +} + +/* Standard NAND functions from nand_base.c */ +void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len); +void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len); +void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len); +void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len); +uint8_t nand_read_byte(struct mtd_info *mtd); + +/* get timing characteristics from ONFI timing mode. */ +const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); +/* get data interface from ONFI timing mode 0, used after reset. */ +const struct nand_data_interface *nand_get_default_data_interface(void); + +int nand_check_erased_ecc_chunk(void *data, int datalen, + void *ecc, int ecclen, + void *extraoob, int extraooblen, + int threshold); + +int nand_check_ecc_caps(struct nand_chip *chip, + const struct nand_ecc_caps *caps, int oobavail); + +int nand_match_ecc_req(struct nand_chip *chip, + const struct nand_ecc_caps *caps, int oobavail); + +int nand_maximize_ecc(struct nand_chip *chip, + const struct nand_ecc_caps *caps, int oobavail); + +/* Reset and initialize a NAND device */ +int nand_reset(struct nand_chip *chip, int chipnr); + +/* NAND operation helpers */ +int nand_reset_op(struct nand_chip *chip); +int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, + unsigned int len); +int nand_status_op(struct nand_chip *chip, u8 *status); +int nand_exit_status_op(struct nand_chip *chip); +int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock); +int nand_read_page_op(struct nand_chip *chip, unsigned int page, + unsigned int offset_in_page, void *buf, unsigned int len); +int nand_change_read_column_op(struct nand_chip *chip, + unsigned int offset_in_page, void *buf, + unsigned int len, bool force_8bit); +int nand_read_oob_op(struct nand_chip *chip, unsigned int page, + unsigned int offset_in_page, void *buf, unsigned int len); +int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page, + unsigned int offset_in_page, const void *buf, + unsigned int len); +int nand_prog_page_end_op(struct nand_chip *chip); +int nand_prog_page_op(struct nand_chip *chip, unsigned int page, + unsigned int offset_in_page, const void *buf, + unsigned int len); +int nand_change_write_column_op(struct nand_chip *chip, + unsigned int offset_in_page, const void *buf, + unsigned int len, bool force_8bit); +int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, + bool force_8bit); +int nand_write_data_op(struct nand_chip *chip, const void *buf, + unsigned int len, bool force_8bit); + +#endif /* __LINUX_MTD_RAWNAND_H */ diff --git a/include/linux/mtd/samsung_onenand.h b/include/linux/mtd/samsung_onenand.h new file mode 100644 index 0000000..7774fec --- /dev/null +++ b/include/linux/mtd/samsung_onenand.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 
2005-2009 Samsung Electronics + * Minkyu Kang + * Kyungmin Park + */ + +#ifndef __SAMSUNG_ONENAND_H__ +#define __SAMSUNG_ONENAND_H__ + +/* + * OneNAND Controller + */ + +#ifndef __ASSEMBLY__ +struct samsung_onenand { + unsigned int mem_cfg; /* 0x0000 */ + unsigned char res1[0xc]; + unsigned int burst_len; /* 0x0010 */ + unsigned char res2[0xc]; + unsigned int mem_reset; /* 0x0020 */ + unsigned char res3[0xc]; + unsigned int int_err_stat; /* 0x0030 */ + unsigned char res4[0xc]; + unsigned int int_err_mask; /* 0x0040 */ + unsigned char res5[0xc]; + unsigned int int_err_ack; /* 0x0050 */ + unsigned char res6[0xc]; + unsigned int ecc_err_stat; /* 0x0060 */ + unsigned char res7[0xc]; + unsigned int manufact_id; /* 0x0070 */ + unsigned char res8[0xc]; + unsigned int device_id; /* 0x0080 */ + unsigned char res9[0xc]; + unsigned int data_buf_size; /* 0x0090 */ + unsigned char res10[0xc]; + unsigned int boot_buf_size; /* 0x00A0 */ + unsigned char res11[0xc]; + unsigned int buf_amount; /* 0x00B0 */ + unsigned char res12[0xc]; + unsigned int tech; /* 0x00C0 */ + unsigned char res13[0xc]; + unsigned int fba; /* 0x00D0 */ + unsigned char res14[0xc]; + unsigned int fpa; /* 0x00E0 */ + unsigned char res15[0xc]; + unsigned int fsa; /* 0x00F0 */ + unsigned char res16[0x3c]; + unsigned int sync_mode; /* 0x0130 */ + unsigned char res17[0xc]; + unsigned int trans_spare; /* 0x0140 */ + unsigned char res18[0x3c]; + unsigned int err_page_addr; /* 0x0180 */ + unsigned char res19[0x1c]; + unsigned int int_pin_en; /* 0x01A0 */ + unsigned char res20[0x1c]; + unsigned int acc_clock; /* 0x01C0 */ + unsigned char res21[0x1c]; + unsigned int err_blk_addr; /* 0x01E0 */ + unsigned char res22[0xc]; + unsigned int flash_ver_id; /* 0x01F0 */ + unsigned char res23[0x6c]; + unsigned int watchdog_cnt_low; /* 0x0260 */ + unsigned char res24[0xc]; + unsigned int watchdog_cnt_hi; /* 0x0270 */ + unsigned char res25[0xc]; + unsigned int sync_write; /* 0x0280 */ + unsigned char res26[0x1c]; + unsigned int cold_reset; /* 0x02A0 */ + unsigned char res27[0xc]; + unsigned int ddp_device; /* 0x02B0 */ + unsigned char res28[0xc]; + unsigned int multi_plane; /* 0x02C0 */ + unsigned char res29[0x1c]; + unsigned int trans_mode; /* 0x02E0 */ + unsigned char res30[0x1c]; + unsigned int ecc_err_stat2; /* 0x0300 */ + unsigned char res31[0xc]; + unsigned int ecc_err_stat3; /* 0x0310 */ + unsigned char res32[0xc]; + unsigned int ecc_err_stat4; /* 0x0320 */ + unsigned char res33[0x1c]; + unsigned int dev_page_size; /* 0x0340 */ + unsigned char res34[0x4c]; + unsigned int int_mon_status; /* 0x0390 */ +}; +#endif + +#define ONENAND_MEM_RESET_HOT 0x3 +#define ONENAND_MEM_RESET_COLD 0x2 +#define ONENAND_MEM_RESET_WARM 0x1 + +#define INT_ERR_ALL 0x3fff +#define CACHE_OP_ERR (1 << 13) +#define RST_CMP (1 << 12) +#define RDY_ACT (1 << 11) +#define INT_ACT (1 << 10) +#define UNSUP_CMD (1 << 9) +#define LOCKED_BLK (1 << 8) +#define BLK_RW_CMP (1 << 7) +#define ERS_CMP (1 << 6) +#define PGM_CMP (1 << 5) +#define LOAD_CMP (1 << 4) +#define ERS_FAIL (1 << 3) +#define PGM_FAIL (1 << 2) +#define INT_TO (1 << 1) +#define LD_FAIL_ECC_ERR (1 << 0) + +#define TSRF (1 << 0) + +/* common initialize function */ +extern void s3c_onenand_init(struct mtd_info *); +extern int s5pc110_chip_probe(struct mtd_info *); +extern int s5pc210_chip_probe(struct mtd_info *); + +#endif diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h new file mode 100644 index 0000000..f9964a7 --- /dev/null +++ b/include/linux/mtd/spi-nor.h @@ -0,0 +1,429 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2014 Freescale Semiconductor, Inc. + * Synced from Linux v4.19 + */ + +#ifndef __LINUX_MTD_SPI_NOR_H +#define __LINUX_MTD_SPI_NOR_H + +#include +#include +#include + +/* + * Manufacturer IDs + * + * The first byte returned from the flash after sending opcode SPINOR_OP_RDID. + * Sometimes these are the same as CFI IDs, but sometimes they aren't. + */ +#define SNOR_MFR_ATMEL CFI_MFR_ATMEL +#define SNOR_MFR_GIGADEVICE 0xc8 +#define SNOR_MFR_INTEL CFI_MFR_INTEL +#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro <--> Micron */ +#define SNOR_MFR_MICRON CFI_MFR_MICRON /* ST Micro <--> Micron */ +#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX +#define SNOR_MFR_SPANSION CFI_MFR_AMD +#define SNOR_MFR_SST CFI_MFR_SST +#define SNOR_MFR_WINBOND 0xef /* Also used by some Spansion */ + +/* + * Note on opcode nomenclature: some opcodes have a format like + * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number + * of I/O lines used for the opcode, address, and data (respectively). The + * FUNCTION has an optional suffix of '4', to represent an opcode which + * requires a 4-byte (32-bit) address. + */ + +/* Flash opcodes. */ +#define SPINOR_OP_WREN 0x06 /* Write enable */ +#define SPINOR_OP_RDSR 0x05 /* Read status register */ +#define SPINOR_OP_WRSR 0x01 /* Write status register 1 byte */ +#define SPINOR_OP_RDSR2 0x3f /* Read status register 2 */ +#define SPINOR_OP_WRSR2 0x3e /* Write status register 2 */ +#define SPINOR_OP_READ 0x03 /* Read data bytes (low frequency) */ +#define SPINOR_OP_READ_FAST 0x0b /* Read data bytes (high frequency) */ +#define SPINOR_OP_READ_1_1_2 0x3b /* Read data bytes (Dual Output SPI) */ +#define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ +#define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ +#define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ +#define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ +#define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ +#define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ +#define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ +#define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ +#define SPINOR_OP_CHIP_ERASE 0xc7 /* Erase whole flash chip */ +#define SPINOR_OP_SE 0xd8 /* Sector erase (usually 64KiB) */ +#define SPINOR_OP_RDID 0x9f /* Read JEDEC ID */ +#define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ +#define SPINOR_OP_RDCR 0x35 /* Read configuration register */ +#define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ +#define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ +#define SPINOR_OP_RDEAR 0xc8 /* Read Extended Address Register */ +#define SPINOR_OP_WREAR 0xc5 /* Write Extended Address Register */ + +/* 4-byte address opcodes - used on Spansion and some Macronix flashes. 
*/ +#define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ +#define SPINOR_OP_READ_FAST_4B 0x0c /* Read data bytes (high frequency) */ +#define SPINOR_OP_READ_1_1_2_4B 0x3c /* Read data bytes (Dual Output SPI) */ +#define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ +#define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ +#define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ +#define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ +#define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ +#define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */ +#define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */ +#define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */ +#define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ + +/* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */ +#define SPINOR_OP_READ_1_1_1_DTR 0x0d +#define SPINOR_OP_READ_1_2_2_DTR 0xbd +#define SPINOR_OP_READ_1_4_4_DTR 0xed + +#define SPINOR_OP_READ_1_1_1_DTR_4B 0x0e +#define SPINOR_OP_READ_1_2_2_DTR_4B 0xbe +#define SPINOR_OP_READ_1_4_4_DTR_4B 0xee + +/* Used for SST flashes only. */ +#define SPINOR_OP_BP 0x02 /* Byte program */ +#define SPINOR_OP_WRDI 0x04 /* Write disable */ +#define SPINOR_OP_AAI_WP 0xad /* Auto address increment word program */ + +/* Used for SST26* flashes only. */ +#define SPINOR_OP_READ_BPR 0x72 /* Read block protection register */ +#define SPINOR_OP_WRITE_BPR 0x42 /* Write block protection register */ + +/* Used for S3AN flashes only */ +#define SPINOR_OP_XSE 0x50 /* Sector erase */ +#define SPINOR_OP_XPP 0x82 /* Page program */ +#define SPINOR_OP_XRDSR 0xd7 /* Read status register */ + +#define XSR_PAGESIZE BIT(0) /* Page size in Po2 or Linear */ +#define XSR_RDY BIT(7) /* Ready */ + +/* Used for Macronix and Winbond flashes. */ +#define SPINOR_OP_EN4B 0xb7 /* Enter 4-byte mode */ +#define SPINOR_OP_EX4B 0xe9 /* Exit 4-byte mode */ + +/* Used for Spansion flashes only. */ +#define SPINOR_OP_BRWR 0x17 /* Bank register write */ +#define SPINOR_OP_BRRD 0x16 /* Bank register read */ +#define SPINOR_OP_CLSR 0x30 /* Clear status register 1 */ + +/* Used for Micron flashes only. */ +#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ +#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ + +/* Status Register bits. */ +#define SR_WIP BIT(0) /* Write in progress */ +#define SR_WEL BIT(1) /* Write enable latch */ +/* meaning of other SR_* bits may differ between vendors */ +#define SR_BP0 BIT(2) /* Block protect 0 */ +#define SR_BP1 BIT(3) /* Block protect 1 */ +#define SR_BP2 BIT(4) /* Block protect 2 */ +#define SR_TB BIT(5) /* Top/Bottom protect */ +#define SR_SRWD BIT(7) /* SR write protect */ +/* Spansion/Cypress specific status bits */ +#define SR_E_ERR BIT(5) +#define SR_P_ERR BIT(6) + +#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */ + +/* Enhanced Volatile Configuration Register bits */ +#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ + +/* Flag Status Register bits */ +#define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ +#define FSR_E_ERR BIT(5) /* Erase operation status */ +#define FSR_P_ERR BIT(4) /* Program operation status */ +#define FSR_PT_ERR BIT(1) /* Protection error bit */ + +/* Configuration Register bits. */ +#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ + +/* Status Register 2 bits. 
*/ +#define SR2_QUAD_EN_BIT7 BIT(7) + +/* Supported SPI protocols */ +#define SNOR_PROTO_INST_MASK GENMASK(23, 16) +#define SNOR_PROTO_INST_SHIFT 16 +#define SNOR_PROTO_INST(_nbits) \ + ((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \ + SNOR_PROTO_INST_MASK) + +#define SNOR_PROTO_ADDR_MASK GENMASK(15, 8) +#define SNOR_PROTO_ADDR_SHIFT 8 +#define SNOR_PROTO_ADDR(_nbits) \ + ((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \ + SNOR_PROTO_ADDR_MASK) + +#define SNOR_PROTO_DATA_MASK GENMASK(7, 0) +#define SNOR_PROTO_DATA_SHIFT 0 +#define SNOR_PROTO_DATA(_nbits) \ + ((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \ + SNOR_PROTO_DATA_MASK) + +#define SNOR_PROTO_IS_DTR BIT(24) /* Double Transfer Rate */ + +#define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits) \ + (SNOR_PROTO_INST(_inst_nbits) | \ + SNOR_PROTO_ADDR(_addr_nbits) | \ + SNOR_PROTO_DATA(_data_nbits)) +#define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits) \ + (SNOR_PROTO_IS_DTR | \ + SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits)) + +enum spi_nor_protocol { + SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1), + SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2), + SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4), + SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8), + SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2), + SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4), + SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8), + SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2), + SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4), + SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8), + + SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1), + SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2), + SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4), + SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8), +}; + +static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto) +{ + return !!(proto & SNOR_PROTO_IS_DTR); +} + +static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto) +{ + return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >> + SNOR_PROTO_INST_SHIFT; +} + +static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto) +{ + return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >> + SNOR_PROTO_ADDR_SHIFT; +} + +static inline u8 spi_nor_get_protocol_data_nbits(enum spi_nor_protocol proto) +{ + return ((unsigned long)(proto & SNOR_PROTO_DATA_MASK)) >> + SNOR_PROTO_DATA_SHIFT; +} + +static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto) +{ + return spi_nor_get_protocol_data_nbits(proto); +} + +#define SPI_NOR_MAX_CMD_SIZE 8 +enum spi_nor_ops { + SPI_NOR_OPS_READ = 0, + SPI_NOR_OPS_WRITE, + SPI_NOR_OPS_ERASE, + SPI_NOR_OPS_LOCK, + SPI_NOR_OPS_UNLOCK, +}; + +enum spi_nor_option_flags { + SNOR_F_USE_FSR = BIT(0), + SNOR_F_HAS_SR_TB = BIT(1), + SNOR_F_NO_OP_CHIP_ERASE = BIT(2), + SNOR_F_S3AN_ADDR_DEFAULT = BIT(3), + SNOR_F_READY_XSR_RDY = BIT(4), + SNOR_F_USE_CLSR = BIT(5), + SNOR_F_BROKEN_RESET = BIT(6), +}; + +/** + * struct flash_info - Forward declaration of a structure used internally by + * spi_nor_scan() + */ +struct flash_info; + +/* + * TODO: Remove, once all users of spi_flash interface are moved to MTD + * + * struct spi_flash { + * Defined below (keep this text to enable searching for spi_flash decl) + * } + */ +#define spi_flash spi_nor + +/** + * struct spi_nor - Structure for defining a the SPI NOR layer + * @mtd: point to a mtd_info structure + * @lock: the lock for the read/write/erase/lock/unlock operations + * @dev: point to a spi device, or a spi nor controller device. 
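/*
 * Illustrative sketch: unpacking a protocol value with the helpers above.
 * For the Quad I/O read protocol SNOR_PROTO_1_4_4 the instruction, address
 * and data phases use 1, 4 and 4 lines respectively, and the DTR flag is
 * clear.
 */
static void example_decode_protocol(void)
{
	enum spi_nor_protocol proto = SNOR_PROTO_1_4_4;

	pr_debug("inst=%d addr=%d data=%d dtr=%d\n",
		 spi_nor_get_protocol_inst_nbits(proto),	/* 1 */
		 spi_nor_get_protocol_addr_nbits(proto),	/* 4 */
		 spi_nor_get_protocol_data_nbits(proto),	/* 4 */
		 spi_nor_protocol_is_dtr(proto));		/* 0 */
}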
+ * @info: spi-nor part JDEC MFR id and other info + * @page_size: the page size of the SPI NOR + * @addr_width: number of address bytes + * @erase_opcode: the opcode for erasing a sector + * @read_opcode: the read opcode + * @read_dummy: the dummy needed by the read operation + * @program_opcode: the program opcode + * @bank_read_cmd: Bank read cmd + * @bank_write_cmd: Bank write cmd + * @bank_curr: Current flash bank + * @sst_write_second: used by the SST write operation + * @flags: flag options for the current SPI-NOR (SNOR_F_*) + * @read_proto: the SPI protocol for read operations + * @write_proto: the SPI protocol for write operations + * @reg_proto the SPI protocol for read_reg/write_reg/erase operations + * @cmd_buf: used by the write_reg + * @prepare: [OPTIONAL] do some preparations for the + * read/write/erase/lock/unlock operations + * @unprepare: [OPTIONAL] do some post work after the + * read/write/erase/lock/unlock operations + * @read_reg: [DRIVER-SPECIFIC] read out the register + * @write_reg: [DRIVER-SPECIFIC] write data to the register + * @read: [DRIVER-SPECIFIC] read data from the SPI NOR + * @write: [DRIVER-SPECIFIC] write data to the SPI NOR + * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR + * at the offset @offs; if not provided by the driver, + * spi-nor will send the erase opcode via write_reg() + * @flash_lock: [FLASH-SPECIFIC] lock a region of the SPI NOR + * @flash_unlock: [FLASH-SPECIFIC] unlock a region of the SPI NOR + * @flash_is_locked: [FLASH-SPECIFIC] check if a region of the SPI NOR is + * @quad_enable: [FLASH-SPECIFIC] enables SPI NOR quad mode + * completely locked + * @priv: the private data + */ +struct spi_nor { + struct mtd_info mtd; + struct udevice *dev; + struct spi_slave *spi; + const struct flash_info *info; + u32 page_size; + u8 addr_width; + u8 erase_opcode; + u8 read_opcode; + u8 read_dummy; + u8 program_opcode; +#ifdef CONFIG_SPI_FLASH_BAR + u8 bank_read_cmd; + u8 bank_write_cmd; + u8 bank_curr; +#endif + enum spi_nor_protocol read_proto; + enum spi_nor_protocol write_proto; + enum spi_nor_protocol reg_proto; + bool sst_write_second; + u32 flags; + u8 cmd_buf[SPI_NOR_MAX_CMD_SIZE]; + + int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops); + void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops); + int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); + int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len); + + ssize_t (*read)(struct spi_nor *nor, loff_t from, + size_t len, u_char *read_buf); + ssize_t (*write)(struct spi_nor *nor, loff_t to, + size_t len, const u_char *write_buf); + int (*erase)(struct spi_nor *nor, loff_t offs); + + int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); + int (*quad_enable)(struct spi_nor *nor); + + void *priv; +/* Compatibility for spi_flash, remove once sf layer is merged with mtd */ + const char *name; + u32 size; + u32 sector_size; + u32 erase_size; +}; + +static inline void spi_nor_set_flash_node(struct spi_nor *nor, + const struct device_node *np) +{ + mtd_set_of_node(&nor->mtd, np); +} + +static inline const struct +device_node *spi_nor_get_flash_node(struct spi_nor *nor) +{ + return mtd_get_of_node(&nor->mtd); +} + +/** + * struct spi_nor_hwcaps - Structure for describing the hardware capabilies + * supported by the SPI controller (bus master). 
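/*
 * Illustrative sketch: the minimal wiring a flash driver might do before
 * handing the device to the core. The drv_read_reg/drv_write_reg/drv_read/
 * drv_write names are placeholders for controller-specific hooks;
 * spi_nor_scan(), declared at the end of this header, then fills in the
 * mtd_info and the remaining spi_nor fields from the detected part.
 */
static int example_spi_nor_probe(struct udevice *dev, struct spi_slave *slave,
				 struct spi_nor *nor)
{
	nor->dev = dev;
	nor->spi = slave;
	nor->read_reg = drv_read_reg;
	nor->write_reg = drv_write_reg;
	nor->read = drv_read;
	nor->write = drv_write;

	return spi_nor_scan(nor);
}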
+ * @mask: the bitmask listing all the supported hw capabilies + */ +struct spi_nor_hwcaps { + u32 mask; +}; + +/* + *(Fast) Read capabilities. + * MUST be ordered by priority: the higher bit position, the higher priority. + * As a matter of performances, it is relevant to use Octo SPI protocols first, + * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly + * (Slow) Read. + */ +#define SNOR_HWCAPS_READ_MASK GENMASK(14, 0) +#define SNOR_HWCAPS_READ BIT(0) +#define SNOR_HWCAPS_READ_FAST BIT(1) +#define SNOR_HWCAPS_READ_1_1_1_DTR BIT(2) + +#define SNOR_HWCAPS_READ_DUAL GENMASK(6, 3) +#define SNOR_HWCAPS_READ_1_1_2 BIT(3) +#define SNOR_HWCAPS_READ_1_2_2 BIT(4) +#define SNOR_HWCAPS_READ_2_2_2 BIT(5) +#define SNOR_HWCAPS_READ_1_2_2_DTR BIT(6) + +#define SNOR_HWCAPS_READ_QUAD GENMASK(10, 7) +#define SNOR_HWCAPS_READ_1_1_4 BIT(7) +#define SNOR_HWCAPS_READ_1_4_4 BIT(8) +#define SNOR_HWCAPS_READ_4_4_4 BIT(9) +#define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) + +#define SNOR_HWCPAS_READ_OCTO GENMASK(14, 11) +#define SNOR_HWCAPS_READ_1_1_8 BIT(11) +#define SNOR_HWCAPS_READ_1_8_8 BIT(12) +#define SNOR_HWCAPS_READ_8_8_8 BIT(13) +#define SNOR_HWCAPS_READ_1_8_8_DTR BIT(14) + +/* + * Page Program capabilities. + * MUST be ordered by priority: the higher bit position, the higher priority. + * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the + * legacy SPI 1-1-1 protocol. + * Note that Dual Page Programs are not supported because there is no existing + * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory + * implements such commands. + */ +#define SNOR_HWCAPS_PP_MASK GENMASK(22, 16) +#define SNOR_HWCAPS_PP BIT(16) + +#define SNOR_HWCAPS_PP_QUAD GENMASK(19, 17) +#define SNOR_HWCAPS_PP_1_1_4 BIT(17) +#define SNOR_HWCAPS_PP_1_4_4 BIT(18) +#define SNOR_HWCAPS_PP_4_4_4 BIT(19) + +#define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20) +#define SNOR_HWCAPS_PP_1_1_8 BIT(20) +#define SNOR_HWCAPS_PP_1_8_8 BIT(21) +#define SNOR_HWCAPS_PP_8_8_8 BIT(22) + +/** + * spi_nor_scan() - scan the SPI NOR + * @nor: the spi_nor structure + * + * The drivers can use this function to scan the SPI NOR. + * In the scanning, it will try to get all the necessary information to + * fill the mtd_info{} and the spi_nor{}. + * + * Return: 0 for success, others for failure. + */ +int spi_nor_scan(struct spi_nor *nor); + +#endif diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h new file mode 100644 index 0000000..be01e1e --- /dev/null +++ b/include/linux/mtd/spinand.h @@ -0,0 +1,433 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2016-2017 Micron Technology, Inc. + * + * Authors: + * Peter Pan + */ +#ifndef __LINUX_MTD_SPINAND_H +#define __LINUX_MTD_SPINAND_H + +#ifndef __UBOOT__ +#include +#include +#include +#include +#include +#include +#include +#else +#include +#include +#include +#include +#endif + +/** + * Standard SPI NAND flash operations + */ + +#define SPINAND_RESET_OP \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_WR_EN_DIS_OP(enable) \ + SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 
0x06 : 0x04, 1), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_READID_OP(ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \ + SPI_MEM_OP_NO_ADDR, \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1)) + +#define SPINAND_SET_FEATURE_OP(reg, valptr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \ + SPI_MEM_OP_ADDR(1, reg, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(1, valptr, 1)) + +#define SPINAND_GET_FEATURE_OP(reg, valptr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \ + SPI_MEM_OP_ADDR(1, reg, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_IN(1, valptr, 1)) + +#define SPINAND_BLK_ERASE_OP(addr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_PAGE_READ_OP(addr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1)) + +#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + +#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + +#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ + SPI_MEM_OP_ADDR(2, addr, 2), \ + SPI_MEM_OP_DUMMY(ndummy, 2), \ + SPI_MEM_OP_DATA_IN(len, buf, 2)) + +#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ + SPI_MEM_OP_ADDR(2, addr, 4), \ + SPI_MEM_OP_DUMMY(ndummy, 4), \ + SPI_MEM_OP_DATA_IN(len, buf, 4)) + +#define SPINAND_PROG_EXEC_OP(addr) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \ + SPI_MEM_OP_ADDR(3, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_NO_DATA) + +#define SPINAND_PROG_LOAD(reset, addr, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(len, buf, 1)) + +#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \ + SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \ + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_NO_DUMMY, \ + SPI_MEM_OP_DATA_OUT(len, buf, 4)) + +/** + * Standard SPI NAND flash commands + */ +#define SPINAND_CMD_PROG_LOAD_X4 0x32 +#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34 + +/* feature register */ +#define REG_BLOCK_LOCK 0xa0 +#define BL_ALL_UNLOCKED 0x00 + +/* configuration register */ +#define REG_CFG 0xb0 +#define CFG_OTP_ENABLE BIT(6) +#define CFG_ECC_ENABLE BIT(4) +#define CFG_QUAD_ENABLE BIT(0) + +/* status register */ +#define REG_STATUS 0xc0 +#define STATUS_BUSY BIT(0) +#define STATUS_ERASE_FAILED BIT(2) +#define STATUS_PROG_FAILED BIT(3) +#define STATUS_ECC_MASK GENMASK(5, 4) +#define STATUS_ECC_NO_BITFLIPS (0 << 4) +#define STATUS_ECC_HAS_BITFLIPS (1 << 4) +#define STATUS_ECC_UNCOR_ERROR (2 << 4) + +struct spinand_op; +struct spinand_device; + +#define SPINAND_MAX_ID_LEN 4 + +/** + * struct spinand_id - SPI NAND id structure + * @data: buffer containing the id bytes. 
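/*
 * Illustrative sketch: using the GET FEATURE template above to read the
 * status register and interpret the busy/ECC bits. spi_mem_exec_op() is
 * assumed to be the spi-mem helper that executes a struct spi_mem_op on a
 * slave; @status should point to a DMA-able buffer rather than the stack.
 */
static int example_spinand_read_status(struct spi_slave *slave, u8 *status)
{
	struct spi_mem_op op = SPINAND_GET_FEATURE_OP(REG_STATUS, status);
	int ret;

	ret = spi_mem_exec_op(slave, &op);
	if (ret)
		return ret;

	if (*status & STATUS_BUSY)
		return -EBUSY;
	if ((*status & STATUS_ECC_MASK) == STATUS_ECC_UNCOR_ERROR)
		return -EBADMSG;

	return 0;
}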
Currently 4 bytes large, but can + * be extended if required + * @len: ID length + * + * struct_spinand_id->data contains all bytes returned after a READ_ID command, + * including dummy bytes if the chip does not emit ID bytes right after the + * READ_ID command. The responsibility to extract real ID bytes is left to + * struct_manufacurer_ops->detect(). + */ +struct spinand_id { + u8 data[SPINAND_MAX_ID_LEN]; + int len; +}; + +/** + * struct manufacurer_ops - SPI NAND manufacturer specific operations + * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed + * the core calls the struct_manufacurer_ops->detect() hook of each + * registered manufacturer until one of them return 1. Note that + * the first thing to check in this hook is that the manufacturer ID + * in struct_spinand_device->id matches the manufacturer whose + * ->detect() hook has been called. Should return 1 if there's a + * match, 0 if the manufacturer ID does not match and a negative + * error code otherwise. When true is returned, the core assumes + * that properties of the NAND chip (spinand->base.memorg and + * spinand->base.eccreq) have been filled + * @init: initialize a SPI NAND device + * @cleanup: cleanup a SPI NAND device + * + * Each SPI NAND manufacturer driver should implement this interface so that + * NAND chips coming from this vendor can be detected and initialized properly. + */ +struct spinand_manufacturer_ops { + int (*detect)(struct spinand_device *spinand); + int (*init)(struct spinand_device *spinand); + void (*cleanup)(struct spinand_device *spinand); +}; + +/** + * struct spinand_manufacturer - SPI NAND manufacturer instance + * @id: manufacturer ID + * @name: manufacturer name + * @ops: manufacturer operations + */ +struct spinand_manufacturer { + u8 id; + char *name; + const struct spinand_manufacturer_ops *ops; +}; + +/* SPI NAND manufacturers */ +extern const struct spinand_manufacturer gigadevice_spinand_manufacturer; +extern const struct spinand_manufacturer macronix_spinand_manufacturer; +extern const struct spinand_manufacturer micron_spinand_manufacturer; +extern const struct spinand_manufacturer winbond_spinand_manufacturer; + +/** + * struct spinand_op_variants - SPI NAND operation variants + * @ops: the list of variants for a given operation + * @nops: the number of variants + * + * Some operations like read-from-cache/write-to-cache have several variants + * depending on the number of IO lines you use to transfer data or address + * cycles. This structure is a way to describe the different variants supported + * by a chip and let the core pick the best one based on the SPI mem controller + * capabilities. + */ +struct spinand_op_variants { + const struct spi_mem_op *ops; + unsigned int nops; +}; + +#define SPINAND_OP_VARIANTS(name, ...) \ + const struct spinand_op_variants name = { \ + .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \ + .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \ + sizeof(struct spi_mem_op), \ + } + +/** + * spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND + * chip + * @get_status: get the ECC status. Should return a positive number encoding + * the number of corrected bitflips if correction was possible or + * -EBADMSG if there are uncorrectable errors. 
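/*
 * Illustrative sketch: the usual shape of a manufacturer ->detect() hook.
 * EXAMPLE_MFR_ID and example_spinand_table are placeholders for a vendor's
 * ID byte and its spinand_info table; spinand_match_and_init() is declared
 * at the end of this header.
 */
static int example_spinand_detect(struct spinand_device *spinand)
{
	u8 *id = spinand->id.data;
	int ret;

	/* Only claim the chip if the manufacturer byte matches. */
	if (id[0] != EXAMPLE_MFR_ID)
		return 0;

	ret = spinand_match_and_init(spinand, example_spinand_table,
				     ARRAY_SIZE(example_spinand_table), id[1]);
	if (ret)
		return ret;

	return 1;
}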
I can also return + * other negative error codes if the error is not caused by + * uncorrectable bitflips + * @ooblayout: the OOB layout used by the on-die ECC implementation + */ +struct spinand_ecc_info { + int (*get_status)(struct spinand_device *spinand, u8 status); + const struct mtd_ooblayout_ops *ooblayout; +}; + +#define SPINAND_HAS_QE_BIT BIT(0) + +/** + * struct spinand_info - Structure used to describe SPI NAND chips + * @model: model name + * @devid: device ID + * @flags: OR-ing of the SPINAND_XXX flags + * @memorg: memory organization + * @eccreq: ECC requirements + * @eccinfo: on-die ECC info + * @op_variants: operations variants + * @op_variants.read_cache: variants of the read-cache operation + * @op_variants.write_cache: variants of the write-cache operation + * @op_variants.update_cache: variants of the update-cache operation + * @select_target: function used to select a target/die. Required only for + * multi-die chips + * + * Each SPI NAND manufacturer driver should have a spinand_info table + * describing all the chips supported by the driver. + */ +struct spinand_info { + const char *model; + u8 devid; + u32 flags; + struct nand_memory_organization memorg; + struct nand_ecc_req eccreq; + struct spinand_ecc_info eccinfo; + struct { + const struct spinand_op_variants *read_cache; + const struct spinand_op_variants *write_cache; + const struct spinand_op_variants *update_cache; + } op_variants; + int (*select_target)(struct spinand_device *spinand, + unsigned int target); +}; + +#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \ + { \ + .read_cache = __read, \ + .write_cache = __write, \ + .update_cache = __update, \ + } + +#define SPINAND_ECCINFO(__ooblayout, __get_status) \ + .eccinfo = { \ + .ooblayout = __ooblayout, \ + .get_status = __get_status, \ + } + +#define SPINAND_SELECT_TARGET(__func) \ + .select_target = __func, + +#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \ + __flags, ...) \ + { \ + .model = __model, \ + .devid = __id, \ + .memorg = __memorg, \ + .eccreq = __eccreq, \ + .op_variants = __op_variants, \ + .flags = __flags, \ + __VA_ARGS__ \ + } + +/** + * struct spinand_device - SPI NAND device instance + * @base: NAND device instance + * @slave: pointer to the SPI slave object + * @lock: lock used to serialize accesses to the NAND + * @id: NAND ID as returned by READ_ID + * @flags: NAND flags + * @op_templates: various SPI mem op templates + * @op_templates.read_cache: read cache op template + * @op_templates.write_cache: write cache op template + * @op_templates.update_cache: update cache op template + * @select_target: select a specific target/die. Usually called before sending + * a command addressing a page or an eraseblock embedded in + * this die. Only required if your chip exposes several dies + * @cur_target: currently selected target/die + * @eccinfo: on-die ECC information + * @cfg_cache: config register cache. One entry per die + * @databuf: bounce buffer for data + * @oobbuf: bounce buffer for OOB data + * @scratchbuf: buffer used for everything but page accesses. 
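/*
 * Illustrative sketch: a ->get_status() hook mapping the generic
 * STATUS_ECC_* encoding defined earlier in this header onto the return
 * convention documented above. Chips that report an exact bitflip count
 * usually need an additional, vendor-specific status read.
 */
static int example_ecc_get_status(struct spinand_device *spinand, u8 status)
{
	switch (status & STATUS_ECC_MASK) {
	case STATUS_ECC_NO_BITFLIPS:
		return 0;
	case STATUS_ECC_HAS_BITFLIPS:
		return 1;		/* at least one corrected bitflip */
	case STATUS_ECC_UNCOR_ERROR:
		return -EBADMSG;
	default:
		break;
	}

	return -EINVAL;
}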
This is needed + * because the spi-mem interface explicitly requests that buffers + * passed in spi_mem_op be DMA-able, so we can't based the bufs on + * the stack + * @manufacturer: SPI NAND manufacturer information + * @priv: manufacturer private data + */ +struct spinand_device { + struct nand_device base; +#ifndef __UBOOT__ + struct spi_mem *spimem; + struct mutex lock; +#else + struct spi_slave *slave; +#endif + struct spinand_id id; + u32 flags; + + struct { + const struct spi_mem_op *read_cache; + const struct spi_mem_op *write_cache; + const struct spi_mem_op *update_cache; + } op_templates; + + int (*select_target)(struct spinand_device *spinand, + unsigned int target); + unsigned int cur_target; + + struct spinand_ecc_info eccinfo; + + u8 *cfg_cache; + u8 *databuf; + u8 *oobbuf; + u8 *scratchbuf; + const struct spinand_manufacturer *manufacturer; + void *priv; +}; + +/** + * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance + * @mtd: MTD instance + * + * Return: the SPI NAND device attached to @mtd. + */ +static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd) +{ + return container_of(mtd_to_nanddev(mtd), struct spinand_device, base); +} + +/** + * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device + * @spinand: SPI NAND device + * + * Return: the MTD device embedded in @spinand. + */ +static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand) +{ + return nanddev_to_mtd(&spinand->base); +} + +/** + * nand_to_spinand() - Get the SPI NAND device embedding an NAND object + * @nand: NAND object + * + * Return: the SPI NAND device embedding @nand. + */ +static inline struct spinand_device *nand_to_spinand(struct nand_device *nand) +{ + return container_of(nand, struct spinand_device, base); +} + +/** + * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object + * @spinand: SPI NAND device + * + * Return: the NAND device embedded in @spinand. + */ +static inline struct nand_device * +spinand_to_nand(struct spinand_device *spinand) +{ + return &spinand->base; +} + +/** + * spinand_set_of_node - Attach a DT node to a SPI NAND device + * @spinand: SPI NAND device + * @np: DT node + * + * Attach a DT node to a SPI NAND device. + */ +static inline void spinand_set_of_node(struct spinand_device *spinand, + const struct device_node *np) +{ + nanddev_set_of_node(&spinand->base, np); +} + +int spinand_match_and_init(struct spinand_device *dev, + const struct spinand_info *table, + unsigned int table_size, u8 devid); + +int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); +int spinand_select_target(struct spinand_device *spinand, unsigned int target); + +#endif /* __LINUX_MTD_SPINAND_H */ diff --git a/include/linux/mtd/st_smi.h b/include/linux/mtd/st_smi.h new file mode 100644 index 0000000..6058969 --- /dev/null +++ b/include/linux/mtd/st_smi.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * (C) Copyright 2009 + * Vipin Kumar, ST Micoelectronics, vipin.kumar@st.com. + */ + +#ifndef ST_SMI_H +#define ST_SMI_H + +/* 0xF800.0000 . 0xFBFF.FFFF 64MB SMI (Serial Flash Mem) */ +/* 0xFC00.0000 . 0xFC1F.FFFF 2MB SMI (Serial Flash Reg.) 
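/*
 * Illustrative sketch: tying the SPI NAND helpers above together by
 * setting the quad-enable bit for chips that advertise SPINAND_HAS_QE_BIT,
 * using spinand_upd_cfg() and the CFG_QUAD_ENABLE bit defined earlier.
 */
static int example_spinand_enable_quad(struct spinand_device *spinand)
{
	if (!(spinand->flags & SPINAND_HAS_QE_BIT))
		return 0;

	return spinand_upd_cfg(spinand, CFG_QUAD_ENABLE, CFG_QUAD_ENABLE);
}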
*/ + +#define FLASH_START_ADDRESS CONFIG_SYS_FLASH_BASE +#define FLASH_BANK_SIZE CONFIG_SYS_FLASH_BANK_SIZE + +#define SMIBANK0_BASE (FLASH_START_ADDRESS) +#define SMIBANK1_BASE (SMIBANK0_BASE + FLASH_BANK_SIZE) +#define SMIBANK2_BASE (SMIBANK1_BASE + FLASH_BANK_SIZE) +#define SMIBANK3_BASE (SMIBANK2_BASE + FLASH_BANK_SIZE) + +#define BANK0 0 +#define BANK1 1 +#define BANK2 2 +#define BANK3 3 + +struct smi_regs { + u32 smi_cr1; + u32 smi_cr2; + u32 smi_sr; + u32 smi_tr; + u32 smi_rr; +}; + +/* CONTROL REG 1 */ +#define BANK_EN 0x0000000F /* enables all banks */ +#define DSEL_TIME 0x00000060 /* Deselect time */ +#define PRESCAL5 0x00000500 /* AHB_CK prescaling value */ +#define PRESCALA 0x00000A00 /* AHB_CK prescaling value */ +#define PRESCAL3 0x00000300 /* AHB_CK prescaling value */ +#define PRESCAL4 0x00000400 /* AHB_CK prescaling value */ +#define SW_MODE 0x10000000 /* enables SW Mode */ +#define WB_MODE 0x20000000 /* Write Burst Mode */ +#define FAST_MODE 0x00008000 /* Fast Mode */ +#define HOLD1 0x00010000 + +/* CONTROL REG 2 */ +#define RD_STATUS_REG 0x00000400 /* reads status reg */ +#define WE 0x00000800 /* Write Enable */ +#define BANK0_SEL 0x00000000 /* Select Banck0 */ +#define BANK1_SEL 0x00001000 /* Select Banck1 */ +#define BANK2_SEL 0x00002000 /* Select Banck2 */ +#define BANK3_SEL 0x00003000 /* Select Banck3 */ +#define BANKSEL_SHIFT 12 +#define SEND 0x00000080 /* Send data */ +#define TX_LEN_1 0x00000001 /* data length = 1 byte */ +#define TX_LEN_2 0x00000002 /* data length = 2 byte */ +#define TX_LEN_3 0x00000003 /* data length = 3 byte */ +#define TX_LEN_4 0x00000004 /* data length = 4 byte */ +#define RX_LEN_1 0x00000010 /* data length = 1 byte */ +#define RX_LEN_2 0x00000020 /* data length = 2 byte */ +#define RX_LEN_3 0x00000030 /* data length = 3 byte */ +#define RX_LEN_4 0x00000040 /* data length = 4 byte */ +#define TFIE 0x00000100 /* Tx Flag Interrupt Enable */ +#define WCIE 0x00000200 /* WCF Interrupt Enable */ + +/* STATUS_REG */ +#define INT_WCF_CLR 0xFFFFFDFF /* clear: WCF clear */ +#define INT_TFF_CLR 0xFFFFFEFF /* clear: TFF clear */ +#define WIP_BIT 0x00000001 /* WIP Bit of SPI SR */ +#define WEL_BIT 0x00000002 /* WEL Bit of SPI SR */ +#define RSR 0x00000005 /* Read Status regiser */ +#define TFF 0x00000100 /* Transfer Finished FLag */ +#define WCF 0x00000200 /* Transfer Finished FLag */ +#define ERF1 0x00000400 /* Error Flag 1 */ +#define ERF2 0x00000800 /* Error Flag 2 */ +#define WM0 0x00001000 /* WM Bank 0 */ +#define WM1 0x00002000 /* WM Bank 1 */ +#define WM2 0x00004000 /* WM Bank 2 */ +#define WM3 0x00008000 /* WM Bank 3 */ +#define WM_SHIFT 12 + +/* TR REG */ +#define READ_ID 0x0000009F /* Read Identification */ +#define BULK_ERASE 0x000000C7 /* BULK erase */ +#define SECTOR_ERASE 0x000000D8 /* SECTOR erase */ +#define WRITE_ENABLE 0x00000006 /* Wenable command to FLASH */ + +struct flash_dev { + u32 density; + ulong size; + ushort sector_count; +}; + +#define SFLASH_PAGE_SIZE 0x100 /* flash page size */ +#define XFER_FINISH_TOUT 15 /* xfer finish timeout(in ms) */ +#define WMODE_TOUT 15 /* write enable timeout(in ms) */ + +extern void smi_init(void); + +#endif diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h new file mode 100644 index 0000000..badf6a0 --- /dev/null +++ b/include/linux/mtd/ubi.h @@ -0,0 +1,289 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (c) International Business Machines Corp., 2006 + * + * Author: Artem Bityutskiy (Битюцкий Артём) + */ + +#ifndef __LINUX_UBI_H__ +#define __LINUX_UBI_H__ + +#include 
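/*
 * Illustrative sketch: polling the SMI status register above for the
 * transfer-finished flag, bounded by the XFER_FINISH_TOUT timeout. It
 * assumes readl() from asm/io.h and U-Boot's get_timer() millisecond
 * counter; the real controller driver sequence may differ.
 */
static int example_smi_wait_xfer_finish(struct smi_regs *regs)
{
	ulong start = get_timer(0);

	while (get_timer(start) < XFER_FINISH_TOUT) {
		if (readl(&regs->smi_sr) & TFF)
			return 0;
	}

	return -ETIMEDOUT;
}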
+#ifndef __UBOOT__ +#include +#include +#include +#endif + +/* All voumes/LEBs */ +#define UBI_ALL -1 + +/* + * Maximum number of scatter gather list entries, + * we use only 64 to have a lower memory foot print. + */ +#define UBI_MAX_SG_COUNT 64 + +/* + * enum ubi_open_mode - UBI volume open mode constants. + * + * UBI_READONLY: read-only mode + * UBI_READWRITE: read-write mode + * UBI_EXCLUSIVE: exclusive mode + * UBI_METAONLY: modify only the volume meta-data, + * i.e. the data stored in the volume table, but not in any of volume LEBs. + */ +enum { + UBI_READONLY = 1, + UBI_READWRITE, + UBI_EXCLUSIVE, + UBI_METAONLY +}; + +/** + * struct ubi_volume_info - UBI volume description data structure. + * @vol_id: volume ID + * @ubi_num: UBI device number this volume belongs to + * @size: how many physical eraseblocks are reserved for this volume + * @used_bytes: how many bytes of data this volume contains + * @used_ebs: how many physical eraseblocks of this volume actually contain any + * data + * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) + * @corrupted: non-zero if the volume is corrupted (static volumes only) + * @upd_marker: non-zero if the volume has update marker set + * @alignment: volume alignment + * @usable_leb_size: how many bytes are available in logical eraseblocks of + * this volume + * @name_len: volume name length + * @name: volume name + * @cdev: UBI volume character device major and minor numbers + * + * The @corrupted flag is only relevant to static volumes and is always zero + * for dynamic ones. This is because UBI does not care about dynamic volume + * data protection and only cares about protecting static volume data. + * + * The @upd_marker flag is set if the volume update operation was interrupted. + * Before touching the volume data during the update operation, UBI first sets + * the update marker flag for this volume. If the volume update operation was + * further interrupted, the update marker indicates this. If the update marker + * is set, the contents of the volume is certainly damaged and a new volume + * update operation has to be started. + * + * To put it differently, @corrupted and @upd_marker fields have different + * semantics: + * o the @corrupted flag means that this static volume is corrupted for some + * reasons, but not because an interrupted volume update + * o the @upd_marker field means that the volume is damaged because of an + * interrupted update operation. + * + * I.e., the @corrupted flag is never set if the @upd_marker flag is set. + * + * The @used_bytes and @used_ebs fields are only really needed for static + * volumes and contain the number of bytes stored in this static volume and how + * many eraseblock this data occupies. In case of dynamic volumes, the + * @used_bytes field is equivalent to @size*@usable_leb_size, and the @used_ebs + * field is equivalent to @size. + * + * In general, logical eraseblock size is a property of the UBI device, not + * of the UBI volume. Indeed, the logical eraseblock size depends on the + * physical eraseblock size and on how much bytes UBI headers consume. But + * because of the volume alignment (@alignment), the usable size of logical + * eraseblocks if a volume may be less. The following equation is true: + * @usable_leb_size = LEB size - (LEB size mod @alignment), + * where LEB size is the logical eraseblock size defined by the UBI device. + * + * The alignment is multiple to the minimal flash input/output unit size or %1 + * if all the available space is used. 
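/*
 * Illustrative sketch: the relation quoted above, expressed as code. A
 * volume's usable LEB size is the device LEB size rounded down to the
 * volume alignment.
 */
static inline int example_usable_leb_size(int leb_size, int alignment)
{
	return leb_size - (leb_size % alignment);
}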
+ * + * To put this differently, alignment may be considered is a way to change + * volume logical eraseblock sizes. + */ +struct ubi_volume_info { + int ubi_num; + int vol_id; + int size; + long long used_bytes; + int used_ebs; + int vol_type; + int corrupted; + int upd_marker; + int alignment; + int usable_leb_size; + int name_len; + const char *name; + dev_t cdev; +}; + +/** + * struct ubi_sgl - UBI scatter gather list data structure. + * @list_pos: current position in @sg[] + * @page_pos: current position in @sg[@list_pos] + * @sg: the scatter gather list itself + * + * ubi_sgl is a wrapper around a scatter list which keeps track of the + * current position in the list and the current list item such that + * it can be used across multiple ubi_leb_read_sg() calls. + */ +struct ubi_sgl { + int list_pos; + int page_pos; +#ifndef __UBOOT__ + struct scatterlist sg[UBI_MAX_SG_COUNT]; +#endif +}; + +/** + * ubi_sgl_init - initialize an UBI scatter gather list data structure. + * @usgl: the UBI scatter gather struct itself + * + * Please note that you still have to use sg_init_table() or any adequate + * function to initialize the unterlaying struct scatterlist. + */ +static inline void ubi_sgl_init(struct ubi_sgl *usgl) +{ + usgl->list_pos = 0; + usgl->page_pos = 0; +} + +/** + * struct ubi_device_info - UBI device description data structure. + * @ubi_num: ubi device number + * @leb_size: logical eraseblock size on this UBI device + * @leb_start: starting offset of logical eraseblocks within physical + * eraseblocks + * @min_io_size: minimal I/O unit size + * @max_write_size: maximum amount of bytes the underlying flash can write at a + * time (MTD write buffer size) + * @ro_mode: if this device is in read-only mode + * @cdev: UBI character device major and minor numbers + * + * Note, @leb_size is the logical eraseblock size offered by the UBI device. + * Volumes of this UBI device may have smaller logical eraseblock size if their + * alignment is not equivalent to %1. + * + * The @max_write_size field describes flash write maximum write unit. For + * example, NOR flash allows for changing individual bytes, so @min_io_size is + * %1. However, it does not mean than NOR flash has to write data byte-by-byte. + * Instead, CFI NOR flashes have a write-buffer of, e.g., 64 bytes, and when + * writing large chunks of data, they write 64-bytes at a time. Obviously, this + * improves write throughput. + * + * Also, the MTD device may have N interleaved (striped) flash chips + * underneath, in which case @min_io_size can be physical min. I/O size of + * single flash chip, while @max_write_size can be N * @min_io_size. + * + * The @max_write_size field is always greater or equivalent to @min_io_size. + * E.g., some NOR flashes may have (@min_io_size = 1, @max_write_size = 64). In + * contrast, NAND flashes usually have @min_io_size = @max_write_size = NAND + * page size. + */ +struct ubi_device_info { + int ubi_num; + int leb_size; + int leb_start; + int min_io_size; + int max_write_size; + int ro_mode; +#ifndef __UBOOT__ + dev_t cdev; +#endif +}; + +/* + * Volume notification types. 
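/*
 * Illustrative sketch: open a volume by name, read the beginning of LEB 0
 * and close the descriptor again, using the accessors declared later in
 * this header. "my_vol" and UBI device number 0 are placeholders; the
 * descriptor is assumed to be ERR_PTR-encoded on failure, as in Linux UBI.
 */
static int example_ubi_read_leb0(char *buf, int len)
{
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;
	int ret;

	desc = ubi_open_volume_nm(0, "my_vol", UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ubi_get_volume_info(desc, &vi);
	if (len > vi.usable_leb_size)
		len = vi.usable_leb_size;

	ret = ubi_leb_read(desc, 0, buf, 0, len, 0);
	ubi_close_volume(desc);

	return ret;
}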
+ * @UBI_VOLUME_ADDED: a volume has been added (an UBI device was attached or a + * volume was created) + * @UBI_VOLUME_REMOVED: a volume has been removed (an UBI device was detached + * or a volume was removed) + * @UBI_VOLUME_RESIZED: a volume has been re-sized + * @UBI_VOLUME_RENAMED: a volume has been re-named + * @UBI_VOLUME_UPDATED: data has been written to a volume + * + * These constants define which type of event has happened when a volume + * notification function is invoked. + */ +enum { + UBI_VOLUME_ADDED, + UBI_VOLUME_REMOVED, + UBI_VOLUME_RESIZED, + UBI_VOLUME_RENAMED, + UBI_VOLUME_UPDATED, +}; + +/* + * struct ubi_notification - UBI notification description structure. + * @di: UBI device description object + * @vi: UBI volume description object + * + * UBI notifiers are called with a pointer to an object of this type. The + * object describes the notification. Namely, it provides a description of the + * UBI device and UBI volume the notification informs about. + */ +struct ubi_notification { + struct ubi_device_info di; + struct ubi_volume_info vi; +}; + +/* UBI descriptor given to users when they open UBI volumes */ +struct ubi_volume_desc; + +int ubi_get_device_info(int ubi_num, struct ubi_device_info *di); +void ubi_get_volume_info(struct ubi_volume_desc *desc, + struct ubi_volume_info *vi); +struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode); +struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, + int mode); +struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode); + +#ifndef __UBOOT__ +typedef int (*notifier_fn_t)(void *nb, + unsigned long action, void *data); + +struct notifier_block { + notifier_fn_t notifier_call; + struct notifier_block *next; + void *next; + int priority; +}; + +int ubi_register_volume_notifier(struct notifier_block *nb, + int ignore_existing); +int ubi_unregister_volume_notifier(struct notifier_block *nb); +#endif + +void ubi_close_volume(struct ubi_volume_desc *desc); +int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, + int len, int check); +int ubi_leb_read_sg(struct ubi_volume_desc *desc, int lnum, struct ubi_sgl *sgl, + int offset, int len, int check); +int ubi_leb_write(struct ubi_volume_desc *desc, int lnum, const void *buf, + int offset, int len); +int ubi_leb_change(struct ubi_volume_desc *desc, int lnum, const void *buf, + int len); +int ubi_leb_erase(struct ubi_volume_desc *desc, int lnum); +int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); +int ubi_leb_map(struct ubi_volume_desc *desc, int lnum); +int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); +int ubi_sync(int ubi_num); +int ubi_flush(int ubi_num, int vol_id, int lnum); + +/* + * This function is the same as the 'ubi_leb_read()' function, but it does not + * provide the checking capability. + */ +static inline int ubi_read(struct ubi_volume_desc *desc, int lnum, char *buf, + int offset, int len) +{ + return ubi_leb_read(desc, lnum, buf, offset, len, 0); +} + +/* + * This function is the same as the 'ubi_leb_read_sg()' function, but it does + * not provide the checking capability. 
+ */ +static inline int ubi_read_sg(struct ubi_volume_desc *desc, int lnum, + struct ubi_sgl *sgl, int offset, int len) +{ + return ubi_leb_read_sg(desc, lnum, sgl, offset, len, 0); +} +#endif /* !__LINUX_UBI_H__ */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h new file mode 100644 index 0000000..40dad25 --- /dev/null +++ b/include/linux/netdevice.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Definitions for the Interfaces handler. + * + * Version: @(#)dev.h 1.0.10 08/12/93 + * + * Authors: Ross Biro + * Fred N. van Kempen, + * Corey Minyard + * Donald J. Becker, + * Alan Cox, + * Bjorn Ekwall. + * Pekka Riikonen + * + * Moved to /usr/include/linux for NET3 + */ +#ifndef _LINUX_NETDEVICE_H +#define _LINUX_NETDEVICE_H + +/* + * Network device statistics. Akin to the 2.0 ether stats but + * with byte counters. + */ + +struct net_device_stats { + unsigned long rx_packets; /* total packets received */ + unsigned long tx_packets; /* total packets transmitted */ + unsigned long rx_bytes; /* total bytes received */ + unsigned long tx_bytes; /* total bytes transmitted */ + unsigned long rx_errors; /* bad packets received */ + unsigned long tx_errors; /* packet transmit problems */ + unsigned long rx_dropped; /* no space in linux buffers */ + unsigned long tx_dropped; /* no space available in linux */ + unsigned long multicast; /* multicast packets received */ + unsigned long collisions; + + /* detailed rx_errors: */ + unsigned long rx_length_errors; + unsigned long rx_over_errors; /* receiver ring buff overflow */ + unsigned long rx_crc_errors; /* recved pkt with crc error */ + unsigned long rx_frame_errors; /* recv'd frame alignment error */ + unsigned long rx_fifo_errors; /* recv'r fifo overrun */ + unsigned long rx_missed_errors; /* receiver missed packet */ + + /* detailed tx_errors */ + unsigned long tx_aborted_errors; + unsigned long tx_carrier_errors; + unsigned long tx_fifo_errors; + unsigned long tx_heartbeat_errors; + unsigned long tx_window_errors; + + /* for cslip etc */ + unsigned long rx_compressed; + unsigned long tx_compressed; +}; + +#endif /* _LINUX_NETDEVICE_H */ diff --git a/include/linux/poison.h b/include/linux/poison.h new file mode 100644 index 0000000..b3d873b --- /dev/null +++ b/include/linux/poison.h @@ -0,0 +1,11 @@ +#ifndef _LINUX_POISON_H +#define _LINUX_POISON_H + +/********** include/linux/list.h **********/ +/* + * used to verify that nobody uses non-initialized list entries. + */ +#define LIST_POISON1 ((void *) 0x0) +#define LIST_POISON2 ((void *) 0x0) + +#endif diff --git a/include/linux/posix_types.h b/include/linux/posix_types.h new file mode 100644 index 0000000..bd37e1f --- /dev/null +++ b/include/linux/posix_types.h @@ -0,0 +1,48 @@ +#ifndef _LINUX_POSIX_TYPES_H +#define _LINUX_POSIX_TYPES_H + +#include + +/* + * This allows for 1024 file descriptors: if NR_OPEN is ever grown + * beyond that you'll have to change this too. But 1024 fd's seem to be + * enough even for such "real" unices like OSF/1, so hopefully this is + * one limit that doesn't have to be changed [again]. + * + * Note that POSIX wants the FD_CLEAR(fd,fdsetp) defines to be in + * (and thus ) - but this is a more logical + * place for them. Solved by having dummy defines in . + */ + +/* + * Those macros may have been defined in . 
But we always + * use the ones here. + */ +#undef __NFDBITS +#define __NFDBITS (8 * sizeof(unsigned long)) + +#undef __FD_SETSIZE +#define __FD_SETSIZE 1024 + +#undef __FDSET_LONGS +#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS) + +#undef __FDELT +#define __FDELT(d) ((d) / __NFDBITS) + +#undef __FDMASK +#define __FDMASK(d) (1UL << ((d) % __NFDBITS)) + +typedef struct { + unsigned long fds_bits [__FDSET_LONGS]; +} __kernel_fd_set; + +/* Type of a signal handler. */ +typedef void (*__kernel_sighandler_t)(int); + +/* Type of a SYSV IPC key. */ +typedef int __kernel_key_t; + +#include + +#endif /* _LINUX_POSIX_TYPES_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h new file mode 100644 index 0000000..088513a --- /dev/null +++ b/include/linux/printk.h @@ -0,0 +1,79 @@ +#ifndef __KERNEL_PRINTK__ +#define __KERNEL_PRINTK__ + +#include +#include + +#define KERN_EMERG +#define KERN_ALERT +#define KERN_CRIT +#define KERN_ERR +#define KERN_WARNING +#define KERN_NOTICE +#define KERN_INFO +#define KERN_DEBUG +#define KERN_CONT + +#define printk(fmt, ...) \ + printf(fmt, ##__VA_ARGS__) + +/* + * Dummy printk for disabled debugging statements to use whilst maintaining + * gcc's format checking. + */ +#define no_printk(fmt, ...) \ +({ \ + if (0) \ + printk(fmt, ##__VA_ARGS__); \ + 0; \ +}) + +#define __printk(level, fmt, ...) \ +({ \ + level < CONFIG_LOGLEVEL ? printk(fmt, ##__VA_ARGS__) : 0; \ +}) + +#ifndef pr_fmt +#define pr_fmt(fmt) fmt +#endif + +#define pr_emerg(fmt, ...) \ + __printk(0, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_alert(fmt, ...) \ + __printk(1, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_crit(fmt, ...) \ + __printk(2, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_err(fmt, ...) \ + __printk(3, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warning(fmt, ...) \ + __printk(4, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_warn pr_warning +#define pr_notice(fmt, ...) \ + __printk(5, pr_fmt(fmt), ##__VA_ARGS__) +#define pr_info(fmt, ...) \ + __printk(6, pr_fmt(fmt), ##__VA_ARGS__) + +#define pr_cont(fmt, ...) \ + printk(fmt, ##__VA_ARGS__) + +/* pr_devel() should produce zero code unless DEBUG is defined */ +#ifdef DEBUG +#define pr_devel(fmt, ...) \ + __printk(7, pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_devel(fmt, ...) \ + no_printk(pr_fmt(fmt), ##__VA_ARGS__) +#endif + +#ifdef DEBUG +#define pr_debug(fmt, ...) \ + __printk(7, pr_fmt(fmt), ##__VA_ARGS__) +#else +#define pr_debug(fmt, ...) \ + no_printk(pr_fmt(fmt), ##__VA_ARGS__) +#endif + +#define printk_once(fmt, ...) \ + printk(fmt, ##__VA_ARGS__) + +#endif diff --git a/include/linux/psci.h b/include/linux/psci.h new file mode 100644 index 0000000..9433df8 --- /dev/null +++ b/include/linux/psci.h @@ -0,0 +1,101 @@ +/* + * ARM Power State and Coordination Interface (PSCI) header + * + * This header holds common PSCI defines and macros shared + * by: ARM kernel, ARM64 kernel, KVM ARM/ARM64 and user space. + * + * Copyright (C) 2014 Linaro Ltd. + * Author: Anup Patel + */ + +#ifndef _UAPI_LINUX_PSCI_H +#define _UAPI_LINUX_PSCI_H + +/* + * PSCI v0.1 interface + * + * The PSCI v0.1 function numbers are implementation defined. + * + * Only PSCI return values such as: SUCCESS, NOT_SUPPORTED, + * INVALID_PARAMS, and DENIED defined below are applicable + * to PSCI v0.1. 
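/*
 * Illustrative sketch: typical use of the pr_*() helpers above. Defining
 * pr_fmt() before including this header prefixes every message emitted
 * from the file; "mydrv" is a placeholder. Messages with a level below
 * CONFIG_LOGLEVEL are skipped, and pr_debug() only prints when DEBUG is
 * defined.
 */
#define pr_fmt(fmt) "mydrv: " fmt
#include <linux/printk.h>

static void example_log_result(int err)
{
	if (err)
		pr_err("transfer failed: %d\n", err);
	else
		pr_debug("transfer done\n");
}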
+ */ + +/* PSCI v0.2 interface */ +#define PSCI_0_2_FN_BASE 0x84000000 +#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n)) +#define PSCI_0_2_64BIT 0x40000000 +#define PSCI_0_2_FN64_BASE \ + (PSCI_0_2_FN_BASE + PSCI_0_2_64BIT) +#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n)) + +#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0) +#define PSCI_0_2_FN_CPU_SUSPEND PSCI_0_2_FN(1) +#define PSCI_0_2_FN_CPU_OFF PSCI_0_2_FN(2) +#define PSCI_0_2_FN_CPU_ON PSCI_0_2_FN(3) +#define PSCI_0_2_FN_AFFINITY_INFO PSCI_0_2_FN(4) +#define PSCI_0_2_FN_MIGRATE PSCI_0_2_FN(5) +#define PSCI_0_2_FN_MIGRATE_INFO_TYPE PSCI_0_2_FN(6) +#define PSCI_0_2_FN_MIGRATE_INFO_UP_CPU PSCI_0_2_FN(7) +#define PSCI_0_2_FN_SYSTEM_OFF PSCI_0_2_FN(8) +#define PSCI_0_2_FN_SYSTEM_RESET PSCI_0_2_FN(9) + +#define PSCI_0_2_FN64_CPU_SUSPEND PSCI_0_2_FN64(1) +#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3) +#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4) +#define PSCI_0_2_FN64_MIGRATE PSCI_0_2_FN64(5) +#define PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU PSCI_0_2_FN64(7) + +/* PSCI v0.2 power state encoding for CPU_SUSPEND function */ +#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff +#define PSCI_0_2_POWER_STATE_ID_SHIFT 0 +#define PSCI_0_2_POWER_STATE_TYPE_SHIFT 16 +#define PSCI_0_2_POWER_STATE_TYPE_MASK \ + (0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT) +#define PSCI_0_2_POWER_STATE_AFFL_SHIFT 24 +#define PSCI_0_2_POWER_STATE_AFFL_MASK \ + (0x3 << PSCI_0_2_POWER_STATE_AFFL_SHIFT) + +/* PSCI v0.2 affinity level state returned by AFFINITY_INFO */ +#define PSCI_0_2_AFFINITY_LEVEL_ON 0 +#define PSCI_0_2_AFFINITY_LEVEL_OFF 1 +#define PSCI_0_2_AFFINITY_LEVEL_ON_PENDING 2 + +/* PSCI v0.2 multicore support in Trusted OS returned by MIGRATE_INFO_TYPE */ +#define PSCI_0_2_TOS_UP_MIGRATE 0 +#define PSCI_0_2_TOS_UP_NO_MIGRATE 1 +#define PSCI_0_2_TOS_MP 2 + +/* PSCI version decoding (independent of PSCI version) */ +#define PSCI_VERSION_MAJOR_SHIFT 16 +#define PSCI_VERSION_MINOR_MASK \ + ((1U << PSCI_VERSION_MAJOR_SHIFT) - 1) +#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK +#define PSCI_VERSION_MAJOR(ver) \ + (((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT) +#define PSCI_VERSION_MINOR(ver) \ + ((ver) & PSCI_VERSION_MINOR_MASK) + +/* PSCI return values (inclusive of all PSCI versions) */ +#define PSCI_RET_SUCCESS 0 +#define PSCI_RET_NOT_SUPPORTED -1 +#define PSCI_RET_INVALID_PARAMS -2 +#define PSCI_RET_DENIED -3 +#define PSCI_RET_ALREADY_ON -4 +#define PSCI_RET_ON_PENDING -5 +#define PSCI_RET_INTERNAL_FAILURE -6 +#define PSCI_RET_NOT_PRESENT -7 +#define PSCI_RET_DISABLED -8 + +#ifdef CONFIG_ARM_PSCI_FW +unsigned long invoke_psci_fn(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3); +#else +unsigned long invoke_psci_fn(unsigned long a0, unsigned long a1, + unsigned long a2, unsigned long a3) +{ + return PSCI_RET_DISABLED; +} +#endif + +#endif /* _UAPI_LINUX_PSCI_H */ diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h new file mode 100644 index 0000000..1d2b176 --- /dev/null +++ b/include/linux/rbtree.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + + linux/include/linux/rbtree.h + + To use rbtrees you'll have to implement your own insert and search cores. + This will avoid us to use callbacks and to drop drammatically performances. + I know it's not the cleaner way, but in C (not in C++) to get + performances and genericity... + + See Documentation/rbtree.txt for documentation and samples. 
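/*
 * Illustrative sketch: querying the firmware's PSCI version through
 * invoke_psci_fn() and splitting the result with the version macros above.
 * Only meaningful when CONFIG_ARM_PSCI_FW is enabled; otherwise the stub
 * returns PSCI_RET_DISABLED.
 */
static void example_report_psci_version(void)
{
	unsigned long ver = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);

	printf("PSCI v%lu.%lu\n",
	       PSCI_VERSION_MAJOR(ver), PSCI_VERSION_MINOR(ver));
}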
+*/ + +#ifndef _LINUX_RBTREE_H +#define _LINUX_RBTREE_H + +#ifndef __UBOOT__ +#include +#endif +#include + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); + /* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + + +#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) + +#define RB_ROOT (struct rb_root) { NULL, } +#define rb_entry(ptr, type, member) container_of(ptr, type, member) + +#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL) + +/* 'empty' nodes are nodes that are known not to be inserted in an rbree */ +#define RB_EMPTY_NODE(node) \ + ((node)->__rb_parent_color == (unsigned long)(node)) +#define RB_CLEAR_NODE(node) \ + ((node)->__rb_parent_color = (unsigned long)(node)) + + +extern void rb_insert_color(struct rb_node *, struct rb_root *); +extern void rb_erase(struct rb_node *, struct rb_root *); + + +/* Find logical next and previous nodes in a tree */ +extern struct rb_node *rb_next(const struct rb_node *); +extern struct rb_node *rb_prev(const struct rb_node *); +extern struct rb_node *rb_first(const struct rb_root *); +extern struct rb_node *rb_last(const struct rb_root *); + +/* Postorder iteration - always visit the parent after its children */ +extern struct rb_node *rb_first_postorder(const struct rb_root *); +extern struct rb_node *rb_next_postorder(const struct rb_node *); + +/* Fast replacement of a single node without remove/rebalance/add/rebalance */ +extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, + struct rb_root *root); + +static inline void rb_link_node(struct rb_node * node, struct rb_node * parent, + struct rb_node ** rb_link) +{ + node->__rb_parent_color = (unsigned long)parent; + node->rb_left = node->rb_right = NULL; + + *rb_link = node; +} + +#define rb_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ + ____ptr ? rb_entry(____ptr, type, member) : NULL; \ + }) + +/** + * rbtree_postorder_for_each_entry_safe - iterate over rb_root in post order of + * given type safe against removal of rb_node entry + * + * @pos: the 'type *' to use as a loop cursor. + * @n: another 'type *' to use as temporary storage + * @root: 'rb_root *' of the rbtree. + * @field: the name of the rb_node field within 'type'. + */ +#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \ + for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \ + pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \ + typeof(*pos), field); 1; }); \ + pos = n) + +#endif /* _LINUX_RBTREE_H */ diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h new file mode 100644 index 0000000..da04156 --- /dev/null +++ b/include/linux/rbtree_augmented.h @@ -0,0 +1,219 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + Red Black Trees + (C) 1999 Andrea Arcangeli + (C) 2002 David Woodhouse + (C) 2012 Michel Lespinasse + + linux/include/linux/rbtree_augmented.h +*/ + +#ifndef _LINUX_RBTREE_AUGMENTED_H +#define _LINUX_RBTREE_AUGMENTED_H + +#include +#include + +/* + * Please note - only struct rb_augment_callbacks and the prototypes for + * rb_insert_augmented() and rb_erase_augmented() are intended to be public. + * The rest are implementation details you are not expected to depend on. + * + * See Documentation/rbtree.txt for documentation and samples. 
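/*
 * Illustrative sketch: the "implement your own insert core" pattern the
 * comment above refers to. Walk from the root to the insertion point, link
 * the node, then rebalance. struct example_node with an integer key is a
 * placeholder type.
 */
struct example_node {
	struct rb_node rb;
	unsigned long key;
};

static void example_rb_insert(struct rb_root *root, struct example_node *new)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	while (*link) {
		struct example_node *cur;

		cur = rb_entry(*link, struct example_node, rb);
		parent = *link;
		if (new->key < cur->key)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->rb, parent, link);
	rb_insert_color(&new->rb, root);
}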
+ */ + +struct rb_augment_callbacks { + void (*propagate)(struct rb_node *node, struct rb_node *stop); + void (*copy)(struct rb_node *old, struct rb_node *new); + void (*rotate)(struct rb_node *old, struct rb_node *new); +}; + +extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); +static inline void +rb_insert_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + __rb_insert_augmented(node, root, augment->rotate); +} + +#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \ + rbtype, rbaugmented, rbcompute) \ +static inline void \ +rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \ +{ \ + while (rb != stop) { \ + rbstruct *node = rb_entry(rb, rbstruct, rbfield); \ + rbtype augmented = rbcompute(node); \ + if (node->rbaugmented == augmented) \ + break; \ + node->rbaugmented = augmented; \ + rb = rb_parent(&node->rbfield); \ + } \ +} \ +static inline void \ +rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \ +{ \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ +} \ +static void \ +rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \ +{ \ + rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \ + rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \ + new->rbaugmented = old->rbaugmented; \ + old->rbaugmented = rbcompute(old); \ +} \ +rbstatic const struct rb_augment_callbacks rbname = { \ + rbname ## _propagate, rbname ## _copy, rbname ## _rotate \ +}; + + +#define RB_RED 0 +#define RB_BLACK 1 + +#define __rb_parent(pc) ((struct rb_node *)(pc & ~3)) + +#define __rb_color(pc) ((pc) & 1) +#define __rb_is_black(pc) __rb_color(pc) +#define __rb_is_red(pc) (!__rb_color(pc)) +#define rb_color(rb) __rb_color((rb)->__rb_parent_color) +#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color) +#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color) + +static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p) +{ + rb->__rb_parent_color = rb_color(rb) | (unsigned long)p; +} + +static inline void rb_set_parent_color(struct rb_node *rb, + struct rb_node *p, int color) +{ + rb->__rb_parent_color = (unsigned long)p | color; +} + +static inline void +__rb_change_child(struct rb_node *old, struct rb_node *new, + struct rb_node *parent, struct rb_root *root) +{ + if (parent) { + if (parent->rb_left == old) + parent->rb_left = new; + else + parent->rb_right = new; + } else + root->rb_node = new; +} + +extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root, + void (*augment_rotate)(struct rb_node *old, struct rb_node *new)); + +static __always_inline struct rb_node * +__rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *child = node->rb_right, *tmp = node->rb_left; + struct rb_node *parent, *rebalance; + unsigned long pc; + + if (!tmp) { + /* + * Case 1: node to erase has no more than 1 child (easy!) + * + * Note that if there is one child it must be red due to 5) + * and node must be black due to 4). We adjust colors locally + * so as to bypass __rb_erase_color() later on. 
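/*
 * Illustrative sketch: instantiating the callbacks with
 * RB_DECLARE_CALLBACKS() for a "maximum key in subtree" augmentation.
 * struct example_anode and example_compute_max() are placeholders; after
 * linking a new node in the usual way, the tree is rebalanced with
 * rb_insert_augmented(&node->rb, root, &example_cb).
 */
struct example_anode {
	struct rb_node rb;
	unsigned long key;
	unsigned long max_in_subtree;
};

static unsigned long example_compute_max(struct example_anode *node)
{
	unsigned long m = node->key;
	struct example_anode *child;

	if (node->rb.rb_left) {
		child = rb_entry(node->rb.rb_left, struct example_anode, rb);
		if (child->max_in_subtree > m)
			m = child->max_in_subtree;
	}
	if (node->rb.rb_right) {
		child = rb_entry(node->rb.rb_right, struct example_anode, rb);
		if (child->max_in_subtree > m)
			m = child->max_in_subtree;
	}

	return m;
}

RB_DECLARE_CALLBACKS(static, example_cb, struct example_anode, rb,
		     unsigned long, max_in_subtree, example_compute_max);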
+ */ + pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, child, parent, root); + if (child) { + child->__rb_parent_color = pc; + rebalance = NULL; + } else + rebalance = __rb_is_black(pc) ? parent : NULL; + tmp = parent; + } else if (!child) { + /* Still case 1, but this time the child is node->rb_left */ + tmp->__rb_parent_color = pc = node->__rb_parent_color; + parent = __rb_parent(pc); + __rb_change_child(node, tmp, parent, root); + rebalance = NULL; + tmp = parent; + } else { + struct rb_node *successor = child, *child2; + tmp = child->rb_left; + if (!tmp) { + /* + * Case 2: node's successor is its right child + * + * (n) (s) + * / \ / \ + * (x) (s) -> (x) (c) + * \ + * (c) + */ + parent = successor; + child2 = successor->rb_right; + augment->copy(node, successor); + } else { + /* + * Case 3: node's successor is leftmost under + * node's right child subtree + * + * (n) (s) + * / \ / \ + * (x) (y) -> (x) (y) + * / / + * (p) (p) + * / / + * (s) (c) + * \ + * (c) + */ + do { + parent = successor; + successor = tmp; + tmp = tmp->rb_left; + } while (tmp); + parent->rb_left = child2 = successor->rb_right; + successor->rb_right = child; + rb_set_parent(child, successor); + augment->copy(node, successor); + augment->propagate(parent, successor); + } + + successor->rb_left = tmp = node->rb_left; + rb_set_parent(tmp, successor); + + pc = node->__rb_parent_color; + tmp = __rb_parent(pc); + __rb_change_child(node, successor, tmp, root); + if (child2) { + successor->__rb_parent_color = pc; + rb_set_parent_color(child2, parent, RB_BLACK); + rebalance = NULL; + } else { + unsigned long pc2 = successor->__rb_parent_color; + successor->__rb_parent_color = pc; + rebalance = __rb_is_black(pc2) ? parent : NULL; + } + tmp = successor; + } + + augment->propagate(tmp, NULL); + return rebalance; +} + +static __always_inline void +rb_erase_augmented(struct rb_node *node, struct rb_root *root, + const struct rb_augment_callbacks *augment) +{ + struct rb_node *rebalance = __rb_erase_augmented(node, root, augment); + if (rebalance) + __rb_erase_color(rebalance, root, augment->rotate); +} + +#endif /* _LINUX_RBTREE_AUGMENTED_H */ diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h new file mode 100644 index 0000000..899fbb4 --- /dev/null +++ b/include/linux/screen_info.h @@ -0,0 +1,84 @@ +#ifndef _SCREEN_INFO_H +#define _SCREEN_INFO_H + +#include + +/* + * These are set up by the setup-routine at boot-time: + */ + +struct screen_info { + __u8 orig_x; /* 0x00 */ + __u8 orig_y; /* 0x01 */ + __u16 ext_mem_k; /* 0x02 */ + __u16 orig_video_page; /* 0x04 */ + __u8 orig_video_mode; /* 0x06 */ + __u8 orig_video_cols; /* 0x07 */ + __u8 flags; /* 0x08 */ + __u8 unused2; /* 0x09 */ + __u16 orig_video_ega_bx;/* 0x0a */ + __u16 unused3; /* 0x0c */ + __u8 orig_video_lines; /* 0x0e */ + __u8 orig_video_isVGA; /* 0x0f */ + __u16 orig_video_points;/* 0x10 */ + + /* VESA graphic mode -- linear frame buffer */ + __u16 lfb_width; /* 0x12 */ + __u16 lfb_height; /* 0x14 */ + __u16 lfb_depth; /* 0x16 */ + __u32 lfb_base; /* 0x18 */ + __u32 lfb_size; /* 0x1c */ + __u16 cl_magic, cl_offset; /* 0x20 */ + __u16 lfb_linelength; /* 0x24 */ + __u8 red_size; /* 0x26 */ + __u8 red_pos; /* 0x27 */ + __u8 green_size; /* 0x28 */ + __u8 green_pos; /* 0x29 */ + __u8 blue_size; /* 0x2a */ + __u8 blue_pos; /* 0x2b */ + __u8 rsvd_size; /* 0x2c */ + __u8 rsvd_pos; /* 0x2d */ + __u16 vesapm_seg; /* 0x2e */ + __u16 vesapm_off; /* 0x30 */ + __u16 pages; /* 0x32 */ + __u16 vesa_attributes; /* 0x34 */ + 
__u32 capabilities; /* 0x36 */ + __u8 _reserved[6]; /* 0x3a */ +} __attribute__((packed)); + +#define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */ +#define VIDEO_TYPE_CGA 0x11 /* CGA Display */ +#define VIDEO_TYPE_EGAM 0x20 /* EGA/VGA in Monochrome Mode */ +#define VIDEO_TYPE_EGAC 0x21 /* EGA in Color Mode */ +#define VIDEO_TYPE_VGAC 0x22 /* VGA+ in Color Mode */ +#define VIDEO_TYPE_VLFB 0x23 /* VESA VGA in graphic mode */ + +#define VIDEO_TYPE_PICA_S3 0x30 /* ACER PICA-61 local S3 video */ +#define VIDEO_TYPE_MIPS_G364 0x31 /* MIPS Magnum 4000 G364 video */ +#define VIDEO_TYPE_SGI 0x33 /* Various SGI graphics hardware */ + +#define VIDEO_TYPE_TGAC 0x40 /* DEC TGA */ + +#define VIDEO_TYPE_SUN 0x50 /* Sun frame buffer. */ +#define VIDEO_TYPE_SUNPCI 0x51 /* Sun PCI based frame buffer. */ + +#define VIDEO_TYPE_PMAC 0x60 /* PowerMacintosh frame buffer. */ + +#define VIDEO_TYPE_EFI 0x70 /* EFI graphic mode */ + +#define VIDEO_FLAGS_NOCURSOR (1 << 0) /* The video mode has no cursor set */ + +#ifdef __KERNEL__ +extern struct screen_info screen_info; + +#define ORIG_X (screen_info.orig_x) +#define ORIG_Y (screen_info.orig_y) +#define ORIG_VIDEO_MODE (screen_info.orig_video_mode) +#define ORIG_VIDEO_COLS (screen_info.orig_video_cols) +#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx) +#define ORIG_VIDEO_LINES (screen_info.orig_video_lines) +#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA) +#define ORIG_VIDEO_POINTS (screen_info.orig_video_points) +#endif /* __KERNEL__ */ + +#endif /* _SCREEN_INFO_H */ diff --git a/include/linux/serial_reg.h b/include/linux/serial_reg.h new file mode 100644 index 0000000..29c3b5b --- /dev/null +++ b/include/linux/serial_reg.h @@ -0,0 +1,387 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * include/linux/serial_reg.h + * + * Copyright (C) 1992, 1994 by Theodore Ts'o. + * + * These are the UART port assignments, expressed as offsets from the base + * register. These assignments should hold for any serial port based on + * a 8250, 16450, or 16550(A). + */ + +#ifndef _LINUX_SERIAL_REG_H +#define _LINUX_SERIAL_REG_H + +/* + * DLAB=0 + */ +#define UART_RX 0 /* In: Receive buffer */ +#define UART_TX 0 /* Out: Transmit buffer */ + +#define UART_IER 1 /* Out: Interrupt Enable Register */ +#define UART_IER_MSI 0x08 /* Enable Modem status interrupt */ +#define UART_IER_RLSI 0x04 /* Enable receiver line status interrupt */ +#define UART_IER_THRI 0x02 /* Enable Transmitter holding register int. */ +#define UART_IER_RDI 0x01 /* Enable receiver data interrupt */ +/* + * Sleep mode for ST16650 and TI16750. 
For the ST16650, EFR[4]=1 + */ +#define UART_IERX_SLEEP 0x10 /* Enable sleep mode */ + +#define UART_IIR 2 /* In: Interrupt ID Register */ +#define UART_IIR_NO_INT 0x01 /* No interrupts pending */ +#define UART_IIR_ID 0x0e /* Mask for the interrupt ID */ +#define UART_IIR_MSI 0x00 /* Modem status interrupt */ +#define UART_IIR_THRI 0x02 /* Transmitter holding register empty */ +#define UART_IIR_RDI 0x04 /* Receiver data interrupt */ +#define UART_IIR_RLSI 0x06 /* Receiver line status interrupt */ + +#define UART_IIR_BUSY 0x07 /* DesignWare APB Busy Detect */ + +#define UART_IIR_RX_TIMEOUT 0x0c /* OMAP RX Timeout interrupt */ +#define UART_IIR_XOFF 0x10 /* OMAP XOFF/Special Character */ +#define UART_IIR_CTS_RTS_DSR 0x20 /* OMAP CTS/RTS/DSR Change */ + +#define UART_FCR 2 /* Out: FIFO Control Register */ +#define UART_FCR_ENABLE_FIFO 0x01 /* Enable the FIFO */ +#define UART_FCR_CLEAR_RCVR 0x02 /* Clear the RCVR FIFO */ +#define UART_FCR_CLEAR_XMIT 0x04 /* Clear the XMIT FIFO */ +#define UART_FCR_DMA_SELECT 0x08 /* For DMA applications */ +/* + * Note: The FIFO trigger levels are chip specific: + * RX:76 = 00 01 10 11 TX:54 = 00 01 10 11 + * PC16550D: 1 4 8 14 xx xx xx xx + * TI16C550A: 1 4 8 14 xx xx xx xx + * TI16C550C: 1 4 8 14 xx xx xx xx + * ST16C550: 1 4 8 14 xx xx xx xx + * ST16C650: 8 16 24 28 16 8 24 30 PORT_16650V2 + * NS16C552: 1 4 8 14 xx xx xx xx + * ST16C654: 8 16 56 60 8 16 32 56 PORT_16654 + * TI16C750: 1 16 32 56 xx xx xx xx PORT_16750 + * TI16C752: 8 16 56 60 8 16 32 56 + * Tegra: 1 4 8 14 16 8 4 1 PORT_TEGRA + */ +#define UART_FCR_R_TRIG_00 0x00 +#define UART_FCR_R_TRIG_01 0x40 +#define UART_FCR_R_TRIG_10 0x80 +#define UART_FCR_R_TRIG_11 0xc0 +#define UART_FCR_T_TRIG_00 0x00 +#define UART_FCR_T_TRIG_01 0x10 +#define UART_FCR_T_TRIG_10 0x20 +#define UART_FCR_T_TRIG_11 0x30 + +#define UART_FCR_TRIGGER_MASK 0xC0 /* Mask for the FIFO trigger range */ +#define UART_FCR_TRIGGER_1 0x00 /* Mask for trigger set at 1 */ +#define UART_FCR_TRIGGER_4 0x40 /* Mask for trigger set at 4 */ +#define UART_FCR_TRIGGER_8 0x80 /* Mask for trigger set at 8 */ +#define UART_FCR_TRIGGER_14 0xC0 /* Mask for trigger set at 14 */ +/* 16650 definitions */ +#define UART_FCR6_R_TRIGGER_8 0x00 /* Mask for receive trigger set at 1 */ +#define UART_FCR6_R_TRIGGER_16 0x40 /* Mask for receive trigger set at 4 */ +#define UART_FCR6_R_TRIGGER_24 0x80 /* Mask for receive trigger set at 8 */ +#define UART_FCR6_R_TRIGGER_28 0xC0 /* Mask for receive trigger set at 14 */ +#define UART_FCR6_T_TRIGGER_16 0x00 /* Mask for transmit trigger set at 16 */ +#define UART_FCR6_T_TRIGGER_8 0x10 /* Mask for transmit trigger set at 8 */ +#define UART_FCR6_T_TRIGGER_24 0x20 /* Mask for transmit trigger set at 24 */ +#define UART_FCR6_T_TRIGGER_30 0x30 /* Mask for transmit trigger set at 30 */ +#define UART_FCR7_64BYTE 0x20 /* Go into 64 byte mode (TI16C750) */ + +#define UART_LCR 3 /* Out: Line Control Register */ +/* + * Note: if the word length is 5 bits (UART_LCR_WLEN5), then setting + * UART_LCR_STOP will select 1.5 stop bits, not 2 stop bits. + */ +#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ +#define UART_LCR_SBC 0x40 /* Set break control */ +#define UART_LCR_SPAR 0x20 /* Stick parity (?) 
*/ +#define UART_LCR_EPAR 0x10 /* Even parity select */ +#define UART_LCR_PARITY 0x08 /* Parity Enable */ +#define UART_LCR_STOP 0x04 /* Stop bits: 0=1 bit, 1=2 bits */ +#define UART_LCR_WLEN5 0x00 /* Wordlength: 5 bits */ +#define UART_LCR_WLEN6 0x01 /* Wordlength: 6 bits */ +#define UART_LCR_WLEN7 0x02 /* Wordlength: 7 bits */ +#define UART_LCR_WLEN8 0x03 /* Wordlength: 8 bits */ + +/* + * Access to some registers depends on register access / configuration + * mode. + */ +#define UART_LCR_CONF_MODE_A UART_LCR_DLAB /* Configutation mode A */ +#define UART_LCR_CONF_MODE_B 0xBF /* Configutation mode B */ + +#define UART_MCR 4 /* Out: Modem Control Register */ +#define UART_MCR_CLKSEL 0x80 /* Divide clock by 4 (TI16C752, EFR[4]=1) */ +#define UART_MCR_TCRTLR 0x40 /* Access TCR/TLR (TI16C752, EFR[4]=1) */ +#define UART_MCR_XONANY 0x20 /* Enable Xon Any (TI16C752, EFR[4]=1) */ +#define UART_MCR_AFE 0x20 /* Enable auto-RTS/CTS (TI16C550C/TI16C750) */ +#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */ +#define UART_MCR_OUT2 0x08 /* Out2 complement */ +#define UART_MCR_OUT1 0x04 /* Out1 complement */ +#define UART_MCR_RTS 0x02 /* RTS complement */ +#define UART_MCR_DTR 0x01 /* DTR complement */ + +#define UART_LSR 5 /* In: Line Status Register */ +#define UART_LSR_FIFOE 0x80 /* Fifo error */ +#define UART_LSR_TEMT 0x40 /* Transmitter empty */ +#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ +#define UART_LSR_BI 0x10 /* Break interrupt indicator */ +#define UART_LSR_FE 0x08 /* Frame error indicator */ +#define UART_LSR_PE 0x04 /* Parity error indicator */ +#define UART_LSR_OE 0x02 /* Overrun error indicator */ +#define UART_LSR_DR 0x01 /* Receiver data ready */ +#define UART_LSR_BRK_ERROR_BITS 0x1E /* BI, FE, PE, OE bits */ + +#define UART_MSR 6 /* In: Modem Status Register */ +#define UART_MSR_DCD 0x80 /* Data Carrier Detect */ +#define UART_MSR_RI 0x40 /* Ring Indicator */ +#define UART_MSR_DSR 0x20 /* Data Set Ready */ +#define UART_MSR_CTS 0x10 /* Clear to Send */ +#define UART_MSR_DDCD 0x08 /* Delta DCD */ +#define UART_MSR_TERI 0x04 /* Trailing edge ring indicator */ +#define UART_MSR_DDSR 0x02 /* Delta DSR */ +#define UART_MSR_DCTS 0x01 /* Delta CTS */ +#define UART_MSR_ANY_DELTA 0x0F /* Any of the delta bits! 
*/ + +#define UART_SCR 7 /* I/O: Scratch Register */ + +/* + * DLAB=1 + */ +#define UART_DLL 0 /* Out: Divisor Latch Low */ +#define UART_DLM 1 /* Out: Divisor Latch High */ + +/* + * LCR=0xBF (or DLAB=1 for 16C660) + */ +#define UART_EFR 2 /* I/O: Extended Features Register */ +#define UART_XR_EFR 9 /* I/O: Extended Features Register (XR17D15x) */ +#define UART_EFR_CTS 0x80 /* CTS flow control */ +#define UART_EFR_RTS 0x40 /* RTS flow control */ +#define UART_EFR_SCD 0x20 /* Special character detect */ +#define UART_EFR_ECB 0x10 /* Enhanced control bit */ +/* + * the low four bits control software flow control + */ + +/* + * LCR=0xBF, TI16C752, ST16650, ST16650A, ST16654 + */ +#define UART_XON1 4 /* I/O: Xon character 1 */ +#define UART_XON2 5 /* I/O: Xon character 2 */ +#define UART_XOFF1 6 /* I/O: Xoff character 1 */ +#define UART_XOFF2 7 /* I/O: Xoff character 2 */ + +/* + * EFR[4]=1 MCR[6]=1, TI16C752 + */ +#define UART_TI752_TCR 6 /* I/O: transmission control register */ +#define UART_TI752_TLR 7 /* I/O: trigger level register */ + +/* + * LCR=0xBF, XR16C85x + */ +#define UART_TRG 0 /* FCTR bit 7 selects Rx or Tx + * In: Fifo count + * Out: Fifo custom trigger levels */ +/* + * These are the definitions for the Programmable Trigger Register + */ +#define UART_TRG_1 0x01 +#define UART_TRG_4 0x04 +#define UART_TRG_8 0x08 +#define UART_TRG_16 0x10 +#define UART_TRG_32 0x20 +#define UART_TRG_64 0x40 +#define UART_TRG_96 0x60 +#define UART_TRG_120 0x78 +#define UART_TRG_128 0x80 + +#define UART_FCTR 1 /* Feature Control Register */ +#define UART_FCTR_RTS_NODELAY 0x00 /* RTS flow control delay */ +#define UART_FCTR_RTS_4DELAY 0x01 +#define UART_FCTR_RTS_6DELAY 0x02 +#define UART_FCTR_RTS_8DELAY 0x03 +#define UART_FCTR_IRDA 0x04 /* IrDa data encode select */ +#define UART_FCTR_TX_INT 0x08 /* Tx interrupt type select */ +#define UART_FCTR_TRGA 0x00 /* Tx/Rx 550 trigger table select */ +#define UART_FCTR_TRGB 0x10 /* Tx/Rx 650 trigger table select */ +#define UART_FCTR_TRGC 0x20 /* Tx/Rx 654 trigger table select */ +#define UART_FCTR_TRGD 0x30 /* Tx/Rx 850 programmable trigger select */ +#define UART_FCTR_SCR_SWAP 0x40 /* Scratch pad register swap */ +#define UART_FCTR_RX 0x00 /* Programmable trigger mode select */ +#define UART_FCTR_TX 0x80 /* Programmable trigger mode select */ + +/* + * LCR=0xBF, FCTR[6]=1 + */ +#define UART_EMSR 7 /* Extended Mode Select Register */ +#define UART_EMSR_FIFO_COUNT 0x01 /* Rx/Tx select */ +#define UART_EMSR_ALT_COUNT 0x02 /* Alternating count select */ + +/* + * The Intel XScale on-chip UARTs define these bits + */ +#define UART_IER_DMAE 0x80 /* DMA Requests Enable */ +#define UART_IER_UUE 0x40 /* UART Unit Enable */ +#define UART_IER_NRZE 0x20 /* NRZ coding Enable */ +#define UART_IER_RTOIE 0x10 /* Receiver Time Out Interrupt Enable */ + +#define UART_IIR_TOD 0x08 /* Character Timeout Indication Detected */ + +#define UART_FCR_PXAR1 0x00 /* receive FIFO threshold = 1 */ +#define UART_FCR_PXAR8 0x40 /* receive FIFO threshold = 8 */ +#define UART_FCR_PXAR16 0x80 /* receive FIFO threshold = 16 */ +#define UART_FCR_PXAR32 0xc0 /* receive FIFO threshold = 32 */ + +/* + * Intel MID on-chip HSU (High Speed UART) defined bits + */ +#define UART_FCR_HSU_64_1B 0x00 /* receive FIFO treshold = 1 */ +#define UART_FCR_HSU_64_16B 0x40 /* receive FIFO treshold = 16 */ +#define UART_FCR_HSU_64_32B 0x80 /* receive FIFO treshold = 32 */ +#define UART_FCR_HSU_64_56B 0xc0 /* receive FIFO treshold = 56 */ + +#define UART_FCR_HSU_16_1B 0x00 /* receive FIFO treshold = 1 */ 
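
/*
 * Illustrative sketch (not part of the imported header): a minimal polled
 * 16550-style console routine built only from the register offsets and bit
 * masks defined above (UART_LCR/UART_DLL/UART_DLM for the divisor latch,
 * UART_FCR for the FIFOs, UART_LSR_THRE/UART_TX for transmit).  EX_UART_BASE,
 * EX_UART_CLK and the byte-wide, unshifted register layout are assumptions
 * made for the example; real boards use their own base address, register
 * stride and input clock.
 */
#include <stdint.h>

#define EX_UART_BASE	0x10000000UL	/* assumed memory-mapped 16550 base */
#define EX_UART_CLK	1843200U	/* assumed 1.8432 MHz reference clock */

static inline void ex_uart_write(unsigned int reg, uint8_t val)
{
	*(volatile uint8_t *)(EX_UART_BASE + reg) = val;
}

static inline uint8_t ex_uart_read(unsigned int reg)
{
	return *(volatile uint8_t *)(EX_UART_BASE + reg);
}

static void ex_uart_init(unsigned int baud)
{
	/* 16x oversampling: baud = clk / (16 * divisor) */
	uint16_t divisor = EX_UART_CLK / (16 * baud);

	ex_uart_write(UART_IER, 0);			/* mask all interrupts */
	ex_uart_write(UART_LCR, UART_LCR_DLAB);		/* open divisor latch */
	ex_uart_write(UART_DLL, divisor & 0xff);
	ex_uart_write(UART_DLM, divisor >> 8);
	ex_uart_write(UART_LCR, UART_LCR_WLEN8);	/* 8N1, DLAB cleared */
	ex_uart_write(UART_FCR, UART_FCR_ENABLE_FIFO |
			UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
	ex_uart_write(UART_MCR, UART_MCR_DTR | UART_MCR_RTS);
}

static void ex_uart_putc(char c)
{
	/* wait until the transmit holding register is empty, then write */
	while (!(ex_uart_read(UART_LSR) & UART_LSR_THRE))
		;
	ex_uart_write(UART_TX, (uint8_t)c);
}
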
+#define UART_FCR_HSU_16_4B 0x40 /* receive FIFO treshold = 4 */ +#define UART_FCR_HSU_16_8B 0x80 /* receive FIFO treshold = 8 */ +#define UART_FCR_HSU_16_14B 0xc0 /* receive FIFO treshold = 14 */ + +#define UART_FCR_HSU_64B_FIFO 0x20 /* chose 64 bytes FIFO */ +#define UART_FCR_HSU_16B_FIFO 0x00 /* chose 16 bytes FIFO */ + +#define UART_FCR_HALF_EMPT_TXI 0x00 /* trigger TX_EMPT IRQ for half empty */ +#define UART_FCR_FULL_EMPT_TXI 0x08 /* trigger TX_EMPT IRQ for full empty */ + +/* + * These register definitions are for the 16C950 + */ +#define UART_ASR 0x01 /* Additional Status Register */ +#define UART_RFL 0x03 /* Receiver FIFO level */ +#define UART_TFL 0x04 /* Transmitter FIFO level */ +#define UART_ICR 0x05 /* Index Control Register */ + +/* The 16950 ICR registers */ +#define UART_ACR 0x00 /* Additional Control Register */ +#define UART_CPR 0x01 /* Clock Prescalar Register */ +#define UART_TCR 0x02 /* Times Clock Register */ +#define UART_CKS 0x03 /* Clock Select Register */ +#define UART_TTL 0x04 /* Transmitter Interrupt Trigger Level */ +#define UART_RTL 0x05 /* Receiver Interrupt Trigger Level */ +#define UART_FCL 0x06 /* Flow Control Level Lower */ +#define UART_FCH 0x07 /* Flow Control Level Higher */ +#define UART_ID1 0x08 /* ID #1 */ +#define UART_ID2 0x09 /* ID #2 */ +#define UART_ID3 0x0A /* ID #3 */ +#define UART_REV 0x0B /* Revision */ +#define UART_CSR 0x0C /* Channel Software Reset */ +#define UART_NMR 0x0D /* Nine-bit Mode Register */ +#define UART_CTR 0xFF + +/* + * The 16C950 Additional Control Register + */ +#define UART_ACR_RXDIS 0x01 /* Receiver disable */ +#define UART_ACR_TXDIS 0x02 /* Transmitter disable */ +#define UART_ACR_DSRFC 0x04 /* DSR Flow Control */ +#define UART_ACR_TLENB 0x20 /* 950 trigger levels enable */ +#define UART_ACR_ICRRD 0x40 /* ICR Read enable */ +#define UART_ACR_ASREN 0x80 /* Additional status enable */ + + + +/* + * These definitions are for the RSA-DV II/S card, from + * + * Kiyokazu SUTO + */ + +#define UART_RSA_BASE (-8) + +#define UART_RSA_MSR ((UART_RSA_BASE) + 0) /* I/O: Mode Select Register */ + +#define UART_RSA_MSR_SWAP (1 << 0) /* Swap low/high 8 bytes in I/O port addr */ +#define UART_RSA_MSR_FIFO (1 << 2) /* Enable the external FIFO */ +#define UART_RSA_MSR_FLOW (1 << 3) /* Enable the auto RTS/CTS flow control */ +#define UART_RSA_MSR_ITYP (1 << 4) /* Level (1) / Edge triger (0) */ + +#define UART_RSA_IER ((UART_RSA_BASE) + 1) /* I/O: Interrupt Enable Register */ + +#define UART_RSA_IER_Rx_FIFO_H (1 << 0) /* Enable Rx FIFO half full int. */ +#define UART_RSA_IER_Tx_FIFO_H (1 << 1) /* Enable Tx FIFO half full int. */ +#define UART_RSA_IER_Tx_FIFO_E (1 << 2) /* Enable Tx FIFO empty int. 
*/ +#define UART_RSA_IER_Rx_TOUT (1 << 3) /* Enable char receive timeout int */ +#define UART_RSA_IER_TIMER (1 << 4) /* Enable timer interrupt */ + +#define UART_RSA_SRR ((UART_RSA_BASE) + 2) /* IN: Status Read Register */ + +#define UART_RSA_SRR_Tx_FIFO_NEMP (1 << 0) /* Tx FIFO is not empty (1) */ +#define UART_RSA_SRR_Tx_FIFO_NHFL (1 << 1) /* Tx FIFO is not half full (1) */ +#define UART_RSA_SRR_Tx_FIFO_NFUL (1 << 2) /* Tx FIFO is not full (1) */ +#define UART_RSA_SRR_Rx_FIFO_NEMP (1 << 3) /* Rx FIFO is not empty (1) */ +#define UART_RSA_SRR_Rx_FIFO_NHFL (1 << 4) /* Rx FIFO is not half full (1) */ +#define UART_RSA_SRR_Rx_FIFO_NFUL (1 << 5) /* Rx FIFO is not full (1) */ +#define UART_RSA_SRR_Rx_TOUT (1 << 6) /* Character reception timeout occurred (1) */ +#define UART_RSA_SRR_TIMER (1 << 7) /* Timer interrupt occurred */ + +#define UART_RSA_FRR ((UART_RSA_BASE) + 2) /* OUT: FIFO Reset Register */ + +#define UART_RSA_TIVSR ((UART_RSA_BASE) + 3) /* I/O: Timer Interval Value Set Register */ + +#define UART_RSA_TCR ((UART_RSA_BASE) + 4) /* OUT: Timer Control Register */ + +#define UART_RSA_TCR_SWITCH (1 << 0) /* Timer on */ + +/* + * The RSA DSV/II board has two fixed clock frequencies. One is the + * standard rate, and the other is 8 times faster. + */ +#define SERIAL_RSA_BAUD_BASE (921600) +#define SERIAL_RSA_BAUD_BASE_LO (SERIAL_RSA_BAUD_BASE / 8) + +/* + * Extra serial register definitions for the internal UARTs + * in TI OMAP processors. + */ +#define UART_OMAP_MDR1 0x08 /* Mode definition register */ +#define UART_OMAP_MDR2 0x09 /* Mode definition register 2 */ +#define UART_OMAP_SCR 0x10 /* Supplementary control register */ +#define UART_OMAP_SSR 0x11 /* Supplementary status register */ +#define UART_OMAP_EBLR 0x12 /* BOF length register */ +#define UART_OMAP_OSC_12M_SEL 0x13 /* OMAP1510 12MHz osc select */ +#define UART_OMAP_MVER 0x14 /* Module version register */ +#define UART_OMAP_SYSC 0x15 /* System configuration register */ +#define UART_OMAP_SYSS 0x16 /* System status register */ +#define UART_OMAP_WER 0x17 /* Wake-up enable register */ + +/* + * These are the definitions for the MDR1 register + */ +#define UART_OMAP_MDR1_16X_MODE 0x00 /* UART 16x mode */ +#define UART_OMAP_MDR1_SIR_MODE 0x01 /* SIR mode */ +#define UART_OMAP_MDR1_16X_ABAUD_MODE 0x02 /* UART 16x auto-baud */ +#define UART_OMAP_MDR1_13X_MODE 0x03 /* UART 13x mode */ +#define UART_OMAP_MDR1_MIR_MODE 0x04 /* MIR mode */ +#define UART_OMAP_MDR1_FIR_MODE 0x05 /* FIR mode */ +#define UART_OMAP_MDR1_CIR_MODE 0x06 /* CIR mode */ +#define UART_OMAP_MDR1_DISABLE 0x07 /* Disable (default state) */ + +/* + * These are definitions for the Exar XR17V35X and XR17(C|D)15X + */ +#define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */ +#define UART_EXAR_SLEEP 0x8b /* Sleep mode */ +#define UART_EXAR_DVID 0x8d /* Device identification */ + +#define UART_EXAR_FCTR 0x08 /* Feature Control Register */ +#define UART_FCTR_EXAR_IRDA 0x08 /* IrDa data encode select */ +#define UART_FCTR_EXAR_485 0x10 /* Auto 485 half duplex dir ctl */ +#define UART_FCTR_EXAR_TRGA 0x00 /* FIFO trigger table A */ +#define UART_FCTR_EXAR_TRGB 0x60 /* FIFO trigger table B */ +#define UART_FCTR_EXAR_TRGC 0x80 /* FIFO trigger table C */ +#define UART_FCTR_EXAR_TRGD 0xc0 /* FIFO trigger table D programmable */ + +#define UART_EXAR_TXTRG 0x0a /* Tx FIFO trigger level write-only */ +#define UART_EXAR_RXTRG 0x0b /* Rx FIFO trigger level write-only */ + +#endif /* _LINUX_SERIAL_REG_H */ + diff --git a/include/linux/sizes.h b/include/linux/sizes.h new file mode 
100644 index 0000000..fbde0bc --- /dev/null +++ b/include/linux/sizes.h @@ -0,0 +1,51 @@ +/* + * include/linux/sizes.h + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef __LINUX_SIZES_H__ +#define __LINUX_SIZES_H__ + +#include + +#define SZ_1 0x00000001 +#define SZ_2 0x00000002 +#define SZ_4 0x00000004 +#define SZ_8 0x00000008 +#define SZ_16 0x00000010 +#define SZ_32 0x00000020 +#define SZ_64 0x00000040 +#define SZ_128 0x00000080 +#define SZ_256 0x00000100 +#define SZ_512 0x00000200 + +#define SZ_1K 0x00000400 +#define SZ_2K 0x00000800 +#define SZ_4K 0x00001000 +#define SZ_8K 0x00002000 +#define SZ_16K 0x00004000 +#define SZ_32K 0x00008000 +#define SZ_64K 0x00010000 +#define SZ_128K 0x00020000 +#define SZ_256K 0x00040000 +#define SZ_512K 0x00080000 + +#define SZ_1M 0x00100000 +#define SZ_2M 0x00200000 +#define SZ_4M 0x00400000 +#define SZ_8M 0x00800000 +#define SZ_16M 0x01000000 +#define SZ_32M 0x02000000 +#define SZ_64M 0x04000000 +#define SZ_128M 0x08000000 +#define SZ_256M 0x10000000 +#define SZ_512M 0x20000000 + +#define SZ_1G 0x40000000 +#define SZ_2G 0x80000000 + +#define SZ_4G _AC(0x100000000, ULL) + +#endif /* __LINUX_SIZES_H__ */ diff --git a/include/linux/soc/ti/cppi5.h b/include/linux/soc/ti/cppi5.h new file mode 100644 index 0000000..34038b3 --- /dev/null +++ b/include/linux/soc/ti/cppi5.h @@ -0,0 +1,995 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * CPPI5 descriptors interface + * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef __TI_CPPI5_H__ +#define __TI_CPPI5_H__ + +#include +#include + +/** + * Descriptor header, present in all types of descriptors + */ +struct cppi5_desc_hdr_t { + u32 pkt_info0; /* Packet info word 0 (n/a in Buffer desc) */ + u32 pkt_info1; /* Packet info word 1 (n/a in Buffer desc) */ + u32 pkt_info2; /* Packet info word 2 Buffer reclamation info */ + u32 src_dst_tag; /* Packet info word 3 (n/a in Buffer desc) */ +} __packed; + +/** + * Host-mode packet and buffer descriptor definition + */ +struct cppi5_host_desc_t { + struct cppi5_desc_hdr_t hdr; + u64 next_desc; /* w4/5: Linking word */ + u64 buf_ptr; /* w6/7: Buffer pointer */ + u32 buf_info1; /* w8: Buffer valid data length */ + u32 org_buf_len; /* w9: Original buffer length */ + u64 org_buf_ptr; /* w10/11: Original buffer pointer */ + u32 epib[0]; /* Extended Packet Info Data (optional, 4 words) */ + /* + * Protocol Specific Data (optional, 0-128 bytes in multiples of 4), + * and/or Other Software Data (0-N bytes, optional) + */ +} __packed; + +#define CPPI5_DESC_MIN_ALIGN (16U) + +#define CPPI5_INFO0_HDESC_EPIB_SIZE (16U) +#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE (128U) + +#define CPPI5_INFO0_HDESC_TYPE_SHIFT (30U) +#define CPPI5_INFO0_HDESC_TYPE_MASK GENMASK(31, 30) +#define CPPI5_INFO0_DESC_TYPE_VAL_HOST (1U) +#define CPPI5_INFO0_DESC_TYPE_VAL_MONO (2U) +#define CPPI5_INFO0_DESC_TYPE_VAL_TR (3U) +#define CPPI5_INFO0_HDESC_EPIB_PRESENT BIT(29) +/* + * Protocol Specific Words location: + * 0 - located in the descriptor, + * 1 = located in the SOP Buffer immediately prior to the data. 
+ */ +#define CPPI5_INFO0_HDESC_PSINFO_LOCATION BIT(28) +#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT (22U) +#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK GENMASK(27, 22) +#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT (0) +#define CPPI5_INFO0_HDESC_PKTLEN_MASK GENMASK(21, 0) + +#define CPPI5_INFO1_DESC_PKTERROR_SHIFT (28U) +#define CPPI5_INFO1_DESC_PKTERROR_MASK GENMASK(31, 28) +#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT (24U) +#define CPPI5_INFO1_HDESC_PSFLGS_MASK GENMASK(27, 24) +#define CPPI5_INFO1_DESC_PKTID_SHIFT (14U) +#define CPPI5_INFO1_DESC_PKTID_MASK GENMASK(23, 14) +#define CPPI5_INFO1_DESC_FLOWID_SHIFT (0) +#define CPPI5_INFO1_DESC_FLOWID_MASK GENMASK(13, 0) + +#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT (27U) +#define CPPI5_INFO2_HDESC_PKTTYPE_MASK GENMASK(31, 27) +/* Return Policy: 0 - Entire packet 1 - Each buffer */ +#define CPPI5_INFO2_HDESC_RETPOLICY BIT(18) +/* + * Early Return: + * 0 = desc pointers should be returned after all reads have been completed + * 1 = desc pointers should be returned immediately upon fetching + * the descriptor and beginning to transfer data. + */ +#define CPPI5_INFO2_HDESC_EARLYRET BIT(17) +/* + * Return Push Policy: + * 0 = Descriptor must be returned to tail of queue + * 1 = Descriptor must be returned to head of queue + */ +#define CPPI5_INFO2_DESC_RETPUSHPOLICY BIT(16) +#define CPPI5_INFO2_DESC_RETQ_SHIFT (0) +#define CPPI5_INFO2_DESC_RETQ_MASK GENMASK(15, 0) + +#define CPPI5_INFO3_DESC_SRCTAG_SHIFT (16U) +#define CPPI5_INFO3_DESC_SRCTAG_MASK GENMASK(31, 16) +#define CPPI5_INFO3_DESC_DSTTAG_SHIFT (0) +#define CPPI5_INFO3_DESC_DSTTAG_MASK GENMASK(15, 0) + +#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT (0) +#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK GENMASK(27, 0) + +#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT (0) +#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK GENMASK(27, 0) + +/* + * Host Packet Descriptor Extended Packet Info Block + */ +struct cppi5_desc_epib_t { + u32 timestamp; /* w0: application specific timestamp */ + u32 sw_info0; /* w1: Software Info 0 */ + u32 sw_info1; /* w2: Software Info 1 */ + u32 sw_info2; /* w3: Software Info 2 */ +}; + +/** + * Monolithic-mode packet descriptor + */ +struct cppi5_monolithic_desc_t { + struct cppi5_desc_hdr_t hdr; + u32 epib[0]; /* Extended Packet Info Data (optional, 4 words) */ + /* + * Protocol Specific Data (optional, 0-128 bytes in multiples of 4), + * and/or Other Software Data (0-N bytes, optional) + */ +}; + +#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U) +#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK GENMASK(26, 18) + +/* + * Reload Enable: + * 0 = Finish the packet and place the descriptor back on the return queue + * 1 = Vector to the Reload Index and resume processing + */ +#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT (20U) +#define CPPI5_INFO0_TRDESC_RLDCNT_MASK GENMASK(28, 20) +#define CPPI5_INFO0_TRDESC_RLDCNT_MAX (0x1ff) +#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE CPPI5_INFO0_TRDESC_RLDCNT_MAX +#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT (14U) +#define CPPI5_INFO0_TRDESC_RLDIDX_MASK GENMASK(19, 14) +#define CPPI5_INFO0_TRDESC_RLDIDX_MAX (0x3f) +#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT (0) +#define CPPI5_INFO0_TRDESC_LASTIDX_MASK GENMASK(13, 0) + +#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT (24U) +#define CPPI5_INFO1_TRDESC_RECSIZE_MASK GENMASK(26, 24) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B (0) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B (1U) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B (2U) +#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B (3U) + +static inline void cppi5_desc_dump(void *desc, u32 size) +{ + 
print_hex_dump(KERN_ERR "dump udmap_desc: ", DUMP_PREFIX_NONE, + 32, 4, desc, size, false); +} + +/** + * cppi5_desc_get_type - get descriptor type + * @desc_hdr: packet descriptor/TR header + * + * Returns descriptor type: + * CPPI5_INFO0_DESC_TYPE_VAL_HOST + * CPPI5_INFO0_DESC_TYPE_VAL_MONO + * CPPI5_INFO0_DESC_TYPE_VAL_TR + */ +static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr) +{ + WARN_ON(!desc_hdr); + + return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >> + CPPI5_INFO0_HDESC_TYPE_SHIFT; +} + +/** + * cppi5_desc_get_errflags - get Error Flags from Desc + * @desc_hdr: packet/TR descriptor header + * + * Returns Error Flags from Packet/TR Descriptor + */ +static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr) +{ + WARN_ON(!desc_hdr); + + return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >> + CPPI5_INFO1_DESC_PKTERROR_SHIFT; +} + +/** + * cppi5_desc_get_pktids - get Packet and Flow ids from Desc + * @desc_hdr: packet/TR descriptor header + * @pkt_id: Packet ID + * @flow_id: Flow ID + * + * Returns Packet and Flow ids from packet/TR descriptor + */ +static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr, + u32 *pkt_id, u32 *flow_id) +{ + WARN_ON(!desc_hdr); + + *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >> + CPPI5_INFO1_DESC_PKTID_SHIFT; + *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >> + CPPI5_INFO1_DESC_FLOWID_SHIFT; +} + +/** + * cppi5_desc_set_pktids - set Packet and Flow ids in Desc + * @desc_hdr: packet/TR descriptor header + * @pkt_id: Packet ID + * @flow_id: Flow ID + */ +static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr, + u32 pkt_id, u32 flow_id) +{ + WARN_ON(!desc_hdr); + + desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) & + CPPI5_INFO1_DESC_PKTID_MASK; + desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) & + CPPI5_INFO1_DESC_FLOWID_MASK; +} + +/** + * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc + * @desc_hdr: packet/TR descriptor header + * @flags: fags, supported values + * CPPI5_INFO2_HDESC_RETPOLICY + * CPPI5_INFO2_HDESC_EARLYRET + * CPPI5_INFO2_DESC_RETPUSHPOLICY + * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved + */ +static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr, + u32 flags, u32 return_ring_id) +{ + WARN_ON(!desc_hdr); + + desc_hdr->pkt_info2 |= flags; + desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK; +} + +/** + * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc + * @desc_hdr: packet/TR descriptor header + * @src_tag_id: Source Tag + * @dst_tag_id: Dest Tag + * + * Returns Packet Src/Dst Tags from packet/TR descriptor + */ +static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr, + u32 *src_tag_id, u32 *dst_tag_id) +{ + WARN_ON(!desc_hdr); + + if (src_tag_id) + *src_tag_id = (desc_hdr->src_dst_tag & + CPPI5_INFO3_DESC_SRCTAG_MASK) >> + CPPI5_INFO3_DESC_SRCTAG_SHIFT; + if (dst_tag_id) + *dst_tag_id = desc_hdr->src_dst_tag & + CPPI5_INFO3_DESC_DSTTAG_MASK; +} + +/** + * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc + * @desc_hdr: packet/TR descriptor header + * @src_tag_id: Source Tag + * @dst_tag_id: Dest Tag + * + * Returns Packet Src/Dst Tags from packet/TR descriptor + */ +static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr, + u32 src_tag_id, u32 dst_tag_id) +{ + WARN_ON(!desc_hdr); + + desc_hdr->src_dst_tag = (src_tag_id << 
CPPI5_INFO3_DESC_SRCTAG_SHIFT) & + CPPI5_INFO3_DESC_SRCTAG_MASK; + desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK; +} + +/** + * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size + * @epib: is EPIB present + * @psdata_size: PSDATA size + * @sw_data_size: SWDATA size + * + * Returns required Host Packet Descriptor size + * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE + */ +static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size, + u32 sw_data_size) +{ + u32 desc_size; + + if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE) + return 0; + //TODO_GS: align + desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size + + sw_data_size; + + if (epib) + desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE; + + return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN); +} + +/** + * cppi5_hdesc_init - Init Host Packet Descriptor size + * @desc: Host packet descriptor + * @flags: supported values + * CPPI5_INFO0_HDESC_EPIB_PRESENT + * CPPI5_INFO0_HDESC_PSINFO_LOCATION + * @psdata_size: PSDATA size + * + * Returns required Host Packet Descriptor size + * 0 - if PSDATA > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE + */ +static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags, + u32 psdata_size) +{ + WARN_ON(!desc); + WARN_ON(psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE); + WARN_ON(flags & ~(CPPI5_INFO0_HDESC_EPIB_PRESENT | + CPPI5_INFO0_HDESC_PSINFO_LOCATION)); + + desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST << + CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags); + desc->hdr.pkt_info0 |= ((psdata_size >> 2) << + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK; + desc->next_desc = 0; +} + +/** + * cppi5_hdesc_update_flags - Replace descriptor flags + * @desc: Host packet descriptor + * @flags: supported values + * CPPI5_INFO0_HDESC_EPIB_PRESENT + * CPPI5_INFO0_HDESC_PSINFO_LOCATION + */ +static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc, + u32 flags) +{ + WARN_ON(!desc); + WARN_ON(flags & ~(CPPI5_INFO0_HDESC_EPIB_PRESENT | + CPPI5_INFO0_HDESC_PSINFO_LOCATION)); + + desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT | + CPPI5_INFO0_HDESC_PSINFO_LOCATION); + desc->hdr.pkt_info0 |= flags; +} + +/** + * cppi5_hdesc_update_psdata_size - Replace PSdata size + * @desc: Host packet descriptor + * @psdata_size: PSDATA size + */ +static inline void cppi5_hdesc_update_psdata_size( + struct cppi5_host_desc_t *desc, u32 psdata_size) +{ + WARN_ON(!desc); + WARN_ON(psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE); + + desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK; + desc->hdr.pkt_info0 |= ((psdata_size >> 2) << + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK; +} + +/** + * cppi5_hdesc_get_psdata_size - get PSdata size in bytes + * @desc: Host packet descriptor + */ +static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc) +{ + u32 psdata_size = 0; + + WARN_ON(!desc); + + if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)) + psdata_size = (desc->hdr.pkt_info0 & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >> + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT; + + return (psdata_size << 2); +} + +/** + * cppi5_hdesc_get_pktlen - get Packet Length from HDesc + * @desc: Host packet descriptor + * + * Returns Packet Length from Host Packet Descriptor + */ +static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc) +{ + WARN_ON(!desc); + + return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK); +} + +/** + * cppi5_hdesc_set_pktlen - set Packet Length 
in HDesc + * @desc: Host packet descriptor + */ +static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc, + u32 pkt_len) +{ + WARN_ON(!desc); + + desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK); +} + +/** + * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc + * @desc: Host packet descriptor + * + * Returns Protocol Specific Flags from Host Packet Descriptor + */ +static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc) +{ + WARN_ON(!desc); + + return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >> + CPPI5_INFO1_HDESC_PSFLGS_SHIFT; +} + +/** + * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc + * @desc: Host packet descriptor + */ +static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc, + u32 ps_flags) +{ + WARN_ON(!desc); + + desc->hdr.pkt_info1 |= (ps_flags << + CPPI5_INFO1_HDESC_PSFLGS_SHIFT) & + CPPI5_INFO1_HDESC_PSFLGS_MASK; +} + +/** + * cppi5_hdesc_get_errflags - get Packet Type from HDesc + * @desc: Host packet descriptor + */ +static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc) +{ + WARN_ON(!desc); + + return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >> + CPPI5_INFO2_HDESC_PKTTYPE_SHIFT; +} + +/** + * cppi5_hdesc_get_errflags - set Packet Type in HDesc + * @desc: Host packet descriptor + * @pkt_type: Packet Type + */ +static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc, + u32 pkt_type) +{ + WARN_ON(!desc); + desc->hdr.pkt_info2 |= + (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) & + CPPI5_INFO2_HDESC_PKTTYPE_MASK; +} + +/** + * cppi5_hdesc_attach_buf - attach buffer to HDesc + * @desc: Host packet descriptor + * @buf: Buffer physical address + * @buf_data_len: Buffer length + * @obuf: Original Buffer physical address + * @obuf_len: Original Buffer length + * + * Attaches buffer to Host Packet Descriptor + */ +static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc, + dma_addr_t buf, u32 buf_data_len, + dma_addr_t obuf, u32 obuf_len) +{ + WARN_ON(!desc); + WARN_ON(!buf && !obuf); + + desc->buf_ptr = buf; + desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK; + desc->org_buf_ptr = obuf; + desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK; +} + +static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc, + dma_addr_t *obuf, u32 *obuf_len) +{ + WARN_ON(!desc); + WARN_ON(!obuf); + WARN_ON(!obuf_len); + + *obuf = desc->org_buf_ptr; + *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK; +} + +static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc) +{ + WARN_ON(!desc); + + desc->buf_ptr = desc->org_buf_ptr; + desc->buf_info1 = desc->org_buf_len; +} + +/** + * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc + * @desc: Host Packet Descriptor + * @buf_desc: Host Buffer Descriptor physical address + * + * add and link Host Buffer Descriptor to HDesc + */ +static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc, + dma_addr_t hbuf_desc) +{ + WARN_ON(!desc); + WARN_ON(!hbuf_desc); + + desc->next_desc = hbuf_desc; +} + +static inline dma_addr_t cppi5_hdesc_get_next_hbdesc( + struct cppi5_host_desc_t *desc) +{ + WARN_ON(!desc); + + return (dma_addr_t)desc->next_desc; +} + +static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc) +{ + WARN_ON(!desc); + + desc->hdr = (struct cppi5_desc_hdr_t) { 0 }; + desc->next_desc = 0; +} + +/** + * cppi5_hdesc_epib_present - check if EPIB present + * 
@desc_hdr: packet descriptor/TR header + * + * Returns true if EPIB present in the packet + */ +static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr) +{ + WARN_ON(!desc_hdr); + return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT); +} + +/** + * cppi5_hdesc_get_psdata - Get pointer on PSDATA + * @desc: Host packet descriptor + * + * Returns pointer on PSDATA in HDesc. + * NULL - if ps_data placed at the start of data buffer. + */ +static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc) +{ + u32 psdata_size; + void *psdata; + + WARN_ON(!desc); + + if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION) + return NULL; + + psdata_size = (desc->hdr.pkt_info0 & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >> + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT; + + if (!psdata_size) + return NULL; + + psdata = &desc->epib; + + if (cppi5_hdesc_epib_present(&desc->hdr)) + psdata += CPPI5_INFO0_HDESC_EPIB_SIZE; + + return psdata; +} + +static inline u32 *cppi5_hdesc_get_psdata32(struct cppi5_host_desc_t *desc) +{ + return (u32 *)cppi5_hdesc_get_psdata(desc); +} + +/** + * cppi5_hdesc_get_swdata - Get pointer on swdata + * @desc: Host packet descriptor + * + * Returns pointer on SWDATA in HDesc. + * NOTE. It's caller responsibility to be sure hdesc actually has swdata. + */ +static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc) +{ + u32 psdata_size = 0; + void *swdata; + + WARN_ON(!desc); + + if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)) + psdata_size = (desc->hdr.pkt_info0 & + CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >> + CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT; + + swdata = &desc->epib; + + if (cppi5_hdesc_epib_present(&desc->hdr)) + swdata += CPPI5_INFO0_HDESC_EPIB_SIZE; + + swdata += (psdata_size << 2); + + return swdata; +} + +/* ================================== TR ================================== */ + +#define CPPI5_TR_TYPE_SHIFT (0U) +#define CPPI5_TR_TYPE_MASK GENMASK(3, 0) +#define CPPI5_TR_STATIC BIT(4) +#define CPPI5_TR_WAIT BIT(5) +#define CPPI5_TR_EVENT_SIZE_SHIFT (6U) +#define CPPI5_TR_EVENT_SIZE_MASK GENMASK(7, 6) +#define CPPI5_TR_TRIGGER0_SHIFT (8U) +#define CPPI5_TR_TRIGGER0_MASK GENMASK(9, 8) +#define CPPI5_TR_TRIGGER0_TYPE_SHIFT (10U) +#define CPPI5_TR_TRIGGER0_TYPE_MASK GENMASK(11, 10) +#define CPPI5_TR_TRIGGER1_SHIFT (12U) +#define CPPI5_TR_TRIGGER1_MASK GENMASK(13, 12) +#define CPPI5_TR_TRIGGER1_TYPE_SHIFT (14U) +#define CPPI5_TR_TRIGGER1_TYPE_MASK GENMASK(15, 14) +#define CPPI5_TR_CMD_ID_SHIFT (16U) +#define CPPI5_TR_CMD_ID_MASK GENMASK(23, 16) +#define CPPI5_TR_CSF_FLAGS_SHIFT (24U) +#define CPPI5_TR_CSF_FLAGS_MASK GENMASK(31, 24) +#define CPPI5_TR_CSF_SA_INDIRECT BIT(0) +#define CPPI5_TR_CSF_DA_INDIRECT BIT(1) +#define CPPI5_TR_CSF_SUPR_EVT BIT(2) +#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U) +#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4) +#define CPPI5_TR_CSF_EOP BIT(7) + +/* Udmap TR flags Type field specifies the type of TR. 
*/ +enum cppi5_tr_types { + /* type0: One dimensional data move */ + CPPI5_TR_TYPE0 = 0, + /* type1: Two dimensional data move */ + CPPI5_TR_TYPE1, + /* type2: Three dimensional data move */ + CPPI5_TR_TYPE2, + /* type3: Four dimensional data move */ + CPPI5_TR_TYPE3, + /* type4: Four dimensional data move with data formatting */ + CPPI5_TR_TYPE4, + /* type5: Four dimensional Cache Warm */ + CPPI5_TR_TYPE5, + /* type6-7: Reserved */ + /* type8: Four Dimensional Block Move */ + CPPI5_TR_TYPE8 = 8, + /* type9: Four Dimensional Block Move with Repacking */ + CPPI5_TR_TYPE9, + /* type10: Two Dimensional Block Move */ + CPPI5_TR_TYPE10, + /* type11: Two Dimensional Block Move with Repacking */ + CPPI5_TR_TYPE11, + /* type12-14: Reserved */ + /* type15 Four Dimensional Block Move with Repacking and Indirection */ + CPPI5_TR_TYPE15 = 15, + CPPI5_TR_TYPE_MAX +}; + +/* + * Udmap TR Flags EVENT_SIZE field specifies when an event is generated + * for each TR. + */ +enum cppi5_tr_event_size { + /* When TR is complete and all status for the TR has been received */ + CPPI5_TR_EVENT_SIZE_COMPLETION, + /* + * Type 0: when the last data transaction is sent for the TR; + * Type 1-11: when ICNT1 is decremented + */ + CPPI5_TR_EVENT_SIZE_ICNT1_DEC, + /* + * Type 0-1,10-11: when the last data transaction is sent for the TR; + * All other types: when ICNT2 is decremented + */ + CPPI5_TR_EVENT_SIZE_ICNT2_DEC, + /* + * Type 0-2,10-11: when the last data transaction is sent for the TR; + * All other types: when ICNT3 is decremented + */ + CPPI5_TR_EVENT_SIZE_ICNT3_DEC, + CPPI5_TR_EVENT_SIZE_MAX +}; + +/* + * Udmap TR Flags TRIGGERx field specifies the type of trigger used to + * enable the TR to transfer data as specified by TRIGGERx_TYPE field. + */ +enum cppi5_tr_trigger { + CPPI5_TR_TRIGGER_NONE, /* No Trigger */ + CPPI5_TR_TRIGGER_GLOBAL0, /* Global Trigger 0 */ + CPPI5_TR_TRIGGER_GLOBAL1, /* Global Trigger 1 */ + CPPI5_TR_TRIGGER_LOCAL_EVENT, /* Local Event */ + CPPI5_TR_TRIGGER_MAX +}; + +/* + * Udmap TR Flags TRIGGERx_TYPE field specifies the type of data transfer + * that will be enabled by receiving a trigger as specified by TRIGGERx. 
+ */ +enum cppi5_tr_trigger_type { + /* The second inner most loop (ICNT1) will be decremented by 1 */ + CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC, + /* The third inner most loop (ICNT2) will be decremented by 1 */ + CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, + /* The outer most loop (ICNT3) will be decremented by 1 */ + CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC, + /* The entire TR will be allowed to complete */ + CPPI5_TR_TRIGGER_TYPE_ALL, + CPPI5_TR_TRIGGER_TYPE_MAX +}; + +typedef u32 cppi5_tr_flags_t; + +/* Type 0 (One dimensional data move) TR (16 byte) */ +struct cppi5_tr_type0_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 unused; + u64 addr; +} __aligned(16) __packed; + +/* Type 1 (Two dimensional data move) TR (32 byte) */ +struct cppi5_tr_type1_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; +} __aligned(32) __packed; + +/* Type 2 (Three dimensional data move) TR (32 byte) */ +struct cppi5_tr_type2_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; + u16 icnt2; + u16 unused; + s32 dim2; +} __aligned(32) __packed; + +/* Type 3 (Four dimensional data move) TR (32 byte) */ +struct cppi5_tr_type3_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; + u16 icnt2; + u16 icnt3; + s32 dim2; + s32 dim3; +} __aligned(32) __packed; + +/* + * Type 15 (Four Dimensional Block Copy with Repacking and + * Indirection Support) TR (64 byte). + */ +struct cppi5_tr_type15_t { + cppi5_tr_flags_t flags; + u16 icnt0; + u16 icnt1; + u64 addr; + s32 dim1; + u16 icnt2; + u16 icnt3; + s32 dim2; + s32 dim3; + u32 _reserved; + s32 ddim1; + u64 daddr; + s32 ddim2; + s32 ddim3; + u16 dicnt0; + u16 dicnt1; + u16 dicnt2; + u16 dicnt3; +} __aligned(64) __packed; + +struct cppi5_tr_resp_t { + u8 status; + u8 reserved; + u8 cmd_id; + u8 flags; +} __packed; + +#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT (0U) +#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK GENMASK(3, 0) +#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT (4U) +#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK GENMASK(7, 4) +#define CPPI5_TR_RESPONSE_CMDID_SHIFT (16U) +#define CPPI5_TR_RESPONSE_CMDID_MASK GENMASK(23, 16) +#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT (24U) +#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK GENMASK(31, 24) + +/* + * Udmap TR Response Status Type field is used to determine + * what type of status is being returned. + */ +enum cppi5_tr_resp_status_type { + CPPI5_TR_RESPONSE_STATUS_COMPLETE, /* None */ + CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR, /* Transfer Error */ + CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR, /* Aborted Error */ + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR, /* Submission Error */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR, /* Unsup. 
Feature */ + CPPI5_TR_RESPONSE_STATUS_MAX +}; + +/* + * Udmap TR Response Status field values which corresponds + * CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR + */ +enum cppi5_tr_resp_status_submission { + /* ICNT0 was 0 */ + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0, + /* Channel FIFO was full when TR received */ + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL, + /* Channel is not owned by the submitter */ + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN, + CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX +}; + +/* + * Udmap TR Response Status field values which corresponds + * CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR + */ +enum cppi5_tr_resp_status_unsupported { + /* TR Type not supported */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE, + /* STATIC not supported */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC, + /* EOL not supported */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL, + /* CONFIGURATION SPECIFIC not supported */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC, + /* AMODE not supported */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE, + /* ELTYPE not supported */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE, + /* DFMT not supported */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT, + /* SECTR not supported */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR, + /* AMODE SPECIFIC field not supported */ + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC, + CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX +}; + +/** + * cppi5_trdesc_calc_size - Calculate TR Descriptor size + * @tr_count: number of TR records + * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128] + * + * Returns required TR Descriptor size + */ +static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size) +{ + /* + * The Size of a TR descriptor is: + * 1 x tr_size : the first 16 bytes is used by the packet info block + + * tr_count x tr_size : Transfer Request Records + + * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records + */ + return tr_size * (tr_count + 1) + + sizeof(struct cppi5_tr_resp_t) * tr_count; +} + +/** + * cppi5_trdesc_init - Init TR Descriptor + * @desc: TR Descriptor + * @tr_count: number of TR records + * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128] + * @reload_idx: Absolute index to jump to on the 2nd and following passes + * through the TR packet. + * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff + * indicates infinite looping. 
+ * + * Init TR Descriptor + */ +static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr, + u32 tr_count, u32 tr_size, u32 reload_idx, + u32 reload_count) +{ + WARN_ON(!desc_hdr); + WARN_ON(tr_count & ~CPPI5_INFO0_TRDESC_LASTIDX_MASK); + WARN_ON(reload_idx > CPPI5_INFO0_TRDESC_RLDIDX_MAX); + WARN_ON(reload_count > CPPI5_INFO0_TRDESC_RLDCNT_MAX); + + desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR << + CPPI5_INFO0_HDESC_TYPE_SHIFT; + desc_hdr->pkt_info0 |= (reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) & + CPPI5_INFO0_TRDESC_RLDCNT_MASK; + desc_hdr->pkt_info0 |= (reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) & + CPPI5_INFO0_TRDESC_RLDIDX_MASK; + desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK; + + desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) << + CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) & + CPPI5_INFO1_TRDESC_RECSIZE_MASK; +} + +/** + * cppi5_tr_init - Init TR record + * @flags: Pointer to the TR's flags + * @type: TR type + * @static_tr: TR is static + * @wait: Wait for TR completion before allow the next TR to start + * @event_size: output event generation cfg + * @cmd_id: TR identifier (application specifics) + * + * Init TR record + */ +static inline void cppi5_tr_init(cppi5_tr_flags_t *flags, + enum cppi5_tr_types type, bool static_tr, + bool wait, enum cppi5_tr_event_size event_size, + u32 cmd_id) +{ + WARN_ON(!flags); + + *flags = type; + *flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) & + CPPI5_TR_EVENT_SIZE_MASK; + + *flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) & + CPPI5_TR_CMD_ID_MASK; + + if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9)) + *flags |= CPPI5_TR_STATIC; + + if (wait) + *flags |= CPPI5_TR_WAIT; +} + +/** + * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type + * @flags: Pointer to the TR's flags + * @trigger0: trigger0 selection + * @trigger0_type: type of data transfer that will be enabled by trigger0 + * @trigger1: trigger1 selection + * @trigger1_type: type of data transfer that will be enabled by trigger1 + * + * Configure the triggers for the TR + */ +static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags, + enum cppi5_tr_trigger trigger0, + enum cppi5_tr_trigger_type trigger0_type, + enum cppi5_tr_trigger trigger1, + enum cppi5_tr_trigger_type trigger1_type) +{ + WARN_ON(!flags); + + *flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) & + CPPI5_TR_TRIGGER0_MASK; + *flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) & + CPPI5_TR_TRIGGER0_TYPE_MASK; + + *flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) & + CPPI5_TR_TRIGGER1_MASK; + *flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) & + CPPI5_TR_TRIGGER1_TYPE_MASK; +} + +/** + * cppi5_tr_cflag_set - Update the Configuration specific flags + * @flags: Pointer to the TR's flags + * @csf: Configuration specific flags + * + * Set a bit in Configuration Specific Flags section of the TR flags. 
+ */ +static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf) +{ + WARN_ON(!flags); + + *flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) & + CPPI5_TR_CSF_FLAGS_MASK; +} + +#endif /* __TI_CPPI5_H__ */ diff --git a/include/linux/soc/ti/k3-navss-ringacc.h b/include/linux/soc/ti/k3-navss-ringacc.h new file mode 100644 index 0000000..487dfe9 --- /dev/null +++ b/include/linux/soc/ti/k3-navss-ringacc.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * TI K3 AM65x NAVSS Ring accelerator Manager (RA) subsystem driver + * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef __SOC_TI_K3_NAVSS_RINGACC_API_H_ +#define __SOC_TI_K3_NAVSS_RINGACC_API_H_ + +#include + +/** + * enum k3_nav_ring_mode - &struct k3_nav_ring_cfg mode + * + * RA ring operational modes + * + * @K3_NAV_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access + * @K3_NAV_RINGACC_RING_MODE_MESSAGE: Messaging mode. Messaging mode requires + * that all accesses to the queue must go through this IP so that all + * accesses to the memory are controlled and ordered. This IP then + * controls the entire state of the queue, and SW has no directly control, + * such as through doorbells and cannot access the storage memory directly. + * This is particularly useful when more than one SW or HW entity can be + * the producer and/or consumer at the same time + * @K3_NAV_RINGACC_RING_MODE_CREDENTIALS: Credentials mode is message mode plus + * stores credentials with each message, requiring the element size to be + * doubled to fit the credentials. Any exposed memory should be protected + * by a firewall from unwanted access + * @K3_NAV_RINGACC_RING_MODE_QM: Queue manager mode. This takes the credentials + * mode and adds packet length per element, along with additional read only + * fields for element count and accumulated queue length. The QM mode only + * operates with an 8 byte element size (any other element size is + * illegal), and like in credentials mode each operation uses 2 element + * slots to store the credentials and length fields + */ +enum k3_nav_ring_mode { + K3_NAV_RINGACC_RING_MODE_RING = 0, + K3_NAV_RINGACC_RING_MODE_MESSAGE, + K3_NAV_RINGACC_RING_MODE_CREDENTIALS, + K3_NAV_RINGACC_RING_MODE_QM, + k3_NAV_RINGACC_RING_MODE_INVALID +}; + +/** + * enum k3_nav_ring_size - &struct k3_nav_ring_cfg elm_size + * + * RA ring element's sizes in bytes. + */ +enum k3_nav_ring_size { + K3_NAV_RINGACC_RING_ELSIZE_4 = 0, + K3_NAV_RINGACC_RING_ELSIZE_8, + K3_NAV_RINGACC_RING_ELSIZE_16, + K3_NAV_RINGACC_RING_ELSIZE_32, + K3_NAV_RINGACC_RING_ELSIZE_64, + K3_NAV_RINGACC_RING_ELSIZE_128, + K3_NAV_RINGACC_RING_ELSIZE_256, + K3_NAV_RINGACC_RING_ELSIZE_INVALID +}; + +struct k3_nav_ringacc; +struct k3_nav_ring; + +/** + * enum k3_nav_ring_cfg - RA ring configuration structure + * + * @size: Ring size, number of elements + * @elm_size: Ring element size + * @mode: Ring operational mode + * @flags: Ring configuration flags. Possible values: + * @K3_NAV_RINGACC_RING_SHARED: when set allows to request the same ring + * few times. It's usable when the same ring is used as Free Host PD ring + * for different flows, for example. 
+ * Note: Locking should be done by consumer if required + */ +struct k3_nav_ring_cfg { + u32 size; + enum k3_nav_ring_size elm_size; + enum k3_nav_ring_mode mode; +#define K3_NAV_RINGACC_RING_SHARED BIT(1) + u32 flags; +}; + +#define K3_NAV_RINGACC_RING_ID_ANY (-1) +#define K3_NAV_RINGACC_RING_USE_PROXY BIT(1) + +/** + * k3_nav_ringacc_request_ring - request ring from ringacc + * @ringacc: pointer on ringacc + * @id: ring id or K3_NAV_RINGACC_RING_ID_ANY for any general purpose ring + * @flags: + * @K3_NAV_RINGACC_RING_USE_PROXY: if set - proxy will be allocated and + * used to access ring memory. Sopported only for rings in + * Message/Credentials/Queue mode. + * + * Returns pointer on the Ring - struct k3_nav_ring + * or NULL in case of failure. + */ +struct k3_nav_ring *k3_nav_ringacc_request_ring(struct k3_nav_ringacc *ringacc, + int id, u32 flags); + +/** + * k3_nav_ringacc_get_dev - get pointer on RA device + * @ringacc: pointer on RA + * + * Returns device pointer + */ +struct udevice *k3_nav_ringacc_get_dev(struct k3_nav_ringacc *ringacc); + +/** + * k3_nav_ringacc_ring_reset - ring reset + * @ring: pointer on Ring + * + * Resets ring internal state ((hw)occ, (hw)idx). + * TODO_GS: ? Ring can be reused without reconfiguration + */ +void k3_nav_ringacc_ring_reset(struct k3_nav_ring *ring); +/** + * k3_nav_ringacc_ring_reset - ring reset for DMA rings + * @ring: pointer on Ring + * + * Resets ring internal state ((hw)occ, (hw)idx). Should be used for rings + * which are read by K3 UDMA, like TX or Free Host PD rings. + */ +void k3_nav_ringacc_ring_reset_dma(struct k3_nav_ring *ring, u32 occ); + +/** + * k3_nav_ringacc_ring_free - ring free + * @ring: pointer on Ring + * + * Resets ring and free all alocated resources. + */ +int k3_nav_ringacc_ring_free(struct k3_nav_ring *ring); + +/** + * k3_nav_ringacc_get_ring_id - Get the Ring ID + * @ring: pointer on ring + * + * Returns the Ring ID + */ +u32 k3_nav_ringacc_get_ring_id(struct k3_nav_ring *ring); + +/** + * k3_nav_ringacc_ring_cfg - ring configure + * @ring: pointer on ring + * @cfg: Ring configuration parameters (see &struct k3_nav_ring_cfg) + * + * Configures ring, including ring memory allocation. + * Returns 0 on success, errno otherwise. + */ +int k3_nav_ringacc_ring_cfg(struct k3_nav_ring *ring, + struct k3_nav_ring_cfg *cfg); + +/** + * k3_nav_ringacc_ring_get_size - get ring size + * @ring: pointer on ring + * + * Returns ring size in number of elements. + */ +u32 k3_nav_ringacc_ring_get_size(struct k3_nav_ring *ring); + +/** + * k3_nav_ringacc_ring_get_free - get free elements + * @ring: pointer on ring + * + * Returns number of free elements in the ring. + */ +u32 k3_nav_ringacc_ring_get_free(struct k3_nav_ring *ring); + +/** + * k3_nav_ringacc_ring_get_occ - get ring occupancy + * @ring: pointer on ring + * + * Returns total number of valid entries on the ring + */ +u32 k3_nav_ringacc_ring_get_occ(struct k3_nav_ring *ring); + +/** + * k3_nav_ringacc_ring_is_full - checks if ring is full + * @ring: pointer on ring + * + * Returns true if the ring is full + */ +u32 k3_nav_ringacc_ring_is_full(struct k3_nav_ring *ring); + +/** + * k3_nav_ringacc_ring_push - push element to the ring tail + * @ring: pointer on ring + * @elem: pointer on ring element buffer + * + * Push one ring element to the ring tail. Size of the ring element is + * determined by ring configuration &struct k3_nav_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. 
+ */ +int k3_nav_ringacc_ring_push(struct k3_nav_ring *ring, void *elem); + +/** + * k3_nav_ringacc_ring_pop - pop element from the ring head + * @ring: pointer on ring + * @elem: pointer on ring element buffer + * + * Push one ring element from the ring head. Size of the ring element is + * determined by ring configuration &struct k3_nav_ring_cfg elm_size.. + * + * Returns 0 on success, errno otherwise. + */ +int k3_nav_ringacc_ring_pop(struct k3_nav_ring *ring, void *elem); + +/** + * k3_nav_ringacc_ring_push_head - push element to the ring head + * @ring: pointer on ring + * @elem: pointer on ring element buffer + * + * Push one ring element to the ring head. Size of the ring element is + * determined by ring configuration &struct k3_nav_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + * Not Supported by ring modes: K3_NAV_RINGACC_RING_MODE_RING + */ +int k3_nav_ringacc_ring_push_head(struct k3_nav_ring *ring, void *elem); + +/** + * k3_nav_ringacc_ring_pop_tail - pop element from the ring tail + * @ring: pointer on ring + * @elem: pointer on ring element buffer + * + * Push one ring element from the ring tail. Size of the ring element is + * determined by ring configuration &struct k3_nav_ring_cfg elm_size. + * + * Returns 0 on success, errno otherwise. + * Not Supported by ring modes: K3_NAV_RINGACC_RING_MODE_RING + */ +int k3_nav_ringacc_ring_pop_tail(struct k3_nav_ring *ring, void *elem); + +#endif /* __SOC_TI_K3_NAVSS_RINGACC_API_H_ */ diff --git a/include/linux/soc/ti/k3-sec-proxy.h b/include/linux/soc/ti/k3-sec-proxy.h new file mode 100644 index 0000000..f34854c --- /dev/null +++ b/include/linux/soc/ti/k3-sec-proxy.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Texas Instruments' K3 Secure proxy + * + * Copyright (C) 2017-2018 Texas Instruments Incorporated - http://www.ti.com/ + * Lokesh Vutla + * + */ + +#ifndef K3_SEC_PROXY_H +#define K3_SEC_PROXY_H + +/** + * struct k3_sec_proxy_msg - Secure proxy message structure + * @len: Length of data in the Buffer + * @buf: Buffer pointer + * + * This is the structure for data used in mbox_send() and mbox_recv(). + */ +struct k3_sec_proxy_msg { + size_t len; + u32 *buf; +}; + +#endif /* K3_SEC_PROXY_H */ diff --git a/include/linux/soc/ti/ti-udma.h b/include/linux/soc/ti/ti-udma.h new file mode 100644 index 0000000..e9d4226 --- /dev/null +++ b/include/linux/soc/ti/ti-udma.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi + */ + +#ifndef __TI_UDMA_H +#define __TI_UDMA_H + +/** + * struct ti_udma_drv_packet_data - TI UDMA transfer specific data + * + * @pkt_type: Packet Type - specific for each DMA client HW + * @dest_tag: Destination tag The source pointer. + * + * TI UDMA transfer specific data passed as part of DMA transfer to + * the DMA client HW in UDMA descriptors. + */ +struct ti_udma_drv_packet_data { + u32 pkt_type; + u32 dest_tag; +}; + +#endif /* __TI_UDMA_H */ diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h new file mode 100644 index 0000000..1cba8d9 --- /dev/null +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -0,0 +1,693 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Texas Instruments System Control Interface Protocol + * Based on include/linux/soc/ti/ti_sci_protocol.h from Linux. 
+ * + * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/ + * Nishanth Menon + * Lokesh Vutla + */ + +#ifndef __TISCI_PROTOCOL_H +#define __TISCI_PROTOCOL_H + +/** + * struct ti_sci_version_info - version information structure + * @abi_major: Major ABI version. Change here implies risk of backward + * compatibility break. + * @abi_minor: Minor ABI version. Change here implies new feature addition, + * or compatible change in ABI. + * @firmware_revision: Firmware revision (not usually used). + * @firmware_description: Firmware description (not usually used). + */ +struct ti_sci_version_info { + u8 abi_major; + u8 abi_minor; + u16 firmware_revision; + char firmware_description[32]; +}; + +struct ti_sci_handle; + +/** + * struct ti_sci_board_ops - Board config operations + * @board_config: Command to set the board configuration + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + * @board_config_rm: Command to set the board resource management + * configuration + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + * @board_config_security: Command to set the board security configuration + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + * @board_config_pm: Command to trigger and set the board power and clock + * management related configuration + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + */ +struct ti_sci_board_ops { + int (*board_config)(const struct ti_sci_handle *handle, + u64 addr, u32 size); + int (*board_config_rm)(const struct ti_sci_handle *handle, + u64 addr, u32 size); + int (*board_config_security)(const struct ti_sci_handle *handle, + u64 addr, u32 size); + int (*board_config_pm)(const struct ti_sci_handle *handle, + u64 addr, u32 size); +}; + +/** + * struct ti_sci_dev_ops - Device control operations + * @get_device: Command to request for device managed by TISCI + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + * @idle_device: Command to idle a device managed by TISCI + * Returns 0 for successful exclusive request, else returns + * corresponding error message. + * @put_device: Command to release a device managed by TISCI + * Returns 0 for successful release, else returns corresponding + * error message. + * @is_valid: Check if the device ID is a valid ID. + * Returns 0 if the ID is valid, else returns corresponding error. + * @get_context_loss_count: Command to retrieve context loss counter - this + * increments every time the device looses context. Overflow + * is possible. + * - count: pointer to u32 which will retrieve counter + * Returns 0 for successful information request and count has + * proper data, else returns corresponding error message. + * @is_idle: Reports back about device idle state + * - req_state: Returns requested idle state + * Returns 0 for successful information request and req_state and + * current_state has proper data, else returns corresponding error + * message. + * @is_stop: Reports back about device stop state + * - req_state: Returns requested stop state + * - current_state: Returns current stop state + * Returns 0 for successful information request and req_state and + * current_state has proper data, else returns corresponding error + * message. 
+ * @is_on: Reports back about device ON(or active) state + * - req_state: Returns requested ON state + * - current_state: Returns current ON state + * Returns 0 for successful information request and req_state and + * current_state has proper data, else returns corresponding error + * message. + * @is_transitioning: Reports back if the device is in the middle of transition + * of state. + * -current_state: Returns 'true' if currently transitioning. + * @set_device_resets: Command to configure resets for device managed by TISCI. + * -reset_state: Device specific reset bit field + * Returns 0 for successful request, else returns + * corresponding error message. + * @get_device_resets: Command to read state of resets for device managed + * by TISCI. + * -reset_state: pointer to u32 which will retrieve resets + * Returns 0 for successful request, else returns + * corresponding error message. + * @release_exclusive_devices: Command to release all the exclusive devices + * attached to this host. This should be used very carefully + * and only at the end of execution of your software. + * + * NOTE: for all these functions, the following parameters are generic in + * nature: + * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * -id: Device Identifier + * + * Request for the device - NOTE: the client MUST maintain integrity of + * usage count by balancing get_device with put_device. No refcounting is + * managed by driver for that purpose. + */ +struct ti_sci_dev_ops { + int (*get_device)(const struct ti_sci_handle *handle, u32 id); + int (*get_device_exclusive)(const struct ti_sci_handle *handle, u32 id); + int (*idle_device)(const struct ti_sci_handle *handle, u32 id); + int (*idle_device_exclusive)(const struct ti_sci_handle *handle, + u32 id); + int (*put_device)(const struct ti_sci_handle *handle, u32 id); + int (*is_valid)(const struct ti_sci_handle *handle, u32 id); + int (*get_context_loss_count)(const struct ti_sci_handle *handle, + u32 id, u32 *count); + int (*is_idle)(const struct ti_sci_handle *handle, u32 id, + bool *requested_state); + int (*is_stop)(const struct ti_sci_handle *handle, u32 id, + bool *req_state, bool *current_state); + int (*is_on)(const struct ti_sci_handle *handle, u32 id, + bool *req_state, bool *current_state); + int (*is_transitioning)(const struct ti_sci_handle *handle, u32 id, + bool *current_state); + int (*set_device_resets)(const struct ti_sci_handle *handle, u32 id, + u32 reset_state); + int (*get_device_resets)(const struct ti_sci_handle *handle, u32 id, + u32 *reset_state); + int (*release_exclusive_devices)(const struct ti_sci_handle *handle); +}; + +/** + * struct ti_sci_clk_ops - Clock control operations + * @get_clock: Request for activation of clock and manage by processor + * - needs_ssc: 'true' if Spread Spectrum clock is desired. + * - can_change_freq: 'true' if frequency change is desired. + * - enable_input_term: 'true' if input termination is desired. 
+ * @idle_clock: Request for Idling a clock managed by processor + * @put_clock: Release the clock to be auto managed by TISCI + * @is_auto: Is the clock being auto managed + * - req_state: state indicating if the clock is auto managed + * @is_on: Is the clock ON + * - req_state: if the clock is requested to be forced ON + * - current_state: if the clock is currently ON + * @is_off: Is the clock OFF + * - req_state: if the clock is requested to be forced OFF + * - current_state: if the clock is currently Gated + * @set_parent: Set the clock source of a specific device clock + * - parent_id: Parent clock identifier to set. + * @get_parent: Get the current clock source of a specific device clock + * - parent_id: Parent clock identifier which is the parent. + * @get_num_parents: Get the number of parents of the current clock source + * - num_parents: returns the number of parent clocks. + * @get_best_match_freq: Find a best matching frequency for a frequency + * range. + * - match_freq: Best matching frequency in Hz. + * @set_freq: Set the Clock frequency + * @get_freq: Get the Clock frequency + * - current_freq: Frequency in Hz that the clock is at. + * + * NOTE: for all these functions, the following parameters are generic in + * nature: + * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * -did: Device identifier this request is for + * -cid: Clock identifier for the device for this request. + * Each device has it's own set of clock inputs. This indexes + * which clock input to modify. + * -min_freq: The minimum allowable frequency in Hz. This is the minimum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * -target_freq: The target clock frequency in Hz. A frequency will be + * processed as close to this target frequency as possible. + * -max_freq: The maximum allowable frequency in Hz. This is the maximum + * allowable programmed frequency and does not account for clock + * tolerances and jitter. + * + * Request for the clock - NOTE: the client MUST maintain integrity of + * usage count by balancing get_clock with put_clock. No refcounting is + * managed by driver for that purpose. 
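+ * As an illustration, a typical get/set/put sequence looks roughly as
+ * below (a sketch only: @dev is the TISCI firmware device, device id 82
+ * and clock id 0 are placeholders, and error handling is omitted):
+ *
+ *	const struct ti_sci_handle *h = ti_sci_get_handle(dev);
+ *	const struct ti_sci_clk_ops *cops = &h->ops.clk_ops;
+ *	u64 freq;
+ *
+ *	cops->get_clock(h, 82, 0, false, true, false);
+ *	cops->set_freq(h, 82, 0, 24000000, 24000000, 24000000);
+ *	cops->get_freq(h, 82, 0, &freq);
+ *	cops->put_clock(h, 82, 0);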
+ */ +struct ti_sci_clk_ops { + int (*get_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid, + bool needs_ssc, bool can_change_freq, + bool enable_input_term); + int (*idle_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); + int (*put_clock)(const struct ti_sci_handle *handle, u32 did, u8 cid); + int (*is_auto)(const struct ti_sci_handle *handle, u32 did, u8 cid, + bool *req_state); + int (*is_on)(const struct ti_sci_handle *handle, u32 did, u8 cid, + bool *req_state, bool *current_state); + int (*is_off)(const struct ti_sci_handle *handle, u32 did, u8 cid, + bool *req_state, bool *current_state); + int (*set_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, + u8 parent_id); + int (*get_parent)(const struct ti_sci_handle *handle, u32 did, u8 cid, + u8 *parent_id); + int (*get_num_parents)(const struct ti_sci_handle *handle, u32 did, + u8 cid, u8 *num_parents); + int (*get_best_match_freq)(const struct ti_sci_handle *handle, u32 did, + u8 cid, u64 min_freq, u64 target_freq, + u64 max_freq, u64 *match_freq); + int (*set_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, + u64 min_freq, u64 target_freq, u64 max_freq); + int (*get_freq)(const struct ti_sci_handle *handle, u32 did, u8 cid, + u64 *current_freq); +}; + +/** + * struct ti_sci_rm_core_ops - Resource management core operations + * @get_range: Get a range of resources belonging to ti sci host. + * @get_rage_from_shost: Get a range of resources belonging to + * specified host id. + * - s_host: Host processing entity to which the + * resources are allocated + * + * NOTE: for these functions, all the parameters are consolidated and defined + * as below: + * - handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * - dev_id: TISCI device ID. + * - subtype: Resource assignment subtype that is being requested + * from the given device. + * - range_start: Start index of the resource range + * - range_end: Number of resources in the range + */ +struct ti_sci_rm_core_ops { + int (*get_range)(const struct ti_sci_handle *handle, u32 dev_id, + u8 subtype, u16 *range_start, u16 *range_num); + int (*get_range_from_shost)(const struct ti_sci_handle *handle, + u32 dev_id, u8 subtype, u8 s_host, + u16 *range_start, u16 *range_num); +}; + +/** + * struct ti_sci_core_ops - SoC Core Operations + * @reboot_device: Reboot the SoC + * Returns 0 for successful request(ideally should never return), + * else returns corresponding error value. + * @query_msmc: Query the size of available msmc + * Return 0 for successful query else appropriate error value. + */ +struct ti_sci_core_ops { + int (*reboot_device)(const struct ti_sci_handle *handle); + int (*query_msmc)(const struct ti_sci_handle *handle, + u64 *msmc_start, u64 *msmc_end); +}; + +/** + * struct ti_sci_proc_ops - Processor specific operations. + * + * @proc_request: Request for controlling a physical processor. + * The requesting host should be in the processor access list. + * @proc_release: Relinquish a physical processor control + * @proc_handover: Handover a physical processor control to another host + * in the permitted list. + * @set_proc_boot_cfg: Base configuration of the processor + * @set_proc_boot_ctrl: Setup limited control flags in specific cases. + * @proc_auth_boot_image: + * @get_proc_boot_status: Get the state of physical processor + * @proc_shutdown_no_wait: Shutdown a core without requesting or waiting for a + * response. 
+ * + * NOTE: for all these functions, the following parameters are generic in + * nature: + * -handle: Pointer to TISCI handle as retrieved by *ti_sci_get_handle + * -pid: Processor ID + * + */ +struct ti_sci_proc_ops { + int (*proc_request)(const struct ti_sci_handle *handle, u8 pid); + int (*proc_release)(const struct ti_sci_handle *handle, u8 pid); + int (*proc_handover)(const struct ti_sci_handle *handle, u8 pid, + u8 hid); + int (*set_proc_boot_cfg)(const struct ti_sci_handle *handle, u8 pid, + u64 bv, u32 cfg_set, u32 cfg_clr); + int (*set_proc_boot_ctrl)(const struct ti_sci_handle *handle, u8 pid, + u32 ctrl_set, u32 ctrl_clr); + int (*proc_auth_boot_image)(const struct ti_sci_handle *handle, + u64 *image_addr, u32 *image_size); + int (*get_proc_boot_status)(const struct ti_sci_handle *handle, u8 pid, + u64 *bv, u32 *cfg_flags, u32 *ctrl_flags, + u32 *sts_flags); + int (*proc_shutdown_no_wait)(const struct ti_sci_handle *handle, + u8 pid); +}; + +#define TI_SCI_RING_MODE_RING (0) +#define TI_SCI_RING_MODE_MESSAGE (1) +#define TI_SCI_RING_MODE_CREDENTIALS (2) +#define TI_SCI_RING_MODE_QM (3) + +#define TI_SCI_MSG_UNUSED_SECONDARY_HOST TI_SCI_RM_NULL_U8 + +/* RA config.addr_lo parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID BIT(0) +/* RA config.addr_hi parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID BIT(1) + /* RA config.count parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID BIT(2) +/* RA config.mode parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_MODE_VALID BIT(3) +/* RA config.size parameter is valid for RM ring configure TI_SCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4) +/* RA config.order_id parameter is valid for RM ring configure TISCI message */ +#define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5) + +#define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \ + (TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \ + TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID) + +/** + * struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations + * @config: configure the SoC Navigator Subsystem Ring Accelerator ring + * @get_config: get the SoC Navigator Subsystem Ring Accelerator ring + * configuration + */ +struct ti_sci_rm_ringacc_ops { + int (*config)(const struct ti_sci_handle *handle, + u32 valid_params, u16 nav_id, u16 index, + u32 addr_lo, u32 addr_hi, u32 count, u8 mode, + u8 size, u8 order_id + ); + int (*get_config)(const struct ti_sci_handle *handle, + u32 nav_id, u32 index, u8 *mode, + u32 *addr_lo, u32 *addr_hi, u32 *count, + u8 *size, u8 *order_id); +}; + +/** + * struct ti_sci_rm_psil_ops - PSI-L thread operations + * @pair: pair PSI-L source thread to a destination thread. + * If the src_thread is mapped to UDMA tchan, the corresponding channel's + * TCHAN_THRD_ID register is updated. + * If the dst_thread is mapped to UDMA rchan, the corresponding channel's + * RCHAN_THRD_ID register is updated. + * @unpair: unpair PSI-L source thread from a destination thread. + * If the src_thread is mapped to UDMA tchan, the corresponding channel's + * TCHAN_THRD_ID register is cleared. + * If the dst_thread is mapped to UDMA rchan, the corresponding channel's + * RCHAN_THRD_ID register is cleared. 
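+ * A pairing sketch (nav_id, src_thread and dst_thread are placeholders
+ * for the values describing the threads to be linked):
+ *
+ *	const struct ti_sci_rm_psil_ops *psil = &handle->ops.rm_psil_ops;
+ *	int ret;
+ *
+ *	ret = psil->pair(handle, nav_id, src_thread, dst_thread);
+ *	if (ret)
+ *		return ret;
+ *	... run the DMA transfer ...
+ *	psil->unpair(handle, nav_id, src_thread, dst_thread);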
+ */ +struct ti_sci_rm_psil_ops { + int (*pair)(const struct ti_sci_handle *handle, u32 nav_id, + u32 src_thread, u32 dst_thread); + int (*unpair)(const struct ti_sci_handle *handle, u32 nav_id, + u32 src_thread, u32 dst_thread); +}; + +/* UDMAP channel types */ +#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR 2 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR_SB 3 /* RX only */ +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR 10 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBVR 11 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR 12 +#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBVR 13 + +/* UDMAP channel atypes */ +#define TI_SCI_RM_UDMAP_ATYPE_PHYS 0 +#define TI_SCI_RM_UDMAP_ATYPE_INTERMEDIATE 1 +#define TI_SCI_RM_UDMAP_ATYPE_VIRTUAL 2 + +/* UDMAP channel scheduling priorities */ +#define TI_SCI_RM_UDMAP_SCHED_PRIOR_HIGH 0 +#define TI_SCI_RM_UDMAP_SCHED_PRIOR_MEDHIGH 1 +#define TI_SCI_RM_UDMAP_SCHED_PRIOR_MEDLOW 2 +#define TI_SCI_RM_UDMAP_SCHED_PRIOR_LOW 3 + +#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST 0 +#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_MONO 2 + +/* UDMAP TX/RX channel valid_params common declarations */ +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID BIT(2) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID BIT(3) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID BIT(4) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PRIORITY_VALID BIT(5) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_QOS_VALID BIT(6) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ORDER_ID_VALID BIT(7) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_SCHED_PRIORITY_VALID BIT(8) + +/** + * Configures a Navigator Subsystem UDMAP transmit channel + * + * Configures a Navigator Subsystem UDMAP transmit channel registers. + * See @ti_sci_msg_rm_udmap_tx_ch_cfg_req + */ +struct ti_sci_msg_rm_udmap_tx_ch_cfg { + u32 valid_params; +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID BIT(9) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID BIT(10) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13) + u16 nav_id; + u16 index; + u8 tx_pause_on_err; + u8 tx_filt_einfo; + u8 tx_filt_pswords; + u8 tx_atype; + u8 tx_chan_type; + u8 tx_supr_tdpkt; + u16 tx_fetch_size; + u8 tx_credit_count; + u16 txcq_qnum; + u8 tx_priority; + u8 tx_qos; + u8 tx_orderid; + u16 fdepth; + u8 tx_sched_priority; +}; + +/** + * Configures a Navigator Subsystem UDMAP receive channel + * + * Configures a Navigator Subsystem UDMAP receive channel registers. + * See @ti_sci_msg_rm_udmap_rx_ch_cfg_req + */ +struct ti_sci_msg_rm_udmap_rx_ch_cfg { + u32 valid_params; +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID BIT(9) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID BIT(10) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID BIT(11) +#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID BIT(12) + u16 nav_id; + u16 index; + u16 rx_fetch_size; + u16 rxcq_qnum; + u8 rx_priority; + u8 rx_qos; + u8 rx_orderid; + u8 rx_sched_priority; + u16 flowid_start; + u16 flowid_cnt; + u8 rx_pause_on_err; + u8 rx_atype; + u8 rx_chan_type; + u8 rx_ignore_short; + u8 rx_ignore_long; +}; + +/** + * Configures a Navigator Subsystem UDMAP receive flow + * + * Configures a Navigator Subsystem UDMAP receive flow's registers. 
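+ * For example, routing a flow's host descriptors to a given receive
+ * queue can be sketched as below (nav_id, flow_index and rxcq_qnum are
+ * placeholders; the bits set in @valid_params select which of the
+ * fields below are applied):
+ *
+ *	struct ti_sci_msg_rm_udmap_flow_cfg cfg = {
+ *		.valid_params =
+ *			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
+ *			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID,
+ *		.nav_id = nav_id,
+ *		.flow_index = flow_index,
+ *		.rx_desc_type = TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST,
+ *		.rx_dest_qnum = rxcq_qnum,
+ *	};
+ *
+ *	handle->ops.rm_udmap_ops.rx_flow_cfg(handle, &cfg);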
+ * See @tis_ci_msg_rm_udmap_flow_cfg_req + */ +struct ti_sci_msg_rm_udmap_flow_cfg { + u32 valid_params; +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID BIT(0) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID BIT(1) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID BIT(2) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID BIT(3) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SOP_OFFSET_VALID BIT(4) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID BIT(5) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_VALID BIT(6) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_VALID BIT(7) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_VALID BIT(8) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_VALID BIT(9) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID BIT(10) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID BIT(11) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID BIT(12) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID BIT(13) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID BIT(14) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID BIT(15) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID BIT(16) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID BIT(17) +#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID BIT(18) + u16 nav_id; + u16 flow_index; + u8 rx_einfo_present; + u8 rx_psinfo_present; + u8 rx_error_handling; + u8 rx_desc_type; + u16 rx_sop_offset; + u16 rx_dest_qnum; + u8 rx_src_tag_hi; + u8 rx_src_tag_lo; + u8 rx_dest_tag_hi; + u8 rx_dest_tag_lo; + u8 rx_src_tag_hi_sel; + u8 rx_src_tag_lo_sel; + u8 rx_dest_tag_hi_sel; + u8 rx_dest_tag_lo_sel; + u16 rx_fdq0_sz0_qnum; + u16 rx_fdq1_qnum; + u16 rx_fdq2_qnum; + u16 rx_fdq3_qnum; + u8 rx_ps_location; +}; + +/** + * struct ti_sci_rm_udmap_ops - UDMA Management operations + * @tx_ch_cfg: configure SoC Navigator Subsystem UDMA transmit channel. + * @rx_ch_cfg: configure SoC Navigator Subsystem UDMA receive channel. + * @rx_flow_cfg: configure SoC Navigator Subsystem UDMA receive flow. + */ +struct ti_sci_rm_udmap_ops { + int (*tx_ch_cfg)(const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params); + int (*rx_ch_cfg)(const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params); + int (*rx_flow_cfg)( + const struct ti_sci_handle *handle, + const struct ti_sci_msg_rm_udmap_flow_cfg *params); +}; + +/** + * struct ti_sci_msg_fwl_region_cfg - Request and Response for firewalls settings + * + * @fwl_id: Firewall ID in question + * @region: Region or channel number to set config info + * This field is unused in case of a simple firewall and must be initialized + * to zero. In case of a region based firewall, this field indicates the + * region in question. 
(index starting from 0) In case of a channel based + * firewall, this field indicates the channel in question (index starting + * from 0) + * @n_permission_regs: Number of permission registers to set + * @control: Contents of the firewall CONTROL register to set + * @permissions: Contents of the firewall PERMISSION register to set + * @start_address: Contents of the firewall START_ADDRESS register to set + * @end_address: Contents of the firewall END_ADDRESS register to set + */ +struct ti_sci_msg_fwl_region { + u16 fwl_id; + u16 region; + u32 n_permission_regs; + u32 control; + u32 permissions[3]; + u64 start_address; + u64 end_address; +} __packed; + +/** + * \brief Request and Response for firewall owner change + * + * @fwl_id: Firewall ID in question + * @region: Region or channel number to set config info + * This field is unused in case of a simple firewall and must be initialized + * to zero. In case of a region based firewall, this field indicates the + * region in question. (index starting from 0) In case of a channel based + * firewall, this field indicates the channel in question (index starting + * from 0) + * @n_permission_regs: Number of permission registers <= 3 + * @control: Control register value for this region + * @owner_index: New owner index to change to. Owner indexes are setup in DMSC firmware boot configuration data + * @owner_privid: New owner priv-id, used to lookup owner_index is not known, must be set to zero otherwise + * @owner_permission_bits: New owner permission bits + */ +struct ti_sci_msg_fwl_owner { + u16 fwl_id; + u16 region; + u8 owner_index; + u8 owner_privid; + u16 owner_permission_bits; +} __packed; + +/** + * struct ti_sci_fwl_ops - Firewall specific operations + * @set_fwl_region: Request for configuring the firewall permissions. + * @get_fwl_region: Request for retrieving the firewall permissions. + * @change_fwl_owner: Request for a change of firewall owner. + */ +struct ti_sci_fwl_ops { + int (*set_fwl_region)(const struct ti_sci_handle *handle, const struct ti_sci_msg_fwl_region *region); + int (*get_fwl_region)(const struct ti_sci_handle *handle, struct ti_sci_msg_fwl_region *region); + int (*change_fwl_owner)(const struct ti_sci_handle *handle, struct ti_sci_msg_fwl_owner *owner); +}; + +/** + * struct ti_sci_ops - Function support for TI SCI + * @board_ops: Miscellaneous operations + * @dev_ops: Device specific operations + * @clk_ops: Clock specific operations + * @core_ops: Core specific operations + * @proc_ops: Processor specific operations + * @ring_ops: Ring Accelerator Management operations + * @fw_ops: Firewall specific operations + */ +struct ti_sci_ops { + struct ti_sci_board_ops board_ops; + struct ti_sci_dev_ops dev_ops; + struct ti_sci_clk_ops clk_ops; + struct ti_sci_core_ops core_ops; + struct ti_sci_proc_ops proc_ops; + struct ti_sci_rm_core_ops rm_core_ops; + struct ti_sci_rm_ringacc_ops rm_ring_ops; + struct ti_sci_rm_psil_ops rm_psil_ops; + struct ti_sci_rm_udmap_ops rm_udmap_ops; + struct ti_sci_fwl_ops fwl_ops; +}; + +/** + * struct ti_sci_handle - Handle returned to TI SCI clients for usage. + * @ops: operations that are made available to TI SCI clients + * @version: structure containing version information + */ +struct ti_sci_handle { + struct ti_sci_ops ops; + struct ti_sci_version_info version; +}; + +#define TI_SCI_RESOURCE_NULL 0xffff + +/** + * struct ti_sci_resource_desc - Description of TI SCI resource instance range. + * @start: Start index of the resource. + * @num: Number of resources. 
+ * @res_map: Bitmap to manage the allocation of these resources. + */ +struct ti_sci_resource_desc { + u16 start; + u16 num; + unsigned long *res_map; +}; + +/** + * struct ti_sci_resource - Structure representing a resource assigned + * to a device. + * @sets: Number of sets available from this resource type + * @desc: Array of resource descriptors. + */ +struct ti_sci_resource { + u16 sets; + struct ti_sci_resource_desc *desc; +}; + +#if IS_ENABLED(CONFIG_TI_SCI_PROTOCOL) + +const struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *dev); +const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev); +const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev, + const char *property); +u16 ti_sci_get_free_resource(struct ti_sci_resource *res); +void ti_sci_release_resource(struct ti_sci_resource *res, u16 id); +struct ti_sci_resource * +devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, + struct udevice *dev, u32 dev_id, char *of_prop); + +#else /* CONFIG_TI_SCI_PROTOCOL */ + +static inline +const struct ti_sci_handle *ti_sci_get_handle_from_sysfw(struct udevice *dev) +{ + return ERR_PTR(-EINVAL); +} + +static inline const struct ti_sci_handle *ti_sci_get_handle(struct udevice *dev) +{ + return ERR_PTR(-EINVAL); +} + +static inline +const struct ti_sci_handle *ti_sci_get_by_phandle(struct udevice *dev, + const char *property) +{ + return ERR_PTR(-EINVAL); +} + +static inline u16 ti_sci_get_free_resource(struct ti_sci_resource *res) +{ + return TI_SCI_RESOURCE_NULL; +} + +static inline void ti_sci_release_resource(struct ti_sci_resource *res, u16 id) +{ +} + +static inline struct ti_sci_resource * +devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle, + struct udevice *dev, u32 dev_id, char *of_prop) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_TI_SCI_PROTOCOL */ + +#endif /* __TISCI_PROTOCOL_H */ diff --git a/include/linux/stat.h b/include/linux/stat.h new file mode 100644 index 0000000..5eba633 --- /dev/null +++ b/include/linux/stat.h @@ -0,0 +1,158 @@ +#ifndef _LINUX_STAT_H +#define _LINUX_STAT_H + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#define S_IFMT 00170000 /* type of file */ +#define S_IFSOCK 0140000 /* named socket */ +#define S_IFLNK 0120000 /* symbolic link */ +#define S_IFREG 0100000 /* regular */ +#define S_IFBLK 0060000 /* block special */ +#define S_IFDIR 0040000 /* directory */ +#define S_IFCHR 0020000 /* character special */ +#define S_IFIFO 0010000 /* fifo */ +#define S_ISUID 0004000 /* set user id on execution */ +#define S_ISGID 0002000 /* set group id on execution */ +#define S_ISVTX 0001000 /* save swapped text even after use */ + +#define S_ISLNK(m) (((m) & S_IFMT) == S_IFLNK) +#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) +#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +#define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) +#define S_ISBLK(m) (((m) & S_IFMT) == S_IFBLK) +#define S_ISFIFO(m) (((m) & S_IFMT) == S_IFIFO) +#define S_ISSOCK(m) (((m) & S_IFMT) == S_IFSOCK) + +#define S_IRWXU 00700 /* rwx for owner */ +#define S_IRUSR 00400 /* read permission for owner */ +#define S_IWUSR 00200 /* write permission for owner */ +#define S_IXUSR 00100 /* execute/search permission for owner */ + +#define S_IRWXG 00070 /* rwx for group */ +#define S_IRGRP 00040 /* read permission for group */ +#define S_IWGRP 00020 /* write permission for group */ +#define S_IXGRP 00010 /* execute/search permission for group */ + +#define S_IRWXO 00007 /* rwx for other */ +#define S_IROTH 00004 /* read permission for 
other */ +#define S_IWOTH 00002 /* read permission for other */ +#define S_IXOTH 00001 /* execute/search permission for other */ + +#ifdef __PPC__ + +struct stat { + dev_t st_dev; /* file system id */ + ino_t st_ino; /* file id */ + mode_t st_mode; /* ownership/protection */ + nlink_t st_nlink; /* number of links */ + uid_t st_uid; /* user id */ + gid_t st_gid; /* group id */ + dev_t st_rdev; + off_t st_size; /* file size in # of bytes */ + unsigned long st_blksize; /* block size */ + unsigned long st_blocks; /* file size in # of blocks */ + unsigned long st_atime; /* time file was last accessed */ + unsigned long __unused1; + unsigned long st_mtime; /* time file was last modified */ + unsigned long __unused2; + unsigned long st_ctime; /* time file status was last changed */ + unsigned long __unused3; + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* __PPC__ */ + +#if defined (__ARM__) || defined (__I386__) || defined (__M68K__) || defined (__bfin__) ||\ + defined (__microblaze__) || defined (__nios2__) + +struct stat { + unsigned short st_dev; + unsigned short __pad1; + unsigned long st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned short st_rdev; + unsigned short __pad2; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long __unused1; + unsigned long st_mtime; + unsigned long __unused2; + unsigned long st_ctime; + unsigned long __unused3; + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* __ARM__ */ + +#if defined (__MIPS__) + +struct stat { + dev_t st_dev; + long st_pad1[3]; + ino_t st_ino; + mode_t st_mode; + nlink_t st_nlink; + uid_t st_uid; + gid_t st_gid; + dev_t st_rdev; + long st_pad2[2]; + off_t st_size; + long st_pad3; + /* + * Actually this should be timestruc_t st_atime, st_mtime and st_ctime + * but we don't have it under Linux. 
+ */ + time_t st_atime; + long reserved0; + time_t st_mtime; + long reserved1; + time_t st_ctime; + long reserved2; + long st_blksize; + long st_blocks; + long st_pad4[14]; +}; + +#endif /* __MIPS__ */ + +#if defined(__SH__) || defined(__XTENSA__) + +struct stat { + unsigned long st_dev; + unsigned long st_ino; + unsigned short st_mode; + unsigned short st_nlink; + unsigned short st_uid; + unsigned short st_gid; + unsigned long st_rdev; + unsigned long st_size; + unsigned long st_blksize; + unsigned long st_blocks; + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + unsigned long __unused4; + unsigned long __unused5; +}; + +#endif /* __SH__ || __XTENSA__ */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/include/linux/stddef.h b/include/linux/stddef.h new file mode 100644 index 0000000..c540f61 --- /dev/null +++ b/include/linux/stddef.h @@ -0,0 +1,20 @@ +#ifndef _LINUX_STDDEF_H +#define _LINUX_STDDEF_H + +#undef NULL +#if defined(__cplusplus) +#define NULL 0 +#else +#define NULL ((void *)0) +#endif + +#ifndef _SIZE_T +#include +#endif + +#ifndef __CHECKER__ +#undef offsetof +#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) +#endif + +#endif diff --git a/include/linux/string.h b/include/linux/string.h new file mode 100644 index 0000000..5d63be4 --- /dev/null +++ b/include/linux/string.h @@ -0,0 +1,131 @@ +#ifndef _LINUX_STRING_H_ +#define _LINUX_STRING_H_ + +#include /* for size_t */ +#include /* for NULL */ + +#ifdef __cplusplus +extern "C" { +#endif + +extern char * ___strtok; +extern char * strpbrk(const char *,const char *); +extern char * strtok(char *,const char *); +extern char * strsep(char **,const char *); +extern __kernel_size_t strspn(const char *,const char *); + + +/* + * Include machine specific inline routines + */ +#include + +#ifndef __HAVE_ARCH_STRCPY +extern char * strcpy(char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRNCPY +extern char * strncpy(char *,const char *, __kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRLCPY +size_t strlcpy(char *, const char *, size_t); +#endif +#ifndef __HAVE_ARCH_STRCAT +extern char * strcat(char *, const char *); +#endif +#ifndef __HAVE_ARCH_STRNCAT +extern char * strncat(char *, const char *, __kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRCMP +extern int strcmp(const char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRNCMP +extern int strncmp(const char *,const char *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_STRCASECMP +int strcasecmp(const char *s1, const char *s2); +#endif +#ifndef __HAVE_ARCH_STRNCASECMP +extern int strncasecmp(const char *s1, const char *s2, __kernel_size_t len); +#endif +#ifndef __HAVE_ARCH_STRCHR +extern char * strchr(const char *,int); +#endif + +/** + * strchrnul() - return position of a character in the string, or end of string + * + * The strchrnul() function is like strchr() except that if c is not found + * in s, then it returns a pointer to the nul byte at the end of s, rather than + * NULL + * @s: string to search + * @c: character to search for + * @return position of @c in @s, or end of @s if not found + */ +const char *strchrnul(const char *s, int c); + +#ifndef __HAVE_ARCH_STRRCHR +extern char * strrchr(const char *,int); +#endif +#include +#ifndef __HAVE_ARCH_STRSTR +extern char * strstr(const char *,const char *); +#endif +#ifndef __HAVE_ARCH_STRLEN +extern __kernel_size_t strlen(const char *); +#endif +#ifndef __HAVE_ARCH_STRNLEN +extern 
__kernel_size_t strnlen(const char *,__kernel_size_t); +#endif + +#ifndef __HAVE_ARCH_STRCSPN +/** + * strcspn() - find span of string without given characters + * + * Calculates the length of the initial segment of @s which consists entirely + * of bsytes not in reject. + * + * @s: string to search + * @reject: strings which cause the search to halt + * @return number of characters at the start of @s which are not in @reject + */ +size_t strcspn(const char *s, const char *reject); +#endif + +#ifndef __HAVE_ARCH_STRDUP +extern char * strdup(const char *); +#endif +extern char * strndup(const char *, size_t); +#ifndef __HAVE_ARCH_STRSWAB +extern char * strswab(const char *); +#endif + +#ifndef __HAVE_ARCH_MEMSET +extern void * memset(void *,int,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCPY +extern void * memcpy(void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMMOVE +extern void * memmove(void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMSCAN +extern void * memscan(void *,int,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCMP +extern int memcmp(const void *,const void *,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCHR +extern void * memchr(const void *,int,__kernel_size_t); +#endif +#ifndef __HAVE_ARCH_MEMCHR_INV +void *memchr_inv(const void *, int, size_t); +#endif + +unsigned long ustrtoul(const char *cp, char **endp, unsigned int base); +unsigned long long ustrtoull(const char *cp, char **endp, unsigned int base); + +#ifdef __cplusplus +} +#endif + +#endif /* _LINUX_STRING_H_ */ diff --git a/include/linux/stringify.h b/include/linux/stringify.h new file mode 100644 index 0000000..841cec8 --- /dev/null +++ b/include/linux/stringify.h @@ -0,0 +1,12 @@ +#ifndef __LINUX_STRINGIFY_H +#define __LINUX_STRINGIFY_H + +/* Indirect stringification. Doing two levels allows the parameter to be a + * macro itself. For example, compile with -DFOO=bar, __stringify(FOO) + * converts to "bar". + */ + +#define __stringify_1(x...) #x +#define __stringify(x...) __stringify_1(x) + +#endif /* !__LINUX_STRINGIFY_H */ diff --git a/include/linux/time.h b/include/linux/time.h new file mode 100644 index 0000000..b8d298e --- /dev/null +++ b/include/linux/time.h @@ -0,0 +1,153 @@ +#ifndef _LINUX_TIME_H +#define _LINUX_TIME_H + +#include + +#define _DEFUN(a,b,c) a(c) +#define _CONST const +#define _AND , + +#define _REENT_ONLY + +#define SECSPERMIN 60L +#define MINSPERHOUR 60L +#define HOURSPERDAY 24L +#define SECSPERHOUR (SECSPERMIN * MINSPERHOUR) +#define SECSPERDAY (SECSPERHOUR * HOURSPERDAY) +#define DAYSPERWEEK 7 +#define MONSPERYEAR 12 + +#define YEAR_BASE 1900 +#define EPOCH_YEAR 1970 +#define EPOCH_WDAY 4 + +#define isleap(y) ((((y) % 4) == 0 && ((y) % 100) != 0) || ((y) % 400) == 0) + + +/* Used by other time functions. */ +struct tm { + int tm_sec; /* Seconds. [0-60] (1 leap second) */ + int tm_min; /* Minutes. [0-59] */ + int tm_hour; /* Hours. [0-23] */ + int tm_mday; /* Day. [1-31] */ + int tm_mon; /* Month. [0-11] */ + int tm_year; /* Year - 1900. */ + int tm_wday; /* Day of week. [0-6] */ + int tm_yday; /* Days in year.[0-365] */ + int tm_isdst; /* DST. [-1/0/1]*/ + +# ifdef __USE_BSD + long int tm_gmtoff; /* Seconds east of UTC. */ + __const char *tm_zone; /* Timezone abbreviation. */ +# else + long int __tm_gmtoff; /* Seconds east of UTC. */ + __const char *__tm_zone; /* Timezone abbreviation. 
*/ +# endif +}; + +static inline char * +_DEFUN (asctime_r, (tim_p, result), + _CONST struct tm *tim_p _AND + char *result) +{ + static _CONST char day_name[7][3] = { + "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" + }; + static _CONST char mon_name[12][3] = { + "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" + }; + + sprintf (result, "%.3s %.3s %.2d %.2d:%.2d:%.2d %d\n", + day_name[tim_p->tm_wday], + mon_name[tim_p->tm_mon], + tim_p->tm_mday, tim_p->tm_hour, tim_p->tm_min, + tim_p->tm_sec, 1900 + tim_p->tm_year); + return result; +} + +static inline struct tm * +_DEFUN (localtime_r, (tim_p, res), + _CONST time_t * tim_p _AND + struct tm *res) +{ + static _CONST int mon_lengths[2][MONSPERYEAR] = { + {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, + {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31} + } ; + + static _CONST int year_lengths[2] = { + 365, + 366 + } ; + + long days, rem; + int y; + int yleap; + _CONST int *ip; + + days = ((long) *tim_p) / SECSPERDAY; + rem = ((long) *tim_p) % SECSPERDAY; + while (rem < 0) + { + rem += SECSPERDAY; + --days; + } + + /* compute hour, min, and sec */ + res->tm_hour = (int) (rem / SECSPERHOUR); + rem %= SECSPERHOUR; + res->tm_min = (int) (rem / SECSPERMIN); + res->tm_sec = (int) (rem % SECSPERMIN); + + /* compute day of week */ + if ((res->tm_wday = ((EPOCH_WDAY + days) % DAYSPERWEEK)) < 0) + res->tm_wday += DAYSPERWEEK; + + /* compute year & day of year */ + y = EPOCH_YEAR; + if (days >= 0) + { + for (;;) + { + yleap = isleap(y); + if (days < year_lengths[yleap]) + break; + y++; + days -= year_lengths[yleap]; + } + } + else + { + do + { + --y; + yleap = isleap(y); + days += year_lengths[yleap]; + } while (days < 0); + } + + res->tm_year = y - YEAR_BASE; + res->tm_yday = days; + ip = mon_lengths[yleap]; + for (res->tm_mon = 0; days >= ip[res->tm_mon]; ++res->tm_mon) + days -= ip[res->tm_mon]; + res->tm_mday = days + 1; + + /* set daylight saving time flag */ + res->tm_isdst = -1; + + return (res); +} + +static inline char * +_DEFUN (ctime_r, (tim_p, result), + _CONST time_t * tim_p _AND + char * result) + +{ + struct tm tm; + return asctime_r (localtime_r (tim_p, &tm), result); +} + +#endif diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h new file mode 100644 index 0000000..eb5b74a --- /dev/null +++ b/include/linux/typecheck.h @@ -0,0 +1,24 @@ +#ifndef TYPECHECK_H_INCLUDED +#define TYPECHECK_H_INCLUDED + +/* + * Check at compile time that something is of a particular type. + * Always evaluates to 1 so you may use it easily in comparisons. + */ +#define typecheck(type,x) \ +({ type __dummy; \ + typeof(x) __dummy2; \ + (void)(&__dummy == &__dummy2); \ + 1; \ +}) + +/* + * Check at compile time that 'function' is a certain type, or is a pointer + * to that type (needs to use typedef for the function type.) 
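+ *
+ * For instance, given a function-pointer typedef, the check can be
+ * sketched as (inside a function body):
+ *
+ *	typedef void (*done_fn_t)(int status);
+ *	void my_done(int status);
+ *
+ *	typecheck_fn(done_fn_t, my_done);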
+ */ +#define typecheck_fn(type,function) \ +({ typeof(type) __tmp = function; \ + (void)__tmp; \ +}) + +#endif /* TYPECHECK_H_INCLUDED */ diff --git a/include/linux/types.h b/include/linux/types.h new file mode 100644 index 0000000..cc6f7cb --- /dev/null +++ b/include/linux/types.h @@ -0,0 +1,164 @@ +#ifndef _LINUX_TYPES_H +#define _LINUX_TYPES_H + +#include +#include +#include + +#ifndef __KERNEL_STRICT_NAMES + +typedef __kernel_fd_set fd_set; +typedef __kernel_dev_t dev_t; +typedef __kernel_ino_t ino_t; +typedef __kernel_mode_t mode_t; +typedef __kernel_nlink_t nlink_t; +typedef __kernel_off_t off_t; +typedef __kernel_pid_t pid_t; +typedef __kernel_daddr_t daddr_t; +typedef __kernel_key_t key_t; +typedef __kernel_suseconds_t suseconds_t; + +#ifdef __KERNEL__ +typedef __kernel_uid32_t uid_t; +typedef __kernel_gid32_t gid_t; +typedef __kernel_uid16_t uid16_t; +typedef __kernel_gid16_t gid16_t; + +typedef unsigned long uintptr_t; + +#ifdef CONFIG_UID16 +/* This is defined by include/asm-{arch}/posix_types.h */ +typedef __kernel_old_uid_t old_uid_t; +typedef __kernel_old_gid_t old_gid_t; +#endif /* CONFIG_UID16 */ + +/* libc5 includes this file to define uid_t, thus uid_t can never change + * when it is included by non-kernel code + */ +#else +typedef __kernel_uid_t uid_t; +typedef __kernel_gid_t gid_t; +#endif /* __KERNEL__ */ + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +typedef __kernel_loff_t loff_t; +#endif + +/* + * The following typedefs are also protected by individual ifdefs for + * historical reasons: + */ +#ifndef _SIZE_T +#define _SIZE_T +typedef __kernel_size_t size_t; +#endif + +#ifndef _SSIZE_T +#define _SSIZE_T +typedef __kernel_ssize_t ssize_t; +#endif + +#ifndef _PTRDIFF_T +#define _PTRDIFF_T +typedef __kernel_ptrdiff_t ptrdiff_t; +#endif + +#ifndef _TIME_T +#define _TIME_T +typedef __kernel_time_t time_t; +#endif + +#ifndef _CLOCK_T +#define _CLOCK_T +typedef __kernel_clock_t clock_t; +#endif + +#ifndef _CADDR_T +#define _CADDR_T +typedef __kernel_caddr_t caddr_t; +#endif + +/* bsd */ +typedef unsigned char u_char; +typedef unsigned short u_short; +typedef unsigned int u_int; +typedef unsigned long u_long; + +/* sysv */ +typedef unsigned char unchar; +typedef unsigned short ushort; +typedef unsigned int uint; +typedef unsigned long ulong; + +#ifndef __BIT_TYPES_DEFINED__ +#define __BIT_TYPES_DEFINED__ + +typedef __u8 u_int8_t; +typedef __s8 int8_t; +typedef __u16 u_int16_t; +typedef __s16 int16_t; +typedef __u32 u_int32_t; +typedef __s32 int32_t; + +#endif /* !(__BIT_TYPES_DEFINED__) */ + +typedef __u8 uint8_t; +typedef __u16 uint16_t; +typedef __u32 uint32_t; + +#if defined(__GNUC__) && !defined(__STRICT_ANSI__) +typedef __u64 uint64_t; +typedef __u64 u_int64_t; +typedef __s64 int64_t; +#endif + +#endif /* __KERNEL_STRICT_NAMES */ + +/* this is a special 64bit data type that is 8-byte aligned */ +#define aligned_u64 __u64 __aligned(8) +#define aligned_be64 __be64 __aligned(8) +#define aligned_le64 __le64 __aligned(8) + +#ifdef __KERNEL__ +typedef phys_addr_t resource_size_t; +#endif + +/* + * Below are truly Linux-specific types that should never collide with + * any application/library that wants linux/types.h. 
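+ *
+ * As a small illustration, a wire-format field can carry an endian
+ * annotation and be converted explicitly (a sketch; the conversion
+ * helpers come from the byteorder headers):
+ *
+ *	__le16 wlen = cpu_to_le16(len);
+ *	u16 host_len = le16_to_cpu(wlen);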
+ */ +#ifdef __CHECKER__ +#define __bitwise__ __attribute__((bitwise)) +#else +#define __bitwise__ +#endif +#ifdef __CHECK_ENDIAN__ +#define __bitwise __bitwise__ +#else +#define __bitwise +#endif + +typedef __u16 __bitwise __le16; +typedef __u16 __bitwise __be16; +typedef __u32 __bitwise __le32; +typedef __u32 __bitwise __be32; +#if defined(__GNUC__) +typedef __u64 __bitwise __le64; +typedef __u64 __bitwise __be64; +#endif +typedef __u16 __bitwise __sum16; +typedef __u32 __bitwise __wsum; + +typedef unsigned __bitwise__ gfp_t; + +struct ustat { + __kernel_daddr_t f_tfree; + __kernel_ino_t f_tinode; + char f_fname[6]; + char f_fpack[6]; +}; + +#define DECLARE_BITMAP(name, bits) \ + unsigned long name[BITS_TO_LONGS(bits)] + +#endif /* _LINUX_TYPES_H */ diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h new file mode 100644 index 0000000..5f46eee --- /dev/null +++ b/include/linux/unaligned/access_ok.h @@ -0,0 +1,66 @@ +#ifndef _LINUX_UNALIGNED_ACCESS_OK_H +#define _LINUX_UNALIGNED_ACCESS_OK_H + +#include + +static inline u16 get_unaligned_le16(const void *p) +{ + return le16_to_cpup((__le16 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return le32_to_cpup((__le32 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return le64_to_cpup((__le64 *)p); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return be16_to_cpup((__be16 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return be32_to_cpup((__be32 *)p); +} + +static inline u64 get_unaligned_be64(const void *p) +{ + return be64_to_cpup((__be64 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + *((__le16 *)p) = cpu_to_le16(val); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + *((__le32 *)p) = cpu_to_le32(val); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + *((__le64 *)p) = cpu_to_le64(val); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + *((__be16 *)p) = cpu_to_be16(val); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + *((__be32 *)p) = cpu_to_be32(val); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + *((__be64 *)p) = cpu_to_be64(val); +} + +#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */ diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h new file mode 100644 index 0000000..9356b24 --- /dev/null +++ b/include/linux/unaligned/be_byteshift.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H +#define _LINUX_UNALIGNED_BE_BYTESHIFT_H + +#include + +static inline u16 __get_unaligned_be16(const u8 *p) +{ + return p[0] << 8 | p[1]; +} + +static inline u32 __get_unaligned_be32(const u8 *p) +{ + return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; +} + +static inline u64 __get_unaligned_be64(const u8 *p) +{ + return (u64)__get_unaligned_be32(p) << 32 | + __get_unaligned_be32(p + 4); +} + +static inline void __put_unaligned_be16(u16 val, u8 *p) +{ + *p++ = val >> 8; + *p++ = val; +} + +static inline void __put_unaligned_be32(u32 val, u8 *p) +{ + __put_unaligned_be16(val >> 16, p); + __put_unaligned_be16(val, p + 2); +} + +static inline void __put_unaligned_be64(u64 val, u8 *p) +{ + __put_unaligned_be32(val >> 32, p); + __put_unaligned_be32(val, p + 4); +} + +static inline u16 get_unaligned_be16(const void *p) +{ + return __get_unaligned_be16((const u8 *)p); +} + +static inline u32 get_unaligned_be32(const void *p) +{ + return __get_unaligned_be32((const u8 *)p); +} + +static inline u64 
get_unaligned_be64(const void *p) +{ + return __get_unaligned_be64((const u8 *)p); +} + +static inline void put_unaligned_be16(u16 val, void *p) +{ + __put_unaligned_be16(val, p); +} + +static inline void put_unaligned_be32(u32 val, void *p) +{ + __put_unaligned_be32(val, p); +} + +static inline void put_unaligned_be64(u64 val, void *p) +{ + __put_unaligned_be64(val, p); +} + +#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */ diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h new file mode 100644 index 0000000..02d97ff --- /dev/null +++ b/include/linux/unaligned/generic.h @@ -0,0 +1,68 @@ +#ifndef _LINUX_UNALIGNED_GENERIC_H +#define _LINUX_UNALIGNED_GENERIC_H + +/* + * Cause a link-time error if we try an unaligned access other than + * 1,2,4 or 8 bytes long + */ +extern void __bad_unaligned_access_size(void); + +#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ + __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ + __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ + __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ + __bad_unaligned_access_size())))); \ + })) + +#define __put_unaligned_le(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_le16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_le32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_le64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#define __put_unaligned_be(val, ptr) ({ \ + void *__gu_p = (ptr); \ + switch (sizeof(*(ptr))) { \ + case 1: \ + *(u8 *)__gu_p = (__force u8)(val); \ + break; \ + case 2: \ + put_unaligned_be16((__force u16)(val), __gu_p); \ + break; \ + case 4: \ + put_unaligned_be32((__force u32)(val), __gu_p); \ + break; \ + case 8: \ + put_unaligned_be64((__force u64)(val), __gu_p); \ + break; \ + default: \ + __bad_unaligned_access_size(); \ + break; \ + } \ + (void)0; }) + +#endif /* _LINUX_UNALIGNED_GENERIC_H */ diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h new file mode 100644 index 0000000..be376fb --- /dev/null +++ b/include/linux/unaligned/le_byteshift.h @@ -0,0 +1,70 @@ +#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H +#define _LINUX_UNALIGNED_LE_BYTESHIFT_H + +#include + +static inline u16 __get_unaligned_le16(const u8 *p) +{ + return p[0] | p[1] << 8; +} + +static inline u32 __get_unaligned_le32(const u8 *p) +{ + return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; +} + +static inline u64 __get_unaligned_le64(const u8 *p) +{ + return (u64)__get_unaligned_le32(p + 4) << 32 | + __get_unaligned_le32(p); +} + +static inline void __put_unaligned_le16(u16 val, u8 *p) +{ + *p++ = val; + *p++ = val >> 8; +} + +static inline void __put_unaligned_le32(u32 val, u8 *p) +{ + __put_unaligned_le16(val >> 16, p + 2); + __put_unaligned_le16(val, p); +} + +static inline void __put_unaligned_le64(u64 val, u8 *p) +{ + 
__put_unaligned_le32(val >> 32, p + 4); + __put_unaligned_le32(val, p); +} + +static inline u16 get_unaligned_le16(const void *p) +{ + return __get_unaligned_le16((const u8 *)p); +} + +static inline u32 get_unaligned_le32(const void *p) +{ + return __get_unaligned_le32((const u8 *)p); +} + +static inline u64 get_unaligned_le64(const void *p) +{ + return __get_unaligned_le64((const u8 *)p); +} + +static inline void put_unaligned_le16(u16 val, void *p) +{ + __put_unaligned_le16(val, p); +} + +static inline void put_unaligned_le32(u32 val, void *p) +{ + __put_unaligned_le32(val, p); +} + +static inline void put_unaligned_le64(u64 val, void *p) +{ + __put_unaligned_le64(val, p); +} + +#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */ diff --git a/include/linux/usb/at91_udc.h b/include/linux/usb/at91_udc.h new file mode 100644 index 0000000..541e2ba --- /dev/null +++ b/include/linux/usb/at91_udc.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Platform data definitions for Atmel USBA gadget driver + * pieces copied from linux:include/linux/platform_data/atmel.h + */ +#ifndef __LINUX_USB_AT91_UDC_H__ +#define __LINUX_USB_AT91_UDC_H__ + +struct at91_udc_data { + int vbus_pin; /* high == host powering us */ + u8 vbus_active_low; /* vbus polarity */ + u8 vbus_polled; /* Use polling, not interrupt */ + int pullup_pin; /* active == D+ pulled up */ + u8 pullup_active_low; /* true == pullup_pin is active low */ + unsigned long baseaddr; +}; + +int at91_udc_probe(struct at91_udc_data *pdata); +#endif /* __LINUX_USB_AT91_UDC_H__ */ diff --git a/include/linux/usb/atmel_usba_udc.h b/include/linux/usb/atmel_usba_udc.h new file mode 100644 index 0000000..c1c8107 --- /dev/null +++ b/include/linux/usb/atmel_usba_udc.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Platform data definitions for Atmel USBA gadget driver + * [Original from Linux kernel: include/linux/usb/atmel_usba_udc.h] + */ +#ifndef __LINUX_USB_USBA_H__ +#define __LINUX_USB_USBA_H__ + +struct usba_ep_data { + char *name; + int index; + int fifo_size; + int nr_banks; + int can_dma; + int can_isoc; +}; + +struct usba_platform_data { + int num_ep; + struct usba_ep_data *ep; +}; + +extern int usba_udc_probe(struct usba_platform_data *pdata); + +#endif /* __LINUX_USB_USBA_H */ diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h new file mode 100644 index 0000000..442316b --- /dev/null +++ b/include/linux/usb/cdc.h @@ -0,0 +1,224 @@ +/* + * USB Communications Device Class (CDC) definitions + * + * CDC says how to talk to lots of different types of network adapters, + * notably ethernet adapters and various modems. It's used mostly with + * firmware based USB peripherals. + * + * Ported to U-Boot by: Thomas Smits and + * Remy Bohmer + */ + +#define USB_CDC_SUBCLASS_ACM 0x02 +#define USB_CDC_SUBCLASS_ETHERNET 0x06 +#define USB_CDC_SUBCLASS_WHCM 0x08 +#define USB_CDC_SUBCLASS_DMM 0x09 +#define USB_CDC_SUBCLASS_MDLM 0x0a +#define USB_CDC_SUBCLASS_OBEX 0x0b + +#define USB_CDC_PROTO_NONE 0 + +#define USB_CDC_ACM_PROTO_AT_V25TER 1 +#define USB_CDC_ACM_PROTO_AT_PCCA101 2 +#define USB_CDC_ACM_PROTO_AT_PCCA101_WAKE 3 +#define USB_CDC_ACM_PROTO_AT_GSM 4 +#define USB_CDC_ACM_PROTO_AT_3G 5 +#define USB_CDC_ACM_PROTO_AT_CDMA 6 +#define USB_CDC_ACM_PROTO_VENDOR 0xff + +/*-------------------------------------------------------------------------*/ + +/* + * Class-Specific descriptors ... 
there are a couple dozen of them + */ + +#define USB_CDC_HEADER_TYPE 0x00 /* header_desc */ +#define USB_CDC_CALL_MANAGEMENT_TYPE 0x01 /* call_mgmt_descriptor */ +#define USB_CDC_ACM_TYPE 0x02 /* acm_descriptor */ +#define USB_CDC_UNION_TYPE 0x06 /* union_desc */ +#define USB_CDC_COUNTRY_TYPE 0x07 +#define USB_CDC_NETWORK_TERMINAL_TYPE 0x0a /* network_terminal_desc */ +#define USB_CDC_ETHERNET_TYPE 0x0f /* ether_desc */ +#define USB_CDC_WHCM_TYPE 0x11 +#define USB_CDC_MDLM_TYPE 0x12 /* mdlm_desc */ +#define USB_CDC_MDLM_DETAIL_TYPE 0x13 /* mdlm_detail_desc */ +#define USB_CDC_DMM_TYPE 0x14 +#define USB_CDC_OBEX_TYPE 0x15 + +/* "Header Functional Descriptor" from CDC spec 5.2.3.1 */ +struct usb_cdc_header_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __le16 bcdCDC; +} __attribute__ ((packed)); + +/* "Call Management Descriptor" from CDC spec 5.2.3.2 */ +struct usb_cdc_call_mgmt_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 bmCapabilities; +#define USB_CDC_CALL_MGMT_CAP_CALL_MGMT 0x01 +#define USB_CDC_CALL_MGMT_CAP_DATA_INTF 0x02 + + __u8 bDataInterface; +} __attribute__ ((packed)); + +/* "Abstract Control Management Descriptor" from CDC spec 5.2.3.3 */ +struct usb_cdc_acm_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 bmCapabilities; +} __attribute__ ((packed)); + +/* capabilities from 5.2.3.3 */ + +#define USB_CDC_COMM_FEATURE 0x01 +#define USB_CDC_CAP_LINE 0x02 +#define USB_CDC_CAP_BRK 0x04 +#define USB_CDC_CAP_NOTIFY 0x08 + +/* "Union Functional Descriptor" from CDC spec 5.2.3.8 */ +struct usb_cdc_union_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 bMasterInterface0; + __u8 bSlaveInterface0; + /* ... and there could be other slave interfaces */ +} __attribute__ ((packed)); + +/* "Country Selection Functional Descriptor" from CDC spec 5.2.3.9 */ +struct usb_cdc_country_functional_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 iCountryCodeRelDate; + __le16 wCountyCode0; + /* ... and there can be a lot of country codes */ +} __attribute__ ((packed)); + +/* "Network Channel Terminal Functional Descriptor" from CDC spec 5.2.3.11 */ +struct usb_cdc_network_terminal_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 bEntityId; + __u8 iName; + __u8 bChannelIndex; + __u8 bPhysicalInterface; +} __attribute__ ((packed)); + +/* "Ethernet Networking Functional Descriptor" from CDC spec 5.2.3.16 */ +struct usb_cdc_ether_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __u8 iMACAddress; + __le32 bmEthernetStatistics; + __le16 wMaxSegmentSize; + __le16 wNumberMCFilters; + __u8 bNumberPowerFilters; +} __attribute__ ((packed)); + +/* "MDLM Functional Descriptor" from CDC WMC spec 6.7.2.3 */ +struct usb_cdc_mdlm_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + __le16 bcdVersion; + __u8 bGUID[16]; +} __attribute__ ((packed)); + +/* "MDLM Detail Functional Descriptor" from CDC WMC spec 6.7.2.4 */ +struct usb_cdc_mdlm_detail_desc { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + + /* type is associated with mdlm_desc.bGUID */ + __u8 bGuidDescriptorType; + __u8 bDetailData[0]; +} __attribute__ ((packed)); + +/*-------------------------------------------------------------------------*/ + +/* + * Class-Specific Control Requests (6.2) + * + * section 3.6.2.1 table 4 has the ACM profile, for modems. 
+ * section 3.8.2 table 10 has the ethernet profile. + * + * Microsoft's RNDIS stack for Ethernet is a vendor-specific CDC ACM variant, + * heavily dependent on the encapsulated (proprietary) command mechanism. + */ + +#define USB_CDC_SEND_ENCAPSULATED_COMMAND 0x00 +#define USB_CDC_GET_ENCAPSULATED_RESPONSE 0x01 +#define USB_CDC_REQ_SET_LINE_CODING 0x20 +#define USB_CDC_REQ_GET_LINE_CODING 0x21 +#define USB_CDC_REQ_SET_CONTROL_LINE_STATE 0x22 +#define USB_CDC_REQ_SEND_BREAK 0x23 +#define USB_CDC_SET_ETHERNET_MULTICAST_FILTERS 0x40 +#define USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER 0x41 +#define USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER 0x42 +#define USB_CDC_SET_ETHERNET_PACKET_FILTER 0x43 +#define USB_CDC_GET_ETHERNET_STATISTIC 0x44 + +/* Line Coding Structure from CDC spec 6.2.13 */ +struct usb_cdc_line_coding { + __le32 dwDTERate; + __u8 bCharFormat; +#define USB_CDC_1_STOP_BITS 0 +#define USB_CDC_1_5_STOP_BITS 1 +#define USB_CDC_2_STOP_BITS 2 + + __u8 bParityType; +#define USB_CDC_NO_PARITY 0 +#define USB_CDC_ODD_PARITY 1 +#define USB_CDC_EVEN_PARITY 2 +#define USB_CDC_MARK_PARITY 3 +#define USB_CDC_SPACE_PARITY 4 + + __u8 bDataBits; +} __attribute__ ((packed)); + +/* table 62; bits in multicast filter */ +#define USB_CDC_PACKET_TYPE_PROMISCUOUS (1 << 0) +#define USB_CDC_PACKET_TYPE_ALL_MULTICAST (1 << 1) /* no filter */ +#define USB_CDC_PACKET_TYPE_DIRECTED (1 << 2) +#define USB_CDC_PACKET_TYPE_BROADCAST (1 << 3) +#define USB_CDC_PACKET_TYPE_MULTICAST (1 << 4) /* filtered */ + +/*-------------------------------------------------------------------------*/ + +/* + * Class-Specific Notifications (6.3) sent by interrupt transfers + * + * section 3.8.2 table 11 of the CDC spec lists Ethernet notifications + * section 3.6.2.1 table 5 specifies ACM notifications, accepted by RNDIS + * RNDIS also defines its own bit-incompatible notifications + */ + +#define USB_CDC_NOTIFY_NETWORK_CONNECTION 0x00 +#define USB_CDC_NOTIFY_RESPONSE_AVAILABLE 0x01 +#define USB_CDC_NOTIFY_SERIAL_STATE 0x20 +#define USB_CDC_NOTIFY_SPEED_CHANGE 0x2a + +struct usb_cdc_notification { + __u8 bmRequestType; + __u8 bNotificationType; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +} __attribute__ ((packed)); diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h new file mode 100644 index 0000000..264c971 --- /dev/null +++ b/include/linux/usb/ch9.h @@ -0,0 +1,1063 @@ +/* + * This file holds USB constants and structures that are needed for + * USB device APIs. These are used by the USB device model, which is + * defined in chapter 9 of the USB 2.0 specification and in the + * Wireless USB 1.0 (spread around). Linux has several APIs in C that + * need these: + * + * - the master/host side Linux-USB kernel driver API; + * - the "usbfs" user space API; and + * - the Linux "gadget" slave/device/peripheral side driver API. + * + * USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems + * act either as a USB master/host or as a USB slave/device. That means + * the master and slave side APIs benefit from working well together. + * + * There's also "Wireless USB", using low power short range radios for + * peripheral interconnection but otherwise building on the USB framework. 
+ * + * Note all descriptors are declared '__attribute__((packed))' so that: + * + * [a] they never get padded, either internally (USB spec writers + * probably handled that) or externally; + * + * [b] so that accessing bigger-than-a-bytes fields will never + * generate bus errors on any platform, even when the location of + * its descriptor inside a bundle isn't "naturally aligned", and + * + * [c] for consistency, removing all doubt even when it appears to + * someone that the two other points are non-issues for that + * particular descriptor type. + */ + +#ifndef __LINUX_USB_CH9_H +#define __LINUX_USB_CH9_H + +#include /* __u8 etc */ +#include /* le16_to_cpu */ +#include /* get_unaligned() */ + +/*-------------------------------------------------------------------------*/ + +/* CONTROL REQUEST SUPPORT */ + +/* + * USB directions + * + * This bit flag is used in endpoint descriptors' bEndpointAddress field. + * It's also one of three fields in control requests bRequestType. + */ +#define USB_DIR_OUT 0 /* to device */ +#define USB_DIR_IN 0x80 /* to host */ + +/* + * USB types, the second of three bRequestType fields + */ +#define USB_TYPE_MASK (0x03 << 5) +#define USB_TYPE_STANDARD (0x00 << 5) +#define USB_TYPE_CLASS (0x01 << 5) +#define USB_TYPE_VENDOR (0x02 << 5) +#define USB_TYPE_RESERVED (0x03 << 5) + +/* + * USB recipients, the third of three bRequestType fields + */ +#define USB_RECIP_MASK 0x1f +#define USB_RECIP_DEVICE 0x00 +#define USB_RECIP_INTERFACE 0x01 +#define USB_RECIP_ENDPOINT 0x02 +#define USB_RECIP_OTHER 0x03 +/* From Wireless USB 1.0 */ +#define USB_RECIP_PORT 0x04 +#define USB_RECIP_RPIPE 0x05 + +/* + * Standard requests, for the bRequest field of a SETUP packet. + * + * These are qualified by the bRequestType field, so that for example + * TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved + * by a GET_STATUS request. + */ +#define USB_REQ_GET_STATUS 0x00 +#define USB_REQ_CLEAR_FEATURE 0x01 +#define USB_REQ_SET_FEATURE 0x03 +#define USB_REQ_SET_ADDRESS 0x05 +#define USB_REQ_GET_DESCRIPTOR 0x06 +#define USB_REQ_SET_DESCRIPTOR 0x07 +#define USB_REQ_GET_CONFIGURATION 0x08 +#define USB_REQ_SET_CONFIGURATION 0x09 +#define USB_REQ_GET_INTERFACE 0x0A +#define USB_REQ_SET_INTERFACE 0x0B +#define USB_REQ_SYNCH_FRAME 0x0C +#define USB_REQ_SET_SEL 0x30 +#define USB_REQ_SET_ISOCH_DELAY 0x31 + +#define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */ +#define USB_REQ_GET_ENCRYPTION 0x0E +#define USB_REQ_RPIPE_ABORT 0x0E +#define USB_REQ_SET_HANDSHAKE 0x0F +#define USB_REQ_RPIPE_RESET 0x0F +#define USB_REQ_GET_HANDSHAKE 0x10 +#define USB_REQ_SET_CONNECTION 0x11 +#define USB_REQ_SET_SECURITY_DATA 0x12 +#define USB_REQ_GET_SECURITY_DATA 0x13 +#define USB_REQ_SET_WUSB_DATA 0x14 +#define USB_REQ_LOOPBACK_DATA_WRITE 0x15 +#define USB_REQ_LOOPBACK_DATA_READ 0x16 +#define USB_REQ_SET_INTERFACE_DS 0x17 + +/* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command, + * used by hubs to put ports into a new L1 suspend state, except that it + * forgot to define its number ... + */ + +/* + * USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and + * are read as a bit array returned by USB_REQ_GET_STATUS. (So there + * are at most sixteen features of each type.) Hubs may also support a + * new USB_REQ_TEST_AND_SET_FEATURE to put ports into L1 suspend. 
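+ *
+ * As a rough illustration, the request constants above combine into the
+ * eight-byte SETUP packet of a control transfer.  A
+ * SetFeature(DEVICE_REMOTE_WAKEUP) aimed at the device, for example,
+ * would be encoded along these lines:
+ *
+ *	bmRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE;
+ *	bRequest      = USB_REQ_SET_FEATURE;
+ *	wValue        = cpu_to_le16(USB_DEVICE_REMOTE_WAKEUP);
+ *	wIndex        = 0;
+ *	wLength       = 0;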
+ */ +#define USB_DEVICE_SELF_POWERED 0 /* (read only) */ +#define USB_DEVICE_REMOTE_WAKEUP 1 /* dev may initiate wakeup */ +#define USB_DEVICE_TEST_MODE 2 /* (wired high speed only) */ +#define USB_DEVICE_BATTERY 2 /* (wireless) */ +#define USB_DEVICE_B_HNP_ENABLE 3 /* (otg) dev may initiate HNP */ +#define USB_DEVICE_WUSB_DEVICE 3 /* (wireless)*/ +#define USB_DEVICE_A_HNP_SUPPORT 4 /* (otg) RH port supports HNP */ +#define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */ +#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ + +/* + * Test Mode Selectors + * See USB 2.0 spec Table 9-7 + */ +#define TEST_J 1 +#define TEST_K 2 +#define TEST_SE0_NAK 3 +#define TEST_PACKET 4 +#define TEST_FORCE_EN 5 + +/* + * New Feature Selectors as added by USB 3.0 + * See USB 3.0 spec Table 9-6 + */ +#define USB_DEVICE_U1_ENABLE 48 /* dev may initiate U1 transition */ +#define USB_DEVICE_U2_ENABLE 49 /* dev may initiate U2 transition */ +#define USB_DEVICE_LTM_ENABLE 50 /* dev may send LTM */ +#define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */ + +#define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00 +/* + * Suspend Options, Table 9-7 USB 3.0 spec + */ +#define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0)) +#define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1)) + +#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ + +/* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ +#define USB_DEV_STAT_U1_ENABLED 2 /* transition into U1 state */ +#define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */ +#define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */ + +/** + * struct usb_ctrlrequest - SETUP data for a USB device control request + * @bRequestType: matches the USB bmRequestType field + * @bRequest: matches the USB bRequest field + * @wValue: matches the USB wValue field (le16 byte order) + * @wIndex: matches the USB wIndex field (le16 byte order) + * @wLength: matches the USB wLength field (le16 byte order) + * + * This structure is used to send control requests to a USB device. It matches + * the different fields of the USB 2.0 Spec section 9.3, table 9-2. See the + * USB spec for a fuller description of the different fields, and what they are + * used for. + * + * Note that the driver for any interface can issue control requests. + * For most devices, interfaces don't coordinate with each other, so + * such requests may be made at any time. + */ +struct usb_ctrlrequest { + __u8 bRequestType; + __u8 bRequest; + __le16 wValue; + __le16 wIndex; + __le16 wLength; +} __attribute__ ((packed)); + +/*-------------------------------------------------------------------------*/ + +/* + * STANDARD DESCRIPTORS ... as returned by GET_DESCRIPTOR, or + * (rarely) accepted by SET_DESCRIPTOR. + * + * Note that all multi-byte values here are encoded in little endian + * byte order "on the wire". Within the kernel and when exposed + * through the Linux-USB APIs, they are not converted to cpu byte + * order; it is the responsibility of the client code to do this. + * The single exception is when device and configuration descriptors (but + * not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD); + * in this case the fields are converted to host endianness by the kernel. + */ + +/* + * Descriptor types ... 
USB 2.0 spec table 9.5 + */ +#define USB_DT_DEVICE 0x01 +#define USB_DT_CONFIG 0x02 +#define USB_DT_STRING 0x03 +#define USB_DT_INTERFACE 0x04 +#define USB_DT_ENDPOINT 0x05 +#define USB_DT_DEVICE_QUALIFIER 0x06 +#define USB_DT_OTHER_SPEED_CONFIG 0x07 +#define USB_DT_INTERFACE_POWER 0x08 +/* these are from a minor usb 2.0 revision (ECN) */ +#define USB_DT_OTG 0x09 +#define USB_DT_DEBUG 0x0a +#define USB_DT_INTERFACE_ASSOCIATION 0x0b +/* these are from the Wireless USB spec */ +#define USB_DT_SECURITY 0x0c +#define USB_DT_KEY 0x0d +#define USB_DT_ENCRYPTION_TYPE 0x0e +#define USB_DT_BOS 0x0f +#define USB_DT_DEVICE_CAPABILITY 0x10 +#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11 +#define USB_DT_WIRE_ADAPTER 0x21 +#define USB_DT_RPIPE 0x22 +#define USB_DT_CS_RADIO_CONTROL 0x23 +/* From the T10 UAS specification */ +#define USB_DT_PIPE_USAGE 0x24 +/* From the USB 3.0 spec */ +#define USB_DT_SS_ENDPOINT_COMP 0x30 +/* From HID 1.11 spec */ +#define USB_DT_HID_REPORT 0x22 + +/* Conventional codes for class-specific descriptors. The convention is + * defined in the USB "Common Class" Spec (3.11). Individual class specs + * are authoritative for their usage, not the "common class" writeup. + */ +#define USB_DT_CS_DEVICE (USB_TYPE_CLASS | USB_DT_DEVICE) +#define USB_DT_CS_CONFIG (USB_TYPE_CLASS | USB_DT_CONFIG) +#define USB_DT_CS_STRING (USB_TYPE_CLASS | USB_DT_STRING) +#define USB_DT_CS_INTERFACE (USB_TYPE_CLASS | USB_DT_INTERFACE) +#define USB_DT_CS_ENDPOINT (USB_TYPE_CLASS | USB_DT_ENDPOINT) + +/* All standard descriptors have these 2 fields at the beginning */ +struct usb_descriptor_header { + __u8 bLength; + __u8 bDescriptorType; +} __attribute__ ((packed)); + + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_DEVICE: Device descriptor */ +struct usb_device_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __le16 idVendor; + __le16 idProduct; + __le16 bcdDevice; + __u8 iManufacturer; + __u8 iProduct; + __u8 iSerialNumber; + __u8 bNumConfigurations; +} __attribute__ ((packed)); + +#define USB_DT_DEVICE_SIZE 18 + + +/* + * Device and/or Interface Class codes + * as found in bDeviceClass or bInterfaceClass + * and defined by www.usb.org documents + */ +#define USB_CLASS_PER_INTERFACE 0 /* for DeviceClass */ +#define USB_CLASS_AUDIO 1 +#define USB_CLASS_COMM 2 +#define USB_CLASS_HID 3 +#define USB_CLASS_PHYSICAL 5 +#define USB_CLASS_STILL_IMAGE 6 +#define USB_CLASS_PRINTER 7 +#define USB_CLASS_MASS_STORAGE 8 +#define USB_CLASS_HUB 9 +#define USB_CLASS_CDC_DATA 0x0a +#define USB_CLASS_CSCID 0x0b /* chip+ smart card */ +#define USB_CLASS_CONTENT_SEC 0x0d /* content security */ +#define USB_CLASS_VIDEO 0x0e +#define USB_CLASS_WIRELESS_CONTROLLER 0xe0 +#define USB_CLASS_MISC 0xef +#define USB_CLASS_APP_SPEC 0xfe +#define USB_CLASS_VENDOR_SPEC 0xff + +#define USB_SUBCLASS_VENDOR_SPEC 0xff + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_CONFIG: Configuration descriptor information. + * + * USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the + * descriptor type is different. Highspeed-capable devices can look + * different depending on what speed they're currently running. Only + * devices with a USB_DT_DEVICE_QUALIFIER have any OTHER_SPEED_CONFIG + * descriptors. 
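+ *
+ * A minimal, illustrative gadget-side initializer (the values are examples
+ * only; wTotalLength is normally computed at runtime once all interface and
+ * endpoint descriptors have been gathered):
+ *
+ *	static struct usb_config_descriptor config = {
+ *		.bLength		= USB_DT_CONFIG_SIZE,
+ *		.bDescriptorType	= USB_DT_CONFIG,
+ *		.bNumInterfaces		= 1,
+ *		.bConfigurationValue	= 1,
+ *		.bmAttributes		= USB_CONFIG_ATT_ONE,
+ *		.bMaxPower		= 250,
+ *	};
+ *
+ * bMaxPower is expressed in units of 2 mA, so 250 here advertises 500 mA.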
+ */ +struct usb_config_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 wTotalLength; + __u8 bNumInterfaces; + __u8 bConfigurationValue; + __u8 iConfiguration; + __u8 bmAttributes; + __u8 bMaxPower; +} __attribute__ ((packed)); + +#define USB_DT_CONFIG_SIZE 9 + +/* from config descriptor bmAttributes */ +#define USB_CONFIG_ATT_ONE (1 << 7) /* must be set */ +#define USB_CONFIG_ATT_SELFPOWER (1 << 6) /* self powered */ +#define USB_CONFIG_ATT_WAKEUP (1 << 5) /* can wakeup */ +#define USB_CONFIG_ATT_BATTERY (1 << 4) /* battery powered */ + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_STRING: String descriptor */ +struct usb_string_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 wData[1]; /* UTF-16LE encoded */ +} __attribute__ ((packed)); + +/* note that "string" zero is special, it holds language codes that + * the device supports, not Unicode characters. + */ + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_INTERFACE: Interface descriptor */ +struct usb_interface_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bInterfaceNumber; + __u8 bAlternateSetting; + __u8 bNumEndpoints; + __u8 bInterfaceClass; + __u8 bInterfaceSubClass; + __u8 bInterfaceProtocol; + __u8 iInterface; +} __attribute__ ((packed)); + +#define USB_DT_INTERFACE_SIZE 9 + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_ENDPOINT: Endpoint descriptor */ +struct usb_endpoint_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bEndpointAddress; + __u8 bmAttributes; + __le16 wMaxPacketSize; + __u8 bInterval; + + /* NOTE: these two are _only_ in audio endpoints. */ + /* use USB_DT_ENDPOINT*_SIZE in bLength, not sizeof. */ + __u8 bRefresh; + __u8 bSynchAddress; +} __attribute__ ((packed)); + +#define USB_DT_ENDPOINT_SIZE 7 +#define USB_DT_ENDPOINT_AUDIO_SIZE 9 /* Audio extension */ + +/* Used to access common fields */ +struct usb_generic_descriptor { + __u8 bLength; + __u8 bDescriptorType; +}; + +struct __packed usb_class_hid_descriptor { + u8 bLength; + u8 bDescriptorType; + u16 bcdCDC; + u8 bCountryCode; + u8 bNumDescriptors; /* 0x01 */ + u8 bDescriptorType0; + u16 wDescriptorLength0; + /* optional descriptors are not supported. */ +}; + +struct __packed usb_class_report_descriptor { + u8 bLength; /* dummy */ + u8 bDescriptorType; + u16 wLength; + u8 bData[0]; +}; + +/* + * Endpoints + */ +#define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */ +#define USB_ENDPOINT_DIR_MASK 0x80 + +#define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */ +#define USB_ENDPOINT_XFER_CONTROL 0 +#define USB_ENDPOINT_XFER_ISOC 1 +#define USB_ENDPOINT_XFER_BULK 2 +#define USB_ENDPOINT_XFER_INT 3 +#define USB_ENDPOINT_MAX_ADJUSTABLE 0x80 + +#define USB_ENDPOINT_MAXP_MASK 0x07ff +#define USB_EP_MAXP_MULT_SHIFT 11 +#define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT) +#define USB_EP_MAXP_MULT(m) \ + (((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT) + +/* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. 
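+ *
+ * As a worked example for the USB_ENDPOINT_MAXP_MASK / USB_EP_MAXP_MULT()
+ * definitions above: a high-bandwidth isochronous wMaxPacketSize of 0x1400
+ * decodes to a 1024-byte payload (bits 10:0) and a mult field of 2, i.e.
+ * 2 + 1 = 3 transactions per microframe.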
*/ +#define USB_ENDPOINT_INTRTYPE 0x30 +#define USB_ENDPOINT_INTR_PERIODIC (0 << 4) +#define USB_ENDPOINT_INTR_NOTIFICATION (1 << 4) + +#define USB_ENDPOINT_SYNCTYPE 0x0c +#define USB_ENDPOINT_SYNC_NONE (0 << 2) +#define USB_ENDPOINT_SYNC_ASYNC (1 << 2) +#define USB_ENDPOINT_SYNC_ADAPTIVE (2 << 2) +#define USB_ENDPOINT_SYNC_SYNC (3 << 2) + +#define USB_ENDPOINT_USAGE_MASK 0x30 +#define USB_ENDPOINT_USAGE_DATA 0x00 +#define USB_ENDPOINT_USAGE_FEEDBACK 0x10 +#define USB_ENDPOINT_USAGE_IMPLICIT_FB 0x20 /* Implicit feedback Data endpoint */ + +/*-------------------------------------------------------------------------*/ + +/** + * usb_endpoint_num - get the endpoint's number + * @epd: endpoint to be checked + * + * Returns @epd's number: 0 to 15. + */ +static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd) +{ + return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; +} + +/** + * usb_endpoint_type - get the endpoint's transfer type + * @epd: endpoint to be checked + * + * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according + * to @epd's transfer type. + */ +static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd) +{ + return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; +} + +/** + * usb_endpoint_dir_in - check if the endpoint has IN direction + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type IN, otherwise it returns false. + */ +static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN); +} + +/** + * usb_endpoint_dir_out - check if the endpoint has OUT direction + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type OUT, otherwise it returns false. + */ +static inline int usb_endpoint_dir_out( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT); +} + +/** + * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type bulk, otherwise it returns false. + */ +static inline int usb_endpoint_xfer_bulk( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_BULK); +} + +/** + * usb_endpoint_xfer_control - check if the endpoint has control transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type control, otherwise it returns false. + */ +static inline int usb_endpoint_xfer_control( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_CONTROL); +} + +/** + * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type interrupt, otherwise it returns + * false. + */ +static inline int usb_endpoint_xfer_int( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_INT); +} + +/** + * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type + * @epd: endpoint to be checked + * + * Returns true if the endpoint is of type isochronous, otherwise it returns + * false. 
+ */ +static inline int usb_endpoint_xfer_isoc( + const struct usb_endpoint_descriptor *epd) +{ + return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_ISOC); +} + +/** + * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN + * @epd: endpoint to be checked + * + * Returns true if the endpoint has bulk transfer type and IN direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_bulk_in( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd); +} + +/** + * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT + * @epd: endpoint to be checked + * + * Returns true if the endpoint has bulk transfer type and OUT direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_bulk_out( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd); +} + +/** + * usb_endpoint_is_int_in - check if the endpoint is interrupt IN + * @epd: endpoint to be checked + * + * Returns true if the endpoint has interrupt transfer type and IN direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_int_in( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd); +} + +/** + * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT + * @epd: endpoint to be checked + * + * Returns true if the endpoint has interrupt transfer type and OUT direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_int_out( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd); +} + +/** + * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN + * @epd: endpoint to be checked + * + * Returns true if the endpoint has isochronous transfer type and IN direction, + * otherwise it returns false. + */ +static inline int usb_endpoint_is_isoc_in( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd); +} + +/** + * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT + * @epd: endpoint to be checked + * + * Returns true if the endpoint has isochronous transfer type and OUT direction, + * otherwise it returns false. 
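+ *
+ * The usb_endpoint_is_*() helpers above are typically used while walking an
+ * interface's endpoint descriptors to pick out a bulk IN/OUT pair; a rough
+ * sketch (eps[], num_eps, ep_in and ep_out are placeholder names):
+ *
+ *	for (i = 0; i < num_eps; i++) {
+ *		if (!ep_in && usb_endpoint_is_bulk_in(&eps[i]))
+ *			ep_in = &eps[i];
+ *		else if (!ep_out && usb_endpoint_is_bulk_out(&eps[i]))
+ *			ep_out = &eps[i];
+ *	}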
+ */ +static inline int usb_endpoint_is_isoc_out( + const struct usb_endpoint_descriptor *epd) +{ + return usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd); +} + +/** + * usb_endpoint_maxp - get endpoint's max packet size + * @epd: endpoint to be checked + * + * Returns @epd's max packet + */ +static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd) +{ + return __le16_to_cpu(get_unaligned(&epd->wMaxPacketSize)); +} + +/** + * usb_endpoint_maxp_mult - get endpoint's transactional opportunities + * @epd: endpoint to be checked + * + * Return @epd's wMaxPacketSize[12:11] + 1 + */ +static inline int +usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd) +{ + int maxp = __le16_to_cpu(epd->wMaxPacketSize); + + return USB_EP_MAXP_MULT(maxp) + 1; +} + +static inline int usb_endpoint_interrupt_type( + const struct usb_endpoint_descriptor *epd) +{ + return epd->bmAttributes & USB_ENDPOINT_INTRTYPE; +} + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */ +struct usb_ss_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bMaxBurst; + __u8 bmAttributes; + __le16 wBytesPerInterval; +} __attribute__ ((packed)); + +#define USB_DT_SS_EP_COMP_SIZE 6 + +/* Bits 4:0 of bmAttributes if this is a bulk endpoint */ +static inline int +usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp) +{ + int max_streams; + + if (!comp) + return 0; + + max_streams = comp->bmAttributes & 0x1f; + + if (!max_streams) + return 0; + + max_streams = 1 << max_streams; + + return max_streams; +} + +/* Bits 1:0 of bmAttributes if this is an isoc endpoint */ +#define USB_SS_MULT(p) (1 + ((p) & 0x3)) + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */ +struct usb_qualifier_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 bcdUSB; + __u8 bDeviceClass; + __u8 bDeviceSubClass; + __u8 bDeviceProtocol; + __u8 bMaxPacketSize0; + __u8 bNumConfigurations; + __u8 bRESERVED; +} __attribute__ ((packed)); + + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_OTG (from OTG 1.0a supplement) */ +struct usb_otg_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bmAttributes; /* support for HNP, SRP, etc */ +} __attribute__ ((packed)); + +/* from usb_otg_descriptor.bmAttributes */ +#define USB_OTG_SRP (1 << 0) +#define USB_OTG_HNP (1 << 1) /* swap host/device roles */ + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_DEBUG: for special highspeed devices, replacing serial console */ +struct usb_debug_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + /* bulk endpoints with 8 byte maxpacket */ + __u8 bDebugInEndpoint; + __u8 bDebugOutEndpoint; +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_INTERFACE_ASSOCIATION: groups interfaces */ +struct usb_interface_assoc_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bFirstInterface; + __u8 bInterfaceCount; + __u8 bFunctionClass; + __u8 bFunctionSubClass; + __u8 bFunctionProtocol; + __u8 iFunction; +} __attribute__ ((packed)); + + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_SECURITY: group of wireless security descriptors, including + * encryption types available for setting up a CC/association. 
+ */ +struct usb_security_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 wTotalLength; + __u8 bNumEncryptionTypes; +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_KEY: used with {GET,SET}_SECURITY_DATA; only public keys + * may be retrieved. + */ +struct usb_key_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 tTKID[3]; + __u8 bReserved; + __u8 bKeyData[0]; +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_ENCRYPTION_TYPE: bundled in DT_SECURITY groups */ +struct usb_encryption_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bEncryptionType; +#define USB_ENC_TYPE_UNSECURE 0 +#define USB_ENC_TYPE_WIRED 1 /* non-wireless mode */ +#define USB_ENC_TYPE_CCM_1 2 /* aes128/cbc session */ +#define USB_ENC_TYPE_RSA_1 3 /* rsa3072/sha1 auth */ + __u8 bEncryptionValue; /* use in SET_ENCRYPTION */ + __u8 bAuthKeyIndex; +} __attribute__((packed)); + + +/*-------------------------------------------------------------------------*/ + +/* USB_DT_BOS: group of device-level capabilities */ +struct usb_bos_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __le16 wTotalLength; + __u8 bNumDeviceCaps; +} __attribute__((packed)); + +#define USB_DT_BOS_SIZE 5 +/*-------------------------------------------------------------------------*/ + +/* USB_DT_DEVICE_CAPABILITY: grouped with BOS */ +struct usb_dev_cap_header { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; +} __attribute__((packed)); + +#define USB_CAP_TYPE_WIRELESS_USB 1 + +struct usb_wireless_cap_descriptor { /* Ultra Wide Band */ + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + + __u8 bmAttributes; +#define USB_WIRELESS_P2P_DRD (1 << 1) +#define USB_WIRELESS_BEACON_MASK (3 << 2) +#define USB_WIRELESS_BEACON_SELF (1 << 2) +#define USB_WIRELESS_BEACON_DIRECTED (2 << 2) +#define USB_WIRELESS_BEACON_NONE (3 << 2) + __le16 wPHYRates; /* bit rates, Mbps */ +#define USB_WIRELESS_PHY_53 (1 << 0) /* always set */ +#define USB_WIRELESS_PHY_80 (1 << 1) +#define USB_WIRELESS_PHY_107 (1 << 2) /* always set */ +#define USB_WIRELESS_PHY_160 (1 << 3) +#define USB_WIRELESS_PHY_200 (1 << 4) /* always set */ +#define USB_WIRELESS_PHY_320 (1 << 5) +#define USB_WIRELESS_PHY_400 (1 << 6) +#define USB_WIRELESS_PHY_480 (1 << 7) + __u8 bmTFITXPowerInfo; /* TFI power levels */ + __u8 bmFFITXPowerInfo; /* FFI power levels */ + __le16 bmBandGroup; + __u8 bReserved; +} __attribute__((packed)); + +/* USB 2.0 Extension descriptor */ +#define USB_CAP_TYPE_EXT 2 + +struct usb_ext_cap_descriptor { /* Link Power Management */ + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __le32 bmAttributes; +#define USB_LPM_SUPPORT (1 << 1) /* supports LPM */ +#define USB_BESL_SUPPORT (1 << 2) /* supports BESL */ +#define USB_BESL_BASELINE_VALID (1 << 3) /* Baseline BESL valid*/ +#define USB_BESL_DEEP_VALID (1 << 4) /* Deep BESL valid */ +#define USB_GET_BESL_BASELINE(p) (((p) & (0xf << 8)) >> 8) +#define USB_GET_BESL_DEEP(p) (((p) & (0xf << 12)) >> 12) +} __attribute__((packed)); + +#define USB_DT_USB_EXT_CAP_SIZE 7 + +/* + * SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB + * specific device level capabilities + */ +#define USB_SS_CAP_TYPE 3 +struct usb_ss_cap_descriptor { /* Link Power Management */ + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bmAttributes; +#define USB_LTM_SUPPORT (1 << 1) /* 
supports LTM */ + __le16 wSpeedSupported; +#define USB_LOW_SPEED_OPERATION (1) /* Low speed operation */ +#define USB_FULL_SPEED_OPERATION (1 << 1) /* Full speed operation */ +#define USB_HIGH_SPEED_OPERATION (1 << 2) /* High speed operation */ +#define USB_5GBPS_OPERATION (1 << 3) /* Operation at 5Gbps */ + __u8 bFunctionalitySupport; + __u8 bU1devExitLat; + __le16 bU2DevExitLat; +} __attribute__((packed)); + +#define USB_DT_USB_SS_CAP_SIZE 10 + +/* + * Container ID Capability descriptor: Defines the instance unique ID used to + * identify the instance across all operating modes + */ +#define CONTAINER_ID_TYPE 4 +struct usb_ss_container_id_descriptor { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDevCapabilityType; + __u8 bReserved; + __u8 ContainerID[16]; /* 128-bit number */ +} __attribute__((packed)); + +#define USB_DT_USB_SS_CONTN_ID_SIZE 20 +/*-------------------------------------------------------------------------*/ + +/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with + * each endpoint descriptor for a wireless device + */ +struct usb_wireless_ep_comp_descriptor { + __u8 bLength; + __u8 bDescriptorType; + + __u8 bMaxBurst; + __u8 bMaxSequence; + __le16 wMaxStreamDelay; + __le16 wOverTheAirPacketSize; + __u8 bOverTheAirInterval; + __u8 bmCompAttributes; +#define USB_ENDPOINT_SWITCH_MASK 0x03 /* in bmCompAttributes */ +#define USB_ENDPOINT_SWITCH_NO 0 +#define USB_ENDPOINT_SWITCH_SWITCH 1 +#define USB_ENDPOINT_SWITCH_SCALE 2 +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_REQ_SET_HANDSHAKE is a four-way handshake used between a wireless + * host and a device for connection set up, mutual authentication, and + * exchanging short lived session keys. The handshake depends on a CC. + */ +struct usb_handshake { + __u8 bMessageNumber; + __u8 bStatus; + __u8 tTKID[3]; + __u8 bReserved; + __u8 CDID[16]; + __u8 nonce[16]; + __u8 MIC[8]; +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB_REQ_SET_CONNECTION modifies or revokes a connection context (CC). + * A CC may also be set up using non-wireless secure channels (including + * wired USB!), and some devices may support CCs with multiple hosts. + */ +struct usb_connection_context { + __u8 CHID[16]; /* persistent host id */ + __u8 CDID[16]; /* device id (unique w/in host context) */ + __u8 CK[16]; /* connection key */ +} __attribute__((packed)); + +/*-------------------------------------------------------------------------*/ + +/* USB 2.0 defines three speeds, here's how Linux identifies them */ + +enum usb_device_speed { + USB_SPEED_UNKNOWN = 0, /* enumerating */ + USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */ + USB_SPEED_HIGH, /* usb 2.0 */ + USB_SPEED_WIRELESS, /* wireless (usb 2.5) */ + USB_SPEED_SUPER, /* usb 3.0 */ +}; + +#ifdef __KERNEL__ + +/** + * usb_speed_string() - Returns human readable-name of the speed. + * @speed: The speed to return human-readable name for. If it's not + * any of the speeds defined in usb_device_speed enum, string for + * USB_SPEED_UNKNOWN will be returned. + */ +extern const char *usb_speed_string(enum usb_device_speed speed); + +#endif + +enum usb_device_state { + /* NOTATTACHED isn't in the USB spec, and this state acts + * the same as ATTACHED ... but it's clearer this way. 
+ */ + USB_STATE_NOTATTACHED = 0, + + /* chapter 9 and authentication (wireless) device states */ + USB_STATE_ATTACHED, + USB_STATE_POWERED, /* wired */ + USB_STATE_RECONNECTING, /* auth */ + USB_STATE_UNAUTHENTICATED, /* auth */ + USB_STATE_DEFAULT, /* limited function */ + USB_STATE_ADDRESS, + USB_STATE_CONFIGURED, /* most functions */ + + USB_STATE_SUSPENDED + + /* NOTE: there are actually four different SUSPENDED + * states, returning to POWERED, DEFAULT, ADDRESS, or + * CONFIGURED respectively when SOF tokens flow again. + * At this level there's no difference between L1 and L2 + * suspend states. (L2 being original USB 1.1 suspend.) + */ +}; + +enum usb3_link_state { + USB3_LPM_U0 = 0, + USB3_LPM_U1, + USB3_LPM_U2, + USB3_LPM_U3 +}; + +/* + * A U1 timeout of 0x0 means the parent hub will reject any transitions to U1. + * 0xff means the parent hub will accept transitions to U1, but will not + * initiate a transition. + * + * A U1 timeout of 0x1 to 0x7F also causes the hub to initiate a transition to + * U1 after that many microseconds. Timeouts of 0x80 to 0xFE are reserved + * values. + * + * A U2 timeout of 0x0 means the parent hub will reject any transitions to U2. + * 0xff means the parent hub will accept transitions to U2, but will not + * initiate a transition. + * + * A U2 timeout of 0x1 to 0xFE also causes the hub to initiate a transition to + * U2 after N*256 microseconds. Therefore a U2 timeout value of 0x1 means a U2 + * idle timer of 256 microseconds, 0x2 means 512 microseconds, 0xFE means + * 65.024ms. + */ +#define USB3_LPM_DISABLED 0x0 +#define USB3_LPM_U1_MAX_TIMEOUT 0x7F +#define USB3_LPM_U2_MAX_TIMEOUT 0xFE +#define USB3_LPM_DEVICE_INITIATED 0xFF + +struct usb_set_sel_req { + __u8 u1_sel; + __u8 u1_pel; + __le16 u2_sel; + __le16 u2_pel; +} __attribute__ ((packed)); + +/* + * The Set System Exit Latency control transfer provides one byte each for + * U1 SEL and U1 PEL, so the max exit latency is 0xFF. U2 SEL and U2 PEL each + * are two bytes long. + */ +#define USB3_LPM_MAX_U1_SEL_PEL 0xFF +#define USB3_LPM_MAX_U2_SEL_PEL 0xFFFF + +/*-------------------------------------------------------------------------*/ + +/* + * As per USB compliance update, a device that is actively drawing + * more than 100mA from USB must report itself as bus-powered in + * the GetStatus(DEVICE) call. + * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34 + */ +#define USB_SELF_POWER_VBUS_MAX_DRAW 100 + +/** + * struct usb_string - wraps a C string and its USB id + * @id:the (nonzero) ID for this string + * @s:the string, in UTF-8 encoding + * + * If you're using usb_gadget_get_string(), use this to wrap a string + * together with its ID. 
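+ *
+ * A typical, illustrative table; the IDs are usually assigned at bind()
+ * time via usb_string_id(), and the list ends with an empty entry:
+ *
+ *	static struct usb_string dev_strings[] = {
+ *		{ 1, "Acme Corp." },
+ *		{ 2, "Example Gadget" },
+ *		{  }
+ *	};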
+ */ +struct usb_string { + u8 id; + const char *s; +}; + +#endif /* __LINUX_USB_CH9_H */ diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h new file mode 100644 index 0000000..a49a66f --- /dev/null +++ b/include/linux/usb/composite.h @@ -0,0 +1,346 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * composite.h -- framework for usb gadgets which are composite devices + * + * Copyright (C) 2006-2008 David Brownell + */ + +#ifndef __LINUX_USB_COMPOSITE_H +#define __LINUX_USB_COMPOSITE_H + +/* + * This framework is an optional layer on top of the USB Gadget interface, + * making it easier to build (a) Composite devices, supporting multiple + * functions within any single configuration, and (b) Multi-configuration + * devices, also supporting multiple functions but without necessarily + * having more than one function per configuration. + * + * Example: a device with a single configuration supporting both network + * link and mass storage functions is a composite device. Those functions + * might alternatively be packaged in individual configurations, but in + * the composite model the host can use both functions at the same time. + */ + +#include +#include +#include +#include + +/* + * USB function drivers should return USB_GADGET_DELAYED_STATUS if they + * wish to delay the data/status stages of the control transfer till they + * are ready. The control transfer will then be kept from completing till + * all the function drivers that requested for USB_GADGET_DELAYED_STAUS + * invoke usb_composite_setup_continue(). + */ +#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */ + +struct usb_configuration; + +/** + * struct usb_function - describes one function of a configuration + * @name: For diagnostics, identifies the function. + * @strings: tables of strings, keyed by identifiers assigned during bind() + * and by language IDs provided in control requests + * @descriptors: Table of full (or low) speed descriptors, using interface and + * string identifiers assigned during @bind(). If this pointer is null, + * the function will not be available at full speed (or at low speed). + * @hs_descriptors: Table of high speed descriptors, using interface and + * string identifiers assigned during @bind(). If this pointer is null, + * the function will not be available at high speed. + * @config: assigned when @usb_add_function() is called; this is the + * configuration with which this function is associated. + * @bind: Before the gadget can register, all of its functions bind() to the + * available resources including string and interface identifiers used + * in interface or class descriptors; endpoints; I/O buffers; and so on. + * @unbind: Reverses @bind; called as a side effect of unregistering the + * driver which added this function. + * @set_alt: (REQUIRED) Reconfigures altsettings; function drivers may + * initialize usb_ep.driver data at this time (when it is used). + * Note that setting an interface to its current altsetting resets + * interface state, and that all interfaces have a disabled state. + * @get_alt: Returns the active altsetting. If this is not provided, + * then only altsetting zero is supported. + * @disable: (REQUIRED) Indicates the function should be disabled. Reasons + * include host resetting or reconfiguring the gadget, and disconnection. + * @setup: Used for interface-specific control requests. + * @suspend: Notifies functions when the host stops sending USB traffic. + * @resume: Notifies functions when the host restarts USB traffic. 
+ * + * A single USB function uses one or more interfaces, and should in most + * cases support operation at both full and high speeds. Each function is + * associated by @usb_add_function() with a one configuration; that function + * causes @bind() to be called so resources can be allocated as part of + * setting up a gadget driver. Those resources include endpoints, which + * should be allocated using @usb_ep_autoconfig(). + * + * To support dual speed operation, a function driver provides descriptors + * for both high and full speed operation. Except in rare cases that don't + * involve bulk endpoints, each speed needs different endpoint descriptors. + * + * Function drivers choose their own strategies for managing instance data. + * The simplest strategy just declares it "static', which means the function + * can only be activated once. If the function needs to be exposed in more + * than one configuration at a given speed, it needs to support multiple + * usb_function structures (one for each configuration). + * + * A more complex strategy might encapsulate a @usb_function structure inside + * a driver-specific instance structure to allows multiple activations. An + * example of multiple activations might be a CDC ACM function that supports + * two or more distinct instances within the same configuration, providing + * several independent logical data links to a USB host. + */ +struct usb_function { + const char *name; + struct usb_gadget_strings **strings; + struct usb_descriptor_header **descriptors; + struct usb_descriptor_header **hs_descriptors; + + struct usb_configuration *config; + + /* REVISIT: bind() functions can be marked __init, which + * makes trouble for section mismatch analysis. See if + * we can't restructure things to avoid mismatching. + * Related: unbind() may kfree() but bind() won't... + */ + + /* configuration management: bind/unbind */ + int (*bind)(struct usb_configuration *, + struct usb_function *); + void (*unbind)(struct usb_configuration *, + struct usb_function *); + + /* runtime state management */ + int (*set_alt)(struct usb_function *, + unsigned interface, unsigned alt); + int (*get_alt)(struct usb_function *, + unsigned interface); + void (*disable)(struct usb_function *); + int (*setup)(struct usb_function *, + const struct usb_ctrlrequest *); + void (*suspend)(struct usb_function *); + void (*resume)(struct usb_function *); + + /* private: */ + /* internals */ + struct list_head list; + DECLARE_BITMAP(endpoints, 32); +}; + +int usb_add_function(struct usb_configuration *, struct usb_function *); + +int usb_function_deactivate(struct usb_function *); +int usb_function_activate(struct usb_function *); + +int usb_interface_id(struct usb_configuration *, struct usb_function *); + +/** + * ep_choose - select descriptor endpoint at current device speed + * @g: gadget, connected and running at some speed + * @hs: descriptor to use for high speed operation + * @fs: descriptor to use for full or low speed operation + */ +static inline struct usb_endpoint_descriptor * +ep_choose(struct usb_gadget *g, struct usb_endpoint_descriptor *hs, + struct usb_endpoint_descriptor *fs) +{ + if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) + return hs; + return fs; +} + +#define MAX_CONFIG_INTERFACES 16 /* arbitrary; max 255 */ + +/** + * struct usb_configuration - represents one gadget configuration + * @label: For diagnostics, describes the configuration. 
+ * @strings: Tables of strings, keyed by identifiers assigned during @bind() + * and by language IDs provided in control requests. + * @descriptors: Table of descriptors preceding all function descriptors. + * Examples include OTG and vendor-specific descriptors. + * @bind: Called from @usb_add_config() to allocate resources unique to this + * configuration and to call @usb_add_function() for each function used. + * @unbind: Reverses @bind; called as a side effect of unregistering the + * driver which added this configuration. + * @setup: Used to delegate control requests that aren't handled by standard + * device infrastructure or directed at a specific interface. + * @bConfigurationValue: Copied into configuration descriptor. + * @iConfiguration: Copied into configuration descriptor. + * @bmAttributes: Copied into configuration descriptor. + * @bMaxPower: Copied into configuration descriptor. + * @cdev: assigned by @usb_add_config() before calling @bind(); this is + * the device associated with this configuration. + * + * Configurations are building blocks for gadget drivers structured around + * function drivers. Simple USB gadgets require only one function and one + * configuration, and handle dual-speed hardware by always providing the same + * functionality. Slightly more complex gadgets may have more than one + * single-function configuration at a given speed; or have configurations + * that only work at one speed. + * + * Composite devices are, by definition, ones with configurations which + * include more than one function. + * + * The lifecycle of a usb_configuration includes allocation, initialization + * of the fields described above, and calling @usb_add_config() to set up + * internal data and bind it to a specific device. The configuration's + * @bind() method is then used to initialize all the functions and then + * call @usb_add_function() for them. + * + * Those functions would normally be independant of each other, but that's + * not mandatory. CDC WMC devices are an example where functions often + * depend on other functions, with some functions subsidiary to others. + * Such interdependency may be managed in any way, so long as all of the + * descriptors complete by the time the composite driver returns from + * its bind() routine. + */ +struct usb_configuration { + const char *label; + struct usb_gadget_strings **strings; + const struct usb_descriptor_header **descriptors; + + /* REVISIT: bind() functions can be marked __init, which + * makes trouble for section mismatch analysis. See if + * we can't restructure things to avoid mismatching... + */ + + /* configuration management: bind/unbind */ + int (*bind)(struct usb_configuration *); + void (*unbind)(struct usb_configuration *); + int (*setup)(struct usb_configuration *, + const struct usb_ctrlrequest *); + + /* fields in the config descriptor */ + u8 bConfigurationValue; + u8 iConfiguration; + u8 bmAttributes; + u8 bMaxPower; + + struct usb_composite_dev *cdev; + + /* private: */ + /* internals */ + struct list_head list; + struct list_head functions; + u8 next_interface_id; + unsigned highspeed:1; + unsigned fullspeed:1; + struct usb_function *interface[MAX_CONFIG_INTERFACES]; +}; + +int usb_add_config(struct usb_composite_dev *, + struct usb_configuration *); + +/** + * struct usb_composite_driver - groups configurations into a gadget + * @name: For diagnostics, identifies the driver. + * @dev: Template descriptor for the device, including default device + * identifiers. 
+ * @strings: tables of strings, keyed by identifiers assigned during bind() + * and language IDs provided in control requests + * @bind: (REQUIRED) Used to allocate resources that are shared across the + * whole device, such as string IDs, and add its configurations using + * @usb_add_config(). This may fail by returning a negative errno + * value; it should return zero on successful initialization. + * @unbind: Reverses @bind(); called as a side effect of unregistering + * this driver. + * @disconnect: optional driver disconnect method + * @suspend: Notifies when the host stops sending USB traffic, + * after function notifications + * @resume: Notifies configuration when the host restarts USB traffic, + * before function notifications + * + * Devices default to reporting self powered operation. Devices which rely + * on bus powered operation should report this in their @bind() method. + * + * Before returning from @bind, various fields in the template descriptor + * may be overridden. These include the idVendor/idProduct/bcdDevice values + * normally to bind the appropriate host side driver, and the three strings + * (iManufacturer, iProduct, iSerialNumber) normally used to provide user + * meaningful device identifiers. (The strings will not be defined unless + * they are defined in @dev and @strings.) The correct ep0 maxpacket size + * is also reported, as defined by the underlying controller driver. + */ +struct usb_composite_driver { + const char *name; + const struct usb_device_descriptor *dev; + struct usb_gadget_strings **strings; + + /* REVISIT: bind() functions can be marked __init, which + * makes trouble for section mismatch analysis. See if + * we can't restructure things to avoid mismatching... + */ + + int (*bind)(struct usb_composite_dev *); + int (*unbind)(struct usb_composite_dev *); + + void (*disconnect)(struct usb_composite_dev *); + + /* global suspend hooks */ + void (*suspend)(struct usb_composite_dev *); + void (*resume)(struct usb_composite_dev *); +}; + +extern int usb_composite_register(struct usb_composite_driver *); +extern void usb_composite_unregister(struct usb_composite_driver *); + + +/** + * struct usb_composite_device - represents one composite usb gadget + * @gadget: read-only, abstracts the gadget's usb peripheral controller + * @req: used for control responses; buffer is pre-allocated + * @bufsiz: size of buffer pre-allocated in @req + * @config: the currently active configuration + * + * One of these devices is allocated and initialized before the + * associated device driver's bind() is called. + * + * OPEN ISSUE: it appears that some WUSB devices will need to be + * built by combining a normal (wired) gadget with a wireless one. + * This revision of the gadget framework should probably try to make + * sure doing that won't hurt too much. 
+ * + * One notion for how to handle Wireless USB devices involves: + * (a) a second gadget here, discovery mechanism TBD, but likely + * needing separate "register/unregister WUSB gadget" calls; + * (b) updates to usb_gadget to include flags "is it wireless", + * "is it wired", plus (presumably in a wrapper structure) + * bandgroup and PHY info; + * (c) presumably a wireless_ep wrapping a usb_ep, and reporting + * wireless-specific parameters like maxburst and maxsequence; + * (d) configurations that are specific to wireless links; + * (e) function drivers that understand wireless configs and will + * support wireless for (additional) function instances; + * (f) a function to support association setup (like CBAF), not + * necessarily requiring a wireless adapter; + * (g) composite device setup that can create one or more wireless + * configs, including appropriate association setup support; + * (h) more, TBD. + */ +struct usb_composite_dev { + struct usb_gadget *gadget; + struct usb_request *req; + unsigned bufsiz; + + struct usb_configuration *config; + + /* private: */ + /* internals */ + unsigned int suspended:1; + struct usb_device_descriptor __aligned(CONFIG_SYS_CACHELINE_SIZE) desc; + struct list_head configs; + struct usb_composite_driver *driver; + u8 next_string_id; + + /* the gadget driver won't enable the data pullup + * while the deactivation count is nonzero. + */ + unsigned deactivations; +}; + +extern int usb_string_id(struct usb_composite_dev *c); +extern int usb_string_ids_tab(struct usb_composite_dev *c, + struct usb_string *str); +extern int usb_string_ids_n(struct usb_composite_dev *c, unsigned n); + +#endif /* __LINUX_USB_COMPOSITE_H */ diff --git a/include/linux/usb/dwc3-omap.h b/include/linux/usb/dwc3-omap.h new file mode 100644 index 0000000..62180e3 --- /dev/null +++ b/include/linux/usb/dwc3-omap.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* include/linux/usb/dwc3-omap.h + * + * Copyright (c) 2014 Texas Instruments Incorporated - http://www.ti.com + * + * Designware SuperSpeed Glue + */ + +#ifndef __DWC3_OMAP_H_ +#define __DWC3_OMAP_H_ + +enum dwc3_omap_utmi_mode { + DWC3_OMAP_UTMI_MODE_UNKNOWN = 0, + DWC3_OMAP_UTMI_MODE_HW, + DWC3_OMAP_UTMI_MODE_SW, +}; + +#endif /* __DWC3_OMAP_H_ */ diff --git a/include/linux/usb/dwc3.h b/include/linux/usb/dwc3.h new file mode 100644 index 0000000..9ceee0a --- /dev/null +++ b/include/linux/usb/dwc3.h @@ -0,0 +1,223 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* include/linux/usb/dwc3.h + * + * Copyright (c) 2012 Samsung Electronics Co. 
Ltd + * + * Designware SuperSpeed USB 3.0 DRD Controller global and OTG registers + */ + +#ifndef __DWC3_H_ +#define __DWC3_H_ + +/* Global constants */ +#define DWC3_ENDPOINTS_NUM 32 + +#define DWC3_EVENT_BUFFERS_SIZE PAGE_SIZE +#define DWC3_EVENT_TYPE_MASK 0xfe + +#define DWC3_EVENT_TYPE_DEV 0 +#define DWC3_EVENT_TYPE_CARKIT 3 +#define DWC3_EVENT_TYPE_I2C 4 + +#define DWC3_DEVICE_EVENT_DISCONNECT 0 +#define DWC3_DEVICE_EVENT_RESET 1 +#define DWC3_DEVICE_EVENT_CONNECT_DONE 2 +#define DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE 3 +#define DWC3_DEVICE_EVENT_WAKEUP 4 +#define DWC3_DEVICE_EVENT_EOPF 6 +#define DWC3_DEVICE_EVENT_SOF 7 +#define DWC3_DEVICE_EVENT_ERRATIC_ERROR 9 +#define DWC3_DEVICE_EVENT_CMD_CMPL 10 +#define DWC3_DEVICE_EVENT_OVERFLOW 11 + +#define DWC3_GEVNTCOUNT_MASK 0xfffc +#define DWC3_GSNPSID_MASK 0xffff0000 +#define DWC3_GSNPSID_SHIFT 16 +#define DWC3_GSNPSREV_MASK 0xffff + +#define DWC3_REVISION_MASK 0xffff + +#define DWC3_REG_OFFSET 0xC100 + +struct g_event_buffer { + u32 g_evntadrlo; + u32 g_evntadrhi; + u32 g_evntsiz; + u32 g_evntcount; +}; + +struct d_physical_endpoint { + u32 d_depcmdpar2; + u32 d_depcmdpar1; + u32 d_depcmdpar0; + u32 d_depcmd; +}; + +struct dwc3 { /* offset: 0xC100 */ + u32 g_sbuscfg0; + u32 g_sbuscfg1; + u32 g_txthrcfg; + u32 g_rxthrcfg; + u32 g_ctl; + + u32 reserved1; + + u32 g_sts; + + u32 reserved2; + + u32 g_snpsid; + u32 g_gpio; + u32 g_uid; + u32 g_uctl; + u64 g_buserraddr; + u64 g_prtbimap; + + u32 g_hwparams0; + u32 g_hwparams1; + u32 g_hwparams2; + u32 g_hwparams3; + u32 g_hwparams4; + u32 g_hwparams5; + u32 g_hwparams6; + u32 g_hwparams7; + + u32 g_dbgfifospace; + u32 g_dbgltssm; + u32 g_dbglnmcc; + u32 g_dbgbmu; + u32 g_dbglspmux; + u32 g_dbglsp; + u32 g_dbgepinfo0; + u32 g_dbgepinfo1; + + u64 g_prtbimap_hs; + u64 g_prtbimap_fs; + + u32 reserved3[28]; + + u32 g_usb2phycfg[16]; + u32 g_usb2i2cctl[16]; + u32 g_usb2phyacc[16]; + u32 g_usb3pipectl[16]; + + u32 g_txfifosiz[32]; + u32 g_rxfifosiz[32]; + + struct g_event_buffer g_evnt_buf[32]; + + u32 g_hwparams8; + + u32 reserved4[11]; + + u32 g_fladj; + + u32 reserved5[51]; + + u32 d_cfg; + u32 d_ctl; + u32 d_evten; + u32 d_sts; + u32 d_gcmdpar; + u32 d_gcmd; + + u32 reserved6[2]; + + u32 d_alepena; + + u32 reserved7[55]; + + struct d_physical_endpoint d_phy_ep_cmd[32]; + + u32 reserved8[128]; + + u32 o_cfg; + u32 o_ctl; + u32 o_evt; + u32 o_evten; + u32 o_sts; + + u32 reserved9[3]; + + u32 adp_cfg; + u32 adp_ctl; + u32 adp_evt; + u32 adp_evten; + + u32 bc_cfg; + + u32 reserved10; + + u32 bc_evt; + u32 bc_evten; +}; + +/* Global Configuration Register */ +#define DWC3_GCTL_PWRDNSCALE(n) ((n) << 19) +#define DWC3_GCTL_U2RSTECN (1 << 16) +#define DWC3_GCTL_RAMCLKSEL(x) \ + (((x) & DWC3_GCTL_CLK_MASK) << 6) +#define DWC3_GCTL_CLK_BUS (0) +#define DWC3_GCTL_CLK_PIPE (1) +#define DWC3_GCTL_CLK_PIPEHALF (2) +#define DWC3_GCTL_CLK_MASK (3) +#define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12) +#define DWC3_GCTL_PRTCAPDIR(n) ((n) << 12) +#define DWC3_GCTL_PRTCAP_HOST 1 +#define DWC3_GCTL_PRTCAP_DEVICE 2 +#define DWC3_GCTL_PRTCAP_OTG 3 +#define DWC3_GCTL_CORESOFTRESET (1 << 11) +#define DWC3_GCTL_SCALEDOWN(n) ((n) << 4) +#define DWC3_GCTL_SCALEDOWN_MASK DWC3_GCTL_SCALEDOWN(3) +#define DWC3_GCTL_DISSCRAMBLE (1 << 3) +#define DWC3_GCTL_DSBLCLKGTNG (1 << 0) + +/* Global HWPARAMS1 Register */ +#define DWC3_GHWPARAMS1_EN_PWROPT(n) (((n) & (3 << 24)) >> 24) +#define DWC3_GHWPARAMS1_EN_PWROPT_NO 0 +#define DWC3_GHWPARAMS1_EN_PWROPT_CLK 1 + +/* Global USB2 PHY Configuration Register */ +#define 
DWC3_GUSB2PHYCFG_PHYSOFTRST (1 << 31) +#define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS (1 << 30) +#define DWC3_GUSB2PHYCFG_ENBLSLPM (1 << 8) +#define DWC3_GUSB2PHYCFG_SUSPHY (1 << 6) +#define DWC3_GUSB2PHYCFG_PHYIF (1 << 3) + +/* Global USB2 PHY Configuration Mask */ +#define DWC3_GUSB2PHYCFG_USBTRDTIM_MASK (0xf << 10) + +/* Global USB2 PHY Configuration Offset */ +#define DWC3_GUSB2PHYCFG_USBTRDTIM_OFFSET 10 + +#define DWC3_GUSB2PHYCFG_USBTRDTIM_16BIT (0x5 << \ + DWC3_GUSB2PHYCFG_USBTRDTIM_OFFSET) +#define DWC3_GUSB2PHYCFG_USBTRDTIM_8BIT (0x9 << \ + DWC3_GUSB2PHYCFG_USBTRDTIM_OFFSET) + +/* Global USB3 PIPE Control Register */ +#define DWC3_GUSB3PIPECTL_PHYSOFTRST (1 << 31) +#define DWC3_GUSB3PIPECTL_DISRXDETP3 (1 << 28) +#define DWC3_GUSB3PIPECTL_SUSPHY (1 << 17) + +/* Global TX Fifo Size Register */ +#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff) +#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000) + +/* Device Control Register */ +#define DWC3_DCTL_RUN_STOP (1 << 31) +#define DWC3_DCTL_CSFTRST (1 << 30) +#define DWC3_DCTL_LSFTRST (1 << 29) + +/* Global Frame Length Adjustment Register */ +#define GFLADJ_30MHZ_REG_SEL (1 << 7) +#define GFLADJ_30MHZ(n) ((n) & 0x3f) +#define GFLADJ_30MHZ_DEFAULT 0x20 + +#ifdef CONFIG_USB_XHCI_DWC3 +void dwc3_set_mode(struct dwc3 *dwc3_reg, u32 mode); +void dwc3_core_soft_reset(struct dwc3 *dwc3_reg); +int dwc3_core_init(struct dwc3 *dwc3_reg); +void dwc3_set_fladj(struct dwc3 *dwc3_reg, u32 val); +#endif +#endif /* __DWC3_H_ */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h new file mode 100644 index 0000000..497798a --- /dev/null +++ b/include/linux/usb/gadget.h @@ -0,0 +1,947 @@ +/* + * + * + * We call the USB code inside a Linux-based peripheral device a "gadget" + * driver, except for the hardware-specific bus glue. One USB host can + * master many USB gadgets, but the gadgets are only slaved to one host. + * + * + * (C) Copyright 2002-2004 by David Brownell + * All Rights Reserved. + * + * This software is licensed under the GNU GPL version 2. + * + * Ported to U-Boot by: Thomas Smits and + * Remy Bohmer + */ + +#ifndef __LINUX_USB_GADGET_H +#define __LINUX_USB_GADGET_H + +#include +#include +#include +#include + +struct usb_ep; + +/** + * struct usb_request - describes one i/o request + * @buf: Buffer used for data. Always provide this; some controllers + * only use PIO, or don't use DMA for some endpoints. + * @dma: DMA address corresponding to 'buf'. If you don't set this + * field, and the usb controller needs one, it is responsible + * for mapping and unmapping the buffer. + * @stream_id: The stream id, when USB3.0 bulk streams are being used + * @length: Length of that data + * @no_interrupt: If true, hints that no completion irq is needed. + * Helpful sometimes with deep request queues that are handled + * directly by DMA controllers. + * @zero: If true, when writing data, makes the last packet be "short" + * by adding a zero length packet as needed; + * @short_not_ok: When reading data, makes short packets be + * treated as errors (queue stops advancing till cleanup). + * @complete: Function called when request completes, so this request and + * its buffer may be re-used. + * Reads terminate with a short packet, or when the buffer fills, + * whichever comes first. When writes terminate, some data bytes + * will usually still be in flight (often in a hardware fifo). 
+ * Errors (for reads or writes) stop the queue from advancing + * until the completion function returns, so that any transfers + * invalidated by the error may first be dequeued. + * @context: For use by the completion callback + * @list: For use by the gadget driver. + * @status: Reports completion code, zero or a negative errno. + * Normally, faults block the transfer queue from advancing until + * the completion callback returns. + * Code "-ESHUTDOWN" indicates completion caused by device disconnect, + * or when the driver disabled the endpoint. + * @actual: Reports bytes transferred to/from the buffer. For reads (OUT + * transfers) this may be less than the requested length. If the + * short_not_ok flag is set, short reads are treated as errors + * even when status otherwise indicates successful completion. + * Note that for writes (IN transfers) some data bytes may still + * reside in a device-side FIFO when the request is reported as + * complete. + * + * These are allocated/freed through the endpoint they're used with. The + * hardware's driver can add extra per-request data to the memory it returns, + * which often avoids separate memory allocations (potential failures), + * later when the request is queued. + * + * Request flags affect request handling, such as whether a zero length + * packet is written (the "zero" flag), whether a short read should be + * treated as an error (blocking request queue advance, the "short_not_ok" + * flag), or hinting that an interrupt is not required (the "no_interrupt" + * flag, for use with deep request queues). + * + * Bulk endpoints can use any size buffers, and can also be used for interrupt + * transfers. interrupt-only endpoints can be much less functional. + * + * NOTE: this is analagous to 'struct urb' on the host side, except that + * it's thinner and promotes more pre-allocation. + */ + +struct usb_request { + void *buf; + unsigned length; + dma_addr_t dma; + + unsigned stream_id:16; + unsigned no_interrupt:1; + unsigned zero:1; + unsigned short_not_ok:1; + + void (*complete)(struct usb_ep *ep, + struct usb_request *req); + void *context; + struct list_head list; + + int status; + unsigned actual; +}; + +/*-------------------------------------------------------------------------*/ + +/* endpoint-specific parts of the api to the usb controller hardware. + * unlike the urb model, (de)multiplexing layers are not required. + * (so this api could slash overhead if used on the host side...) + * + * note that device side usb controllers commonly differ in how many + * endpoints they support, as well as their capabilities. + */ +struct usb_ep_ops { + int (*enable) (struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc); + int (*disable) (struct usb_ep *ep); + + struct usb_request *(*alloc_request) (struct usb_ep *ep, + gfp_t gfp_flags); + void (*free_request) (struct usb_ep *ep, struct usb_request *req); + + int (*queue) (struct usb_ep *ep, struct usb_request *req, + gfp_t gfp_flags); + int (*dequeue) (struct usb_ep *ep, struct usb_request *req); + + int (*set_halt) (struct usb_ep *ep, int value); + int (*set_wedge)(struct usb_ep *ep); + int (*fifo_status) (struct usb_ep *ep); + void (*fifo_flush) (struct usb_ep *ep); +}; + +/** + * struct usb_ep - device side representation of USB endpoint + * @name:identifier for the endpoint, such as "ep-a" or "ep9in-bulk" + * @ops: Function pointers used to access hardware-specific operations. 
+ * @ep_list:the gadget's ep_list holds all of its endpoints + * @maxpacket:The maximum packet size used on this endpoint. The initial + * value can sometimes be reduced (hardware allowing), according to + * the endpoint descriptor used to configure the endpoint. + * @maxpacket_limit:The maximum packet size value which can be handled by this + * endpoint. It's set once by UDC driver when endpoint is initialized, and + * should not be changed. Should not be confused with maxpacket. + * @max_streams: The maximum number of streams supported + * by this EP (0 - 16, actual number is 2^n) + * @maxburst: the maximum number of bursts supported by this EP (for usb3) + * @driver_data:for use by the gadget driver. all other fields are + * read-only to gadget drivers. + * @desc: endpoint descriptor. This pointer is set before the endpoint is + * enabled and remains valid until the endpoint is disabled. + * @comp_desc: In case of SuperSpeed support, this is the endpoint companion + * descriptor that is used to configure the endpoint + * + * the bus controller driver lists all the general purpose endpoints in + * gadget->ep_list. the control endpoint (gadget->ep0) is not in that list, + * and is accessed only in response to a driver setup() callback. + */ +struct usb_ep { + void *driver_data; + const char *name; + const struct usb_ep_ops *ops; + struct list_head ep_list; + unsigned maxpacket:16; + unsigned maxpacket_limit:16; + unsigned max_streams:16; + unsigned maxburst:5; + const struct usb_endpoint_descriptor *desc; + const struct usb_ss_ep_comp_descriptor *comp_desc; +}; + +/*-------------------------------------------------------------------------*/ + +/** + * usb_ep_set_maxpacket_limit - set maximum packet size limit for endpoint + * @ep:the endpoint being configured + * @maxpacket_limit:value of maximum packet size limit + * + * This function shoud be used only in UDC drivers to initialize endpoint + * (usually in probe function). + */ +static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep, + unsigned maxpacket_limit) +{ + ep->maxpacket_limit = maxpacket_limit; + ep->maxpacket = maxpacket_limit; +} + +/** + * usb_ep_enable - configure endpoint, making it usable + * @ep:the endpoint being configured. may not be the endpoint named "ep0". + * drivers discover endpoints through the ep_list of a usb_gadget. + * @desc:descriptor for desired behavior. caller guarantees this pointer + * remains valid until the endpoint is disabled; the data byte order + * is little-endian (usb-standard). + * + * when configurations are set, or when interface settings change, the driver + * will enable or disable the relevant endpoints. while it is enabled, an + * endpoint may be used for i/o until the driver receives a disconnect() from + * the host or until the endpoint is disabled. + * + * the ep0 implementation (which calls this routine) must ensure that the + * hardware capabilities of each endpoint match the descriptor provided + * for it. for example, an endpoint named "ep2in-bulk" would be usable + * for interrupt transfers as well as bulk, but it likely couldn't be used + * for iso transfers or for endpoint 14. some endpoints are fully + * configurable, with more generic names like "ep-a". (remember that for + * USB, "in" means "towards the USB master".) + * + * returns zero, or a negative error code. 
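+ *
+ * As a rough, illustrative sketch only (the "my_"-prefixed names are
+ * hypothetical, not part of this API), an altsetting handler might enable
+ * one of its endpoints like this:
+ *
+ *    static int my_enable_ep(struct usb_ep *ep,
+ *            const struct usb_endpoint_descriptor *desc,
+ *            void *driver_state)
+ *    {
+ *        int ret = usb_ep_enable(ep, desc);
+ *
+ *        if (ret < 0)
+ *            return ret;    /* hardware rejected this descriptor */
+ *        ep->driver_data = driver_state;
+ *        return 0;
+ *    }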
+ */ +static inline int usb_ep_enable(struct usb_ep *ep, + const struct usb_endpoint_descriptor *desc) +{ + return ep->ops->enable(ep, desc); +} + +/** + * usb_ep_disable - endpoint is no longer usable + * @ep:the endpoint being unconfigured. may not be the endpoint named "ep0". + * + * no other task may be using this endpoint when this is called. + * any pending and uncompleted requests will complete with status + * indicating disconnect (-ESHUTDOWN) before this call returns. + * gadget drivers must call usb_ep_enable() again before queueing + * requests to the endpoint. + * + * returns zero, or a negative error code. + */ +static inline int usb_ep_disable(struct usb_ep *ep) +{ + return ep->ops->disable(ep); +} + +/** + * usb_ep_alloc_request - allocate a request object to use with this endpoint + * @ep:the endpoint to be used with with the request + * @gfp_flags:GFP_* flags to use + * + * Request objects must be allocated with this call, since they normally + * need controller-specific setup and may even need endpoint-specific + * resources such as allocation of DMA descriptors. + * Requests may be submitted with usb_ep_queue(), and receive a single + * completion callback. Free requests with usb_ep_free_request(), when + * they are no longer needed. + * + * Returns the request, or null if one could not be allocated. + */ +static inline struct usb_request *usb_ep_alloc_request(struct usb_ep *ep, + gfp_t gfp_flags) +{ + return ep->ops->alloc_request(ep, gfp_flags); +} + +/** + * usb_ep_free_request - frees a request object + * @ep:the endpoint associated with the request + * @req:the request being freed + * + * Reverses the effect of usb_ep_alloc_request(). + * Caller guarantees the request is not queued, and that it will + * no longer be requeued (or otherwise used). + */ +static inline void usb_ep_free_request(struct usb_ep *ep, + struct usb_request *req) +{ + ep->ops->free_request(ep, req); +} + +/** + * usb_ep_queue - queues (submits) an I/O request to an endpoint. + * @ep:the endpoint associated with the request + * @req:the request being submitted + * @gfp_flags: GFP_* flags to use in case the lower level driver couldn't + * pre-allocate all necessary memory with the request. + * + * This tells the device controller to perform the specified request through + * that endpoint (reading or writing a buffer). When the request completes, + * including being canceled by usb_ep_dequeue(), the request's completion + * routine is called to return the request to the driver. Any endpoint + * (except control endpoints like ep0) may have more than one transfer + * request queued; they complete in FIFO order. Once a gadget driver + * submits a request, that request may not be examined or modified until it + * is given back to that driver through the completion callback. + * + * Each request is turned into one or more packets. The controller driver + * never merges adjacent requests into the same packet. OUT transfers + * will sometimes use data that's already buffered in the hardware. + * Drivers can rely on the fact that the first byte of the request's buffer + * always corresponds to the first byte of some USB packet, for both + * IN and OUT transfers. + * + * Bulk endpoints can queue any amount of data; the transfer is packetized + * automatically. The last packet will be short if the request doesn't fill it + * out completely. Zero length packets (ZLPs) should be avoided in portable + * protocols since not all usb hardware can successfully handle zero length + * packets. 
(ZLPs may be explicitly written, and may be implicitly written if + * the request 'zero' flag is set.) Bulk endpoints may also be used + * for interrupt transfers; but the reverse is not true, and some endpoints + * won't support every interrupt transfer. (Such as 768 byte packets.) + * + * Interrupt-only endpoints are less functional than bulk endpoints, for + * example by not supporting queueing or not handling buffers that are + * larger than the endpoint's maxpacket size. They may also treat data + * toggle differently. + * + * Control endpoints ... after getting a setup() callback, the driver queues + * one response (even if it would be zero length). That enables the + * status ack, after transfering data as specified in the response. Setup + * functions may return negative error codes to generate protocol stalls. + * (Note that some USB device controllers disallow protocol stall responses + * in some cases.) When control responses are deferred (the response is + * written after the setup callback returns), then usb_ep_set_halt() may be + * used on ep0 to trigger protocol stalls. + * + * For periodic endpoints, like interrupt or isochronous ones, the usb host + * arranges to poll once per interval, and the gadget driver usually will + * have queued some data to transfer at that time. + * + * Returns zero, or a negative error code. Endpoints that are not enabled + * report errors; errors will also be + * reported when the usb peripheral is disconnected. + */ +static inline int usb_ep_queue(struct usb_ep *ep, + struct usb_request *req, gfp_t gfp_flags) +{ + return ep->ops->queue(ep, req, gfp_flags); +} + +/** + * usb_ep_dequeue - dequeues (cancels, unlinks) an I/O request from an endpoint + * @ep:the endpoint associated with the request + * @req:the request being canceled + * + * if the request is still active on the endpoint, it is dequeued and its + * completion routine is called (with status -ECONNRESET); else a negative + * error code is returned. + * + * note that some hardware can't clear out write fifos (to unlink the request + * at the head of the queue) except as part of disconnecting from usb. such + * restrictions prevent drivers from supporting configuration changes, + * even to configuration zero (a "chapter 9" requirement). + */ +static inline int usb_ep_dequeue(struct usb_ep *ep, struct usb_request *req) +{ + return ep->ops->dequeue(ep, req); +} + +/** + * usb_ep_set_halt - sets the endpoint halt feature. + * @ep: the non-isochronous endpoint being stalled + * + * Use this to stall an endpoint, perhaps as an error report. + * Except for control endpoints, + * the endpoint stays halted (will not stream any data) until the host + * clears this feature; drivers may need to empty the endpoint's request + * queue first, to make sure no inappropriate transfers happen. + * + * Note that while an endpoint CLEAR_FEATURE will be invisible to the + * gadget driver, a SET_INTERFACE will not be. To reset endpoints for the + * current altsetting, see usb_ep_clear_halt(). When switching altsettings, + * it's simplest to use usb_ep_enable() or usb_ep_disable() for the endpoints. + * + * Returns zero, or a negative error code. On success, this call sets + * underlying hardware state that blocks data transfers. + * Attempts to halt IN endpoints will fail (returning -EAGAIN) if any + * transfer requests are still queued, or if the controller hardware + * (usually a FIFO) still holds bytes that the host hasn't collected. 
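+ *
+ * Illustrative sketch only (my_report_error() is a hypothetical helper):
+ *
+ *    static void my_stall_on_error(struct usb_ep *ep)
+ *    {
+ *        int ret = usb_ep_set_halt(ep);
+ *
+ *        if (ret == -EAGAIN)
+ *            my_report_error("IN data still queued; retry later");
+ *        else if (ret < 0)
+ *            my_report_error("could not halt this endpoint");
+ *    }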
+ */ +static inline int usb_ep_set_halt(struct usb_ep *ep) +{ + return ep->ops->set_halt(ep, 1); +} + +/** + * usb_ep_clear_halt - clears endpoint halt, and resets toggle + * @ep:the bulk or interrupt endpoint being reset + * + * Use this when responding to the standard usb "set interface" request, + * for endpoints that aren't reconfigured, after clearing any other state + * in the endpoint's i/o queue. + * + * Returns zero, or a negative error code. On success, this call clears + * the underlying hardware state reflecting endpoint halt and data toggle. + * Note that some hardware can't support this request (like pxa2xx_udc), + * and accordingly can't correctly implement interface altsettings. + */ +static inline int usb_ep_clear_halt(struct usb_ep *ep) +{ + return ep->ops->set_halt(ep, 0); +} + +/** + * usb_ep_fifo_status - returns number of bytes in fifo, or error + * @ep: the endpoint whose fifo status is being checked. + * + * FIFO endpoints may have "unclaimed data" in them in certain cases, + * such as after aborted transfers. Hosts may not have collected all + * the IN data written by the gadget driver (and reported by a request + * completion). The gadget driver may not have collected all the data + * written OUT to it by the host. Drivers that need precise handling for + * fault reporting or recovery may need to use this call. + * + * This returns the number of such bytes in the fifo, or a negative + * errno if the endpoint doesn't use a FIFO or doesn't support such + * precise handling. + */ +static inline int usb_ep_fifo_status(struct usb_ep *ep) +{ + if (ep->ops->fifo_status) + return ep->ops->fifo_status(ep); + else + return -EOPNOTSUPP; +} + +/** + * usb_ep_fifo_flush - flushes contents of a fifo + * @ep: the endpoint whose fifo is being flushed. + * + * This call may be used to flush the "unclaimed data" that may exist in + * an endpoint fifo after abnormal transaction terminations. The call + * must never be used except when endpoint is not being used for any + * protocol translation. + */ +static inline void usb_ep_fifo_flush(struct usb_ep *ep) +{ + if (ep->ops->fifo_flush) + ep->ops->fifo_flush(ep); +} + + +/*-------------------------------------------------------------------------*/ + +struct usb_gadget; +struct usb_gadget_driver; + +/* the rest of the api to the controller hardware: device operations, + * which don't involve endpoints (or i/o). + */ +struct usb_gadget_ops { + int (*get_frame)(struct usb_gadget *); + int (*wakeup)(struct usb_gadget *); + int (*set_selfpowered) (struct usb_gadget *, int is_selfpowered); + int (*vbus_session) (struct usb_gadget *, int is_active); + int (*vbus_draw) (struct usb_gadget *, unsigned mA); + int (*pullup) (struct usb_gadget *, int is_on); + int (*ioctl)(struct usb_gadget *, + unsigned code, unsigned long param); + int (*udc_start)(struct usb_gadget *, + struct usb_gadget_driver *); + int (*udc_stop)(struct usb_gadget *); +}; + +/** + * struct usb_gadget - represents a usb slave device + * @ops: Function pointers used to access hardware-specific operations. + * @ep0: Endpoint zero, used when reading or writing responses to + * driver setup() requests + * @ep_list: List of other endpoints supported by the device. + * @speed: Speed of current connection to USB host. + * @max_speed: Maximal speed the UDC can handle. UDC must support this + * and all slower speeds. + * @is_dualspeed: true if the controller supports both high and full speed + * operation. If it does, the gadget driver must also support both. 
+ * @is_otg: true if the USB device port uses a Mini-AB jack, so that the + * gadget driver must provide a USB OTG descriptor. + * @is_a_peripheral: false unless is_otg, the "A" end of a USB cable + * is in the Mini-AB jack, and HNP has been used to switch roles + * so that the "A" device currently acts as A-Peripheral, not A-Host. + * @a_hnp_support: OTG device feature flag, indicating that the A-Host + * supports HNP at this port. + * @a_alt_hnp_support: OTG device feature flag, indicating that the A-Host + * only supports HNP on a different root port. + * @b_hnp_enable: OTG device feature flag, indicating that the A-Host + * enabled HNP support. + * @name: Identifies the controller hardware type. Used in diagnostics + * and sometimes configuration. + * @dev: Driver model state for this abstract device. + * @quirk_ep_out_aligned_size: epout requires buffer size to be aligned to + * MaxPacketSize. + * + * Gadgets have a mostly-portable "gadget driver" implementing device + * functions, handling all usb configurations and interfaces. Gadget + * drivers talk to hardware-specific code indirectly, through ops vectors. + * That insulates the gadget driver from hardware details, and packages + * the hardware endpoints through generic i/o queues. The "usb_gadget" + * and "usb_ep" interfaces provide that insulation from the hardware. + * + * Except for the driver data, all fields in this structure are + * read-only to the gadget driver. That driver data is part of the + * "driver model" infrastructure in 2.6 (and later) kernels, and for + * earlier systems is grouped in a similar structure that's not known + * to the rest of the kernel. + * + * Values of the three OTG device feature flags are updated before the + * setup() call corresponding to USB_REQ_SET_CONFIGURATION, and before + * driver suspend() calls. They are valid only when is_otg, and when the + * device is acting as a B-Peripheral (so is_a_peripheral is false). + */ +struct usb_gadget { + /* readonly to gadget driver */ + const struct usb_gadget_ops *ops; + struct usb_ep *ep0; + struct list_head ep_list; /* of usb_ep */ + enum usb_device_speed speed; + enum usb_device_speed max_speed; + enum usb_device_state state; + unsigned is_dualspeed:1; + unsigned is_otg:1; + unsigned is_a_peripheral:1; + unsigned b_hnp_enable:1; + unsigned a_hnp_support:1; + unsigned a_alt_hnp_support:1; + const char *name; + struct device dev; + unsigned quirk_ep_out_aligned_size:1; +}; + +static inline void set_gadget_data(struct usb_gadget *gadget, void *data) +{ + gadget->dev.driver_data = data; +} + +static inline void *get_gadget_data(struct usb_gadget *gadget) +{ + return gadget->dev.driver_data; +} + +static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) +{ + return container_of(dev, struct usb_gadget, dev); +} + +/* iterates the non-control endpoints; 'tmp' is a struct usb_ep pointer */ +#define gadget_for_each_ep(tmp, gadget) \ + list_for_each_entry(tmp, &(gadget)->ep_list, ep_list) + + +/** + * gadget_is_dualspeed - return true iff the hardware handles high speed + * @g: controller that might support both high and full speeds + */ +static inline int gadget_is_dualspeed(struct usb_gadget *g) +{ +#ifdef CONFIG_USB_GADGET_DUALSPEED + /* runtime test would check "g->is_dualspeed" ... 
that might be + * useful to work around hardware bugs, but is mostly pointless + */ + return 1; +#else + return 0; +#endif +} + +/** + * gadget_is_otg - return true iff the hardware is OTG-ready + * @g: controller that might have a Mini-AB connector + * + * This is a runtime test, since kernels with a USB-OTG stack sometimes + * run on boards which only have a Mini-B (or Mini-A) connector. + */ +static inline int gadget_is_otg(struct usb_gadget *g) +{ +#ifdef CONFIG_USB_OTG + return g->is_otg; +#else + return 0; +#endif +} + +/** + * usb_gadget_frame_number - returns the current frame number + * @gadget: controller that reports the frame number + * + * Returns the usb frame number, normally eleven bits from a SOF packet, + * or negative errno if this device doesn't support this capability. + */ +static inline int usb_gadget_frame_number(struct usb_gadget *gadget) +{ + return gadget->ops->get_frame(gadget); +} + +/** + * usb_gadget_wakeup - tries to wake up the host connected to this gadget + * @gadget: controller used to wake up the host + * + * Returns zero on success, else negative error code if the hardware + * doesn't support such attempts, or its support has not been enabled + * by the usb host. Drivers must return device descriptors that report + * their ability to support this, or hosts won't enable it. + * + * This may also try to use SRP to wake the host and start enumeration, + * even if OTG isn't otherwise in use. OTG devices may also start + * remote wakeup even when hosts don't explicitly enable it. + */ +static inline int usb_gadget_wakeup(struct usb_gadget *gadget) +{ + if (!gadget->ops->wakeup) + return -EOPNOTSUPP; + return gadget->ops->wakeup(gadget); +} + +/** + * usb_gadget_set_selfpowered - sets the device selfpowered feature. + * @gadget:the device being declared as self-powered + * + * this affects the device status reported by the hardware driver + * to reflect that it now has a local power supply. + * + * returns zero on success, else negative errno. + */ +static inline int usb_gadget_set_selfpowered(struct usb_gadget *gadget) +{ + if (!gadget->ops->set_selfpowered) + return -EOPNOTSUPP; + return gadget->ops->set_selfpowered(gadget, 1); +} + +/** + * usb_gadget_clear_selfpowered - clear the device selfpowered feature. + * @gadget:the device being declared as bus-powered + * + * this affects the device status reported by the hardware driver. + * some hardware may not support bus-powered operation, in which + * case this feature's value can never change. + * + * returns zero on success, else negative errno. + */ +static inline int usb_gadget_clear_selfpowered(struct usb_gadget *gadget) +{ + if (!gadget->ops->set_selfpowered) + return -EOPNOTSUPP; + return gadget->ops->set_selfpowered(gadget, 0); +} + +/** + * usb_gadget_vbus_connect - Notify controller that VBUS is powered + * @gadget:The device which now has VBUS power. + * + * This call is used by a driver for an external transceiver (or GPIO) + * that detects a VBUS power session starting. Common responses include + * resuming the controller, activating the D+ (or D-) pullup to let the + * host detect that a USB device is attached, and starting to draw power + * (8mA or possibly more, especially after SET_CONFIGURATION). + * + * Returns zero on success, else negative errno. 
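+ *
+ * A hedged sketch of how an external transceiver (or GPIO) driver might
+ * report session changes; my_vbus_event() and its arguments are hypothetical:
+ *
+ *    static void my_vbus_event(struct usb_gadget *gadget, int session_active)
+ *    {
+ *        if (session_active)
+ *            usb_gadget_vbus_connect(gadget);
+ *        else
+ *            usb_gadget_vbus_disconnect(gadget);
+ *    }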
+ */ +static inline int usb_gadget_vbus_connect(struct usb_gadget *gadget) +{ + if (!gadget->ops->vbus_session) + return -EOPNOTSUPP; + return gadget->ops->vbus_session(gadget, 1); +} + +/** + * usb_gadget_vbus_draw - constrain controller's VBUS power usage + * @gadget:The device whose VBUS usage is being described + * @mA:How much current to draw, in milliAmperes. This should be twice + * the value listed in the configuration descriptor bMaxPower field. + * + * This call is used by gadget drivers during SET_CONFIGURATION calls, + * reporting how much power the device may consume. For example, this + * could affect how quickly batteries are recharged. + * + * Returns zero on success, else negative errno. + */ +static inline int usb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA) +{ + if (!gadget->ops->vbus_draw) + return -EOPNOTSUPP; + return gadget->ops->vbus_draw(gadget, mA); +} + +/** + * usb_gadget_vbus_disconnect - notify controller about VBUS session end + * @gadget:the device whose VBUS supply is being described + * + * This call is used by a driver for an external transceiver (or GPIO) + * that detects a VBUS power session ending. Common responses include + * reversing everything done in usb_gadget_vbus_connect(). + * + * Returns zero on success, else negative errno. + */ +static inline int usb_gadget_vbus_disconnect(struct usb_gadget *gadget) +{ + if (!gadget->ops->vbus_session) + return -EOPNOTSUPP; + return gadget->ops->vbus_session(gadget, 0); +} + +/** + * usb_gadget_connect - software-controlled connect to USB host + * @gadget:the peripheral being connected + * + * Enables the D+ (or potentially D-) pullup. The host will start + * enumerating this gadget when the pullup is active and a VBUS session + * is active (the link is powered). This pullup is always enabled unless + * usb_gadget_disconnect() has been used to disable it. + * + * Returns zero on success, else negative errno. + */ +static inline int usb_gadget_connect(struct usb_gadget *gadget) +{ + if (!gadget->ops->pullup) + return -EOPNOTSUPP; + return gadget->ops->pullup(gadget, 1); +} + +/** + * usb_gadget_disconnect - software-controlled disconnect from USB host + * @gadget:the peripheral being disconnected + * + * Disables the D+ (or potentially D-) pullup, which the host may see + * as a disconnect (when a VBUS session is active). Not all systems + * support software pullup controls. + * + * This routine may be used during the gadget driver bind() call to prevent + * the peripheral from ever being visible to the USB host, unless later + * usb_gadget_connect() is called. For example, user mode components may + * need to be activated before the system can talk to hosts. + * + * Returns zero on success, else negative errno. + */ +static inline int usb_gadget_disconnect(struct usb_gadget *gadget) +{ + if (!gadget->ops->pullup) + return -EOPNOTSUPP; + return gadget->ops->pullup(gadget, 0); +} + + +/*-------------------------------------------------------------------------*/ + +/** + * struct usb_gadget_driver - driver for usb 'slave' devices + * @function: String describing the gadget's function + * @speed: Highest speed the driver handles. + * @bind: Invoked when the driver is bound to a gadget, usually + * after registering the driver. + * At that point, ep0 is fully initialized, and ep_list holds + * the currently-available endpoints. + * Called in a context that permits sleeping. + * @setup: Invoked for ep0 control requests that aren't handled by + * the hardware level driver. 
Most calls must be handled by + * the gadget driver, including descriptor and configuration + * management. The 16 bit members of the setup data are in + * USB byte order. Called in_interrupt; this may not sleep. Driver + * queues a response to ep0, or returns negative to stall. + * @disconnect: Invoked after all transfers have been stopped, + * when the host is disconnected. May be called in_interrupt; this + * may not sleep. Some devices can't detect disconnect, so this might + * not be called except as part of controller shutdown. + * @unbind: Invoked when the driver is unbound from a gadget, + * usually from rmmod (after a disconnect is reported). + * Called in a context that permits sleeping. + * @suspend: Invoked on USB suspend. May be called in_interrupt. + * @resume: Invoked on USB resume. May be called in_interrupt. + * @reset: Invoked on USB bus reset. It is mandatory for all gadget drivers + * and should be called in_interrupt. + * + * Devices are disabled till a gadget driver successfully bind()s, which + * means the driver will handle setup() requests needed to enumerate (and + * meet "chapter 9" requirements) then do some useful work. + * + * If gadget->is_otg is true, the gadget driver must provide an OTG + * descriptor during enumeration, or else fail the bind() call. In such + * cases, no USB traffic may flow until both bind() returns without + * having called usb_gadget_disconnect(), and the USB host stack has + * initialized. + * + * Drivers use hardware-specific knowledge to configure the usb hardware. + * endpoint addressing is only one of several hardware characteristics that + * are in descriptors the ep0 implementation returns from setup() calls. + * + * Except for ep0 implementation, most driver code shouldn't need change to + * run on top of different usb controllers. It'll use endpoints set up by + * that ep0 implementation. + * + * The usb controller driver handles a few standard usb requests. Those + * include set_address, and feature flags for devices, interfaces, and + * endpoints (the get_status, set_feature, and clear_feature requests). + * + * Accordingly, the driver's setup() callback must always implement all + * get_descriptor requests, returning at least a device descriptor and + * a configuration descriptor. Drivers must make sure the endpoint + * descriptors match any hardware constraints. Some hardware also constrains + * other descriptors. (The pxa250 allows only configurations 1, 2, or 3). + * + * The driver's setup() callback must also implement set_configuration, + * and should also implement set_interface, get_configuration, and + * get_interface. Setting a configuration (or interface) is where + * endpoints should be activated or (config 0) shut down. + * + * (Note that only the default control endpoint is supported. Neither + * hosts nor devices generally support control traffic except to ep0.) + * + * Most devices will ignore USB suspend/resume operations, and so will + * not provide those callbacks. However, some may need to change modes + * when the host is not longer directing those activities. For example, + * local controls (buttons, dials, etc) may need to be re-enabled since + * the (remote) host can't do that any longer; or an error state might + * be cleared, to make the device behave identically whether or not + * power is maintained. 
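+ *
+ * For orientation only, a skeletal driver might look roughly like the sketch
+ * below; every "my_"-prefixed name is hypothetical and the callbacks are
+ * stubs, not a complete implementation:
+ *
+ *    static int my_bind(struct usb_gadget *gadget)
+ *    {
+ *        /* pick endpoints from gadget->ep_list, allocate requests */
+ *        return 0;
+ *    }
+ *
+ *    static void my_unbind(struct usb_gadget *gadget)
+ *    {
+ *        /* release whatever my_bind() allocated */
+ *    }
+ *
+ *    static int my_setup(struct usb_gadget *gadget,
+ *            const struct usb_ctrlrequest *ctrl)
+ *    {
+ *        /* return descriptors, handle SET_CONFIGURATION, ... */
+ *        return -EOPNOTSUPP;    /* stall requests we don't handle */
+ *    }
+ *
+ *    static void my_disconnect(struct usb_gadget *gadget)
+ *    {
+ *        /* forget the host's configuration */
+ *    }
+ *
+ *    static struct usb_gadget_driver my_driver = {
+ *        .function   = (char *)"my example function",
+ *        .speed      = USB_SPEED_HIGH,
+ *        .bind       = my_bind,
+ *        .unbind     = my_unbind,
+ *        .setup      = my_setup,
+ *        .disconnect = my_disconnect,
+ *    };
+ *
+ * Such a driver would then be handed to the controller driver with
+ * usb_gadget_register_driver(&my_driver), declared further below.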
+ */ +struct usb_gadget_driver { + char *function; + enum usb_device_speed speed; + int (*bind)(struct usb_gadget *); + void (*unbind)(struct usb_gadget *); + int (*setup)(struct usb_gadget *, + const struct usb_ctrlrequest *); + void (*disconnect)(struct usb_gadget *); + void (*suspend)(struct usb_gadget *); + void (*resume)(struct usb_gadget *); + void (*reset)(struct usb_gadget *); +}; + + +/*-------------------------------------------------------------------------*/ + +/* driver modules register and unregister, as usual. + * these calls must be made in a context that can sleep. + * + * these will usually be implemented directly by the hardware-dependent + * usb bus interface driver, which will only support a single driver. + */ + +/** + * usb_gadget_register_driver - register a gadget driver + * @driver:the driver being registered + * + * Call this in your gadget driver's module initialization function, + * to tell the underlying usb controller driver about your driver. + * The driver's bind() function will be called to bind it to a + * gadget before this registration call returns. It's expected that + * the bind() functions will be in init sections. + * This function must be called in a context that can sleep. + */ +int usb_gadget_register_driver(struct usb_gadget_driver *driver); + +/** + * usb_gadget_unregister_driver - unregister a gadget driver + * @driver:the driver being unregistered + * + * Call this in your gadget driver's module cleanup function, + * to tell the underlying usb controller that your driver is + * going away. If the controller is connected to a USB host, + * it will first disconnect(). The driver is also requested + * to unbind() and clean up any device state, before this procedure + * finally returns. It's expected that the unbind() functions + * will in in exit sections, so may not be linked in some kernels. + * This function must be called in a context that can sleep. + */ +int usb_gadget_unregister_driver(struct usb_gadget_driver *driver); + +int usb_add_gadget_udc_release(struct device *parent, + struct usb_gadget *gadget, void (*release)(struct device *dev)); +int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); +void usb_del_gadget_udc(struct usb_gadget *gadget); +/*-------------------------------------------------------------------------*/ + +/* utility to simplify dealing with string descriptors */ + +/** + * struct usb_gadget_strings - a set of USB strings in a given language + * @language:identifies the strings' language (0x0409 for en-us) + * @strings:array of strings with their ids + * + * If you're using usb_gadget_get_string(), use this to wrap all the + * strings for a given language. 
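+ *
+ * A small illustration (the ids and text are placeholders; each struct
+ * usb_string entry pairs an id with its string):
+ *
+ *    static struct usb_string my_strings[] = {
+ *        { 1, "My Manufacturer" },
+ *        { 2, "My Product" },
+ *        { }    /* end of list */
+ *    };
+ *
+ *    static struct usb_gadget_strings my_stringtab = {
+ *        .language = 0x0409,    /* en-us */
+ *        .strings  = my_strings,
+ *    };
+ *
+ * The descriptor for string id 2 would then come from
+ * usb_gadget_get_string(&my_stringtab, 2, buf), with buf at least 256 bytes.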
+ */ +struct usb_gadget_strings { + u16 language; /* 0x0409 for en-us */ + struct usb_string *strings; +}; + +/* put descriptor for string with that id into buf (buflen >= 256) */ +int usb_gadget_get_string(struct usb_gadget_strings *table, int id, u8 *buf); + +/*-------------------------------------------------------------------------*/ + +/* utility to simplify managing config descriptors */ + +/* write vector of descriptors into buffer */ +int usb_descriptor_fillbuf(void *, unsigned, + const struct usb_descriptor_header **); + +/* build config descriptor from single descriptor vector */ +int usb_gadget_config_buf(const struct usb_config_descriptor *config, + void *buf, unsigned buflen, const struct usb_descriptor_header **desc); + +/*-------------------------------------------------------------------------*/ +/* utility to simplify map/unmap of usb_requests to/from DMA */ + +extern int usb_gadget_map_request(struct usb_gadget *gadget, + struct usb_request *req, int is_in); + +extern void usb_gadget_unmap_request(struct usb_gadget *gadget, + struct usb_request *req, int is_in); + +/*-------------------------------------------------------------------------*/ + +/* utility to set gadget state properly */ + +extern void usb_gadget_set_state(struct usb_gadget *gadget, + enum usb_device_state state); + +/*-------------------------------------------------------------------------*/ + +/* utility to tell udc core that the bus reset occurs */ +extern void usb_gadget_udc_reset(struct usb_gadget *gadget, + struct usb_gadget_driver *driver); + +/*-------------------------------------------------------------------------*/ + +/* utility to give requests back to the gadget layer */ + +extern void usb_gadget_giveback_request(struct usb_ep *ep, + struct usb_request *req); + +/*-------------------------------------------------------------------------*/ + +/* utility wrapping a simple endpoint selection policy */ + +extern struct usb_ep *usb_ep_autoconfig(struct usb_gadget *, + struct usb_endpoint_descriptor *); + +extern void usb_ep_autoconfig_reset(struct usb_gadget *); + +extern int usb_gadget_handle_interrupts(int index); + +#if CONFIG_IS_ENABLED(DM_USB_GADGET) +int usb_gadget_initialize(int index); +int usb_gadget_release(int index); +int dm_usb_gadget_handle_interrupts(struct udevice *dev); +#else +#include +static inline int usb_gadget_initialize(int index) +{ + return board_usb_init(index, USB_INIT_DEVICE); +} + +static inline int usb_gadget_release(int index) +{ + return board_usb_cleanup(index, USB_INIT_DEVICE); +} +#endif + +#endif /* __LINUX_USB_GADGET_H */ diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h new file mode 100644 index 0000000..a31ce67 --- /dev/null +++ b/include/linux/usb/musb.h @@ -0,0 +1,156 @@ +/* + * This is used to for host and peripheral modes of the driver for + * Inventra (Multidrop) Highspeed Dual-Role Controllers: (M)HDRC. + * + * Board initialization should put one of these into dev->platform_data, + * probably on some platform_device named "musb-hdrc". It encapsulates + * key configuration differences between boards. + */ + +#ifndef __LINUX_USB_MUSB_H +#define __LINUX_USB_MUSB_H + +#ifndef __deprecated +#define __deprecated +#endif + +#include + +/* The USB role is defined by the connector used on the board, so long as + * standards are being followed. (Developer boards sometimes won't.) 
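+ *
+ * As a rough board-file sketch using the types declared below (all values
+ * and "my_"-prefixed names are made up for illustration):
+ *
+ *    static struct musb_fifo_cfg my_fifo_cfg[] = {
+ *        MUSB_EP_FIFO_SINGLE(1, FIFO_TX, 512),
+ *        MUSB_EP_FIFO_SINGLE(1, FIFO_RX, 512),
+ *    };
+ *
+ *    static struct musb_hdrc_config my_config = {
+ *        .fifo_cfg      = my_fifo_cfg,
+ *        .fifo_cfg_size = 2,
+ *        .multipoint    = 1,
+ *        .num_eps       = 16,
+ *        .ram_bits      = 12,
+ *    };
+ *
+ *    static struct musb_hdrc_platform_data my_pdata = {
+ *        .mode   = MUSB_OTG,    /* Mini-AB connector on this board */
+ *        .config = &my_config,
+ *        .power  = 250,         /* 500 mA, in units of 2 mA */
+ *    };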
+ */ +enum musb_mode { + MUSB_UNDEFINED = 0, + MUSB_HOST, /* A or Mini-A connector */ + MUSB_PERIPHERAL, /* B or Mini-B connector */ + MUSB_OTG /* Mini-AB connector */ +}; + +struct clk; + +enum musb_fifo_style { + FIFO_RXTX, + FIFO_TX, + FIFO_RX +} __attribute__ ((packed)); + +enum musb_buf_mode { + BUF_SINGLE, + BUF_DOUBLE +} __attribute__ ((packed)); + +struct musb_fifo_cfg { + u8 hw_ep_num; + enum musb_fifo_style style; + enum musb_buf_mode mode; + u16 maxpacket; +}; + +#define MUSB_EP_FIFO(ep, st, m, pkt) \ +{ \ + .hw_ep_num = ep, \ + .style = st, \ + .mode = m, \ + .maxpacket = pkt, \ +} + +#define MUSB_EP_FIFO_SINGLE(ep, st, pkt) \ + MUSB_EP_FIFO(ep, st, BUF_SINGLE, pkt) + +#define MUSB_EP_FIFO_DOUBLE(ep, st, pkt) \ + MUSB_EP_FIFO(ep, st, BUF_DOUBLE, pkt) + +struct musb_hdrc_eps_bits { + const char name[16]; + u8 bits; +}; + +struct musb_hdrc_config { + struct musb_fifo_cfg *fifo_cfg; /* board fifo configuration */ + unsigned fifo_cfg_size; /* size of the fifo configuration */ + + /* MUSB configuration-specific details */ + unsigned multipoint:1; /* multipoint device */ + unsigned dyn_fifo:1 __deprecated; /* supports dynamic fifo sizing */ + unsigned soft_con:1 __deprecated; /* soft connect required */ + unsigned utm_16:1 __deprecated; /* utm data witdh is 16 bits */ + unsigned big_endian:1; /* true if CPU uses big-endian */ + unsigned mult_bulk_tx:1; /* Tx ep required for multbulk pkts */ + unsigned mult_bulk_rx:1; /* Rx ep required for multbulk pkts */ + unsigned high_iso_tx:1; /* Tx ep required for HB iso */ + unsigned high_iso_rx:1; /* Rx ep required for HD iso */ + unsigned dma:1 __deprecated; /* supports DMA */ + unsigned vendor_req:1 __deprecated; /* vendor registers required */ + + u8 num_eps; /* number of endpoints _with_ ep0 */ + u8 dma_channels __deprecated; /* number of dma channels */ + u8 dyn_fifo_size; /* dynamic size in bytes */ + u8 vendor_ctrl __deprecated; /* vendor control reg width */ + u8 vendor_stat __deprecated; /* vendor status reg witdh */ + u8 dma_req_chan __deprecated; /* bitmask for required dma channels */ + u8 ram_bits; /* ram address size */ + + struct musb_hdrc_eps_bits *eps_bits __deprecated; +}; + +struct musb_hdrc_platform_data { + /* MUSB_HOST, MUSB_PERIPHERAL, or MUSB_OTG */ + u8 mode; + + /* for clk_get() */ + const char *clock; + + /* (HOST or OTG) switch VBUS on/off */ + int (*set_vbus)(struct device *dev, int is_on); + + /* (HOST or OTG) mA/2 power supplied on (default = 8mA) */ + u8 power; + + /* (PERIPHERAL) mA/2 max power consumed (default = 100mA) */ + u8 min_power; + + /* (HOST or OTG) msec/2 after VBUS on till power good */ + u8 potpgt; + + /* (HOST or OTG) program PHY for external Vbus */ + unsigned extvbus:1; + + /* Power the device on or off */ + int (*set_power)(int state); + + /* MUSB configuration-specific details */ + struct musb_hdrc_config *config; + + /* Architecture specific board data */ + void *board_data; + + /* Platform specific struct musb_ops pointer */ + const void *platform_ops; +}; + + +/* TUSB 6010 support */ + +#define TUSB6010_OSCCLK_60 16667 /* psec/clk @ 60.0 MHz */ +#define TUSB6010_REFCLK_24 41667 /* psec/clk @ 24.0 MHz XI */ +#define TUSB6010_REFCLK_19 52083 /* psec/clk @ 19.2 MHz CLKIN */ + +#ifdef CONFIG_ARCH_OMAP2PLUS + +extern int __init tusb6010_setup_interface( + struct musb_hdrc_platform_data *data, + unsigned ps_refclk, unsigned waitpin, + unsigned async_cs, unsigned sync_cs, + unsigned irq, unsigned dmachan); + +extern int tusb6010_platform_retime(unsigned is_refclk); + +#endif /* OMAP2 */ + +/* + * 
U-Boot specific stuff + */ +struct musb *musb_register(struct musb_hdrc_platform_data *plat, void *bdata, + void *ctl_regs); + +#endif /* __LINUX_USB_MUSB_H */ diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h new file mode 100644 index 0000000..d2604c5 --- /dev/null +++ b/include/linux/usb/otg.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* include/linux/usb/otg.h + * + * Copyright (c) 2015 Texas Instruments Incorporated - http://www.ti.com + * + * USB OTG (On The Go) defines + */ + +#ifndef __LINUX_USB_OTG_H +#define __LINUX_USB_OTG_H + +enum usb_dr_mode { + USB_DR_MODE_UNKNOWN, + USB_DR_MODE_HOST, + USB_DR_MODE_PERIPHERAL, + USB_DR_MODE_OTG, +}; + +/** + * usb_get_dr_mode() - Get dual role mode for given device + * @node: Node offset to the given device + * + * The function gets phy interface string from property 'dr_mode', + * and returns the corresponding enum usb_dr_mode + */ +enum usb_dr_mode usb_get_dr_mode(int node); + +/** + * usb_get_maximum_speed() - Get maximum speed for given device + * @node: Node offset to the given device + * + * The function gets phy interface string from property 'maximum-speed', + * and returns the corresponding enum usb_device_speed + */ +enum usb_device_speed usb_get_maximum_speed(int node); + +#endif /* __LINUX_USB_OTG_H */ diff --git a/include/linux/usb/xhci-fsl.h b/include/linux/usb/xhci-fsl.h new file mode 100644 index 0000000..1367149 --- /dev/null +++ b/include/linux/usb/xhci-fsl.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2015 Freescale Semiconductor, Inc. + * + * FSL USB HOST xHCI Controller + * + * Author: Ramneek Mehresh + */ + +#ifndef _ASM_ARCH_XHCI_FSL_H_ +#define _ASM_ARCH_XHCI_FSL_H_ + +/* Default to the FSL XHCI defines */ +#define USB3_PWRCTL_CLK_CMD_MASK 0x3FE000 +#define USB3_PWRCTL_CLK_FREQ_MASK 0xFFC +#define USB3_PHY_PARTIAL_RX_POWERON BIT(6) +#define USB3_PHY_RX_POWERON BIT(14) +#define USB3_PHY_TX_POWERON BIT(15) +#define USB3_PHY_TX_RX_POWERON (USB3_PHY_RX_POWERON | USB3_PHY_TX_POWERON) +#define USB3_PWRCTL_CLK_CMD_SHIFT 14 +#define USB3_PWRCTL_CLK_FREQ_SHIFT 22 +#define USB3_ENABLE_BEAT_BURST 0xF +#define USB3_ENABLE_BEAT_BURST_MASK 0xFF +#define USB3_SET_BEAT_BURST_LIMIT 0xF00 + +/* USBOTGSS_WRAPPER definitions */ +#define USBOTGSS_WRAPRESET BIT(17) +#define USBOTGSS_DMADISABLE BIT(16) +#define USBOTGSS_STANDBYMODE_NO_STANDBY BIT(4) +#define USBOTGSS_STANDBYMODE_SMRT BIT(5) +#define USBOTGSS_STANDBYMODE_SMRT_WKUP (0x3 << 4) +#define USBOTGSS_IDLEMODE_NOIDLE BIT(2) +#define USBOTGSS_IDLEMODE_SMRT BIT(3) +#define USBOTGSS_IDLEMODE_SMRT_WKUP (0x3 << 2) + +/* USBOTGSS_IRQENABLE_SET_0 bit */ +#define USBOTGSS_COREIRQ_EN BIT(1) + +/* USBOTGSS_IRQENABLE_SET_1 bits */ +#define USBOTGSS_IRQ_SET_1_IDPULLUP_FALL_EN BIT(1) +#define USBOTGSS_IRQ_SET_1_DISCHRGVBUS_FALL_EN BIT(3) +#define USBOTGSS_IRQ_SET_1_CHRGVBUS_FALL_EN BIT(4) +#define USBOTGSS_IRQ_SET_1_DRVVBUS_FALL_EN BIT(5) +#define USBOTGSS_IRQ_SET_1_IDPULLUP_RISE_EN BIT(8) +#define USBOTGSS_IRQ_SET_1_DISCHRGVBUS_RISE_EN BIT(11) +#define USBOTGSS_IRQ_SET_1_CHRGVBUS_RISE_EN BIT(12) +#define USBOTGSS_IRQ_SET_1_DRVVBUS_RISE_EN BIT(13) +#define USBOTGSS_IRQ_SET_1_OEVT_EN BIT(16) +#define USBOTGSS_IRQ_SET_1_DMADISABLECLR_EN BIT(17) + +struct fsl_xhci { + struct xhci_hccr *hcd; + struct dwc3 *dwc3_reg; +}; + +#if defined(CONFIG_ARCH_LS1021A) || defined(CONFIG_ARCH_LS1012A) +#define CONFIG_SYS_FSL_XHCI_USB1_ADDR CONFIG_SYS_XHCI_USB1_ADDR +#define CONFIG_SYS_FSL_XHCI_USB2_ADDR 0 +#define CONFIG_SYS_FSL_XHCI_USB3_ADDR 0 +#elif 
defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) +#define CONFIG_SYS_FSL_XHCI_USB1_ADDR CONFIG_SYS_XHCI_USB1_ADDR +#define CONFIG_SYS_FSL_XHCI_USB2_ADDR CONFIG_SYS_XHCI_USB2_ADDR +#define CONFIG_SYS_FSL_XHCI_USB3_ADDR 0 +#elif defined(CONFIG_ARCH_LS1043A) || defined(CONFIG_ARCH_LS1046A) +#define CONFIG_SYS_FSL_XHCI_USB1_ADDR CONFIG_SYS_XHCI_USB1_ADDR +#define CONFIG_SYS_FSL_XHCI_USB2_ADDR CONFIG_SYS_XHCI_USB2_ADDR +#define CONFIG_SYS_FSL_XHCI_USB3_ADDR CONFIG_SYS_XHCI_USB3_ADDR +#endif + +#define FSL_USB_XHCI_ADDR {CONFIG_SYS_FSL_XHCI_USB1_ADDR, \ + CONFIG_SYS_FSL_XHCI_USB2_ADDR, \ + CONFIG_SYS_FSL_XHCI_USB3_ADDR} +#endif /* _ASM_ARCH_XHCI_FSL_H_ */ diff --git a/include/linux/usb/xhci-omap.h b/include/linux/usb/xhci-omap.h new file mode 100644 index 0000000..ce9c074 --- /dev/null +++ b/include/linux/usb/xhci-omap.h @@ -0,0 +1,145 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * (C) Copyright 2013 + * Texas Instruments Inc, + * + * Author: Dan Murphy + */ + +#ifndef _ASM_ARCH_XHCI_OMAP_H_ +#define _ASM_ARCH_XHCI_OMAP_H_ + +#ifdef CONFIG_DRA7XX +#if CONFIG_USB_XHCI_DRA7XX_INDEX == 1 +#define OMAP_XHCI_BASE 0x488d0000 +#define OMAP_OCP1_SCP_BASE 0x4A081000 +#define OMAP_OTG_WRAPPER_BASE 0x488c0000 +#elif CONFIG_USB_XHCI_DRA7XX_INDEX == 0 +#define OMAP_XHCI_BASE 0x48890000 +#define OMAP_OCP1_SCP_BASE 0x4A084c00 +#define OMAP_OTG_WRAPPER_BASE 0x48880000 +#endif /* CONFIG_USB_XHCI_DRA7XX_INDEX == 1 */ +#elif defined CONFIG_AM43XX +#define OMAP_XHCI_BASE 0x483d0000 +#define OMAP_OCP1_SCP_BASE 0x483E8000 +#define OMAP_OTG_WRAPPER_BASE 0x483dc100 +#else +/* Default to the OMAP5 XHCI defines */ +#define OMAP_XHCI_BASE 0x4a030000 +#define OMAP_OCP1_SCP_BASE 0x4a084c00 +#define OMAP_OTG_WRAPPER_BASE 0x4A020000 +#endif + +/* Phy register MACRO definitions */ +#define PLL_REGM_MASK 0x001FFE00 +#define PLL_REGM_SHIFT 0x9 +#define PLL_REGM_F_MASK 0x0003FFFF +#define PLL_REGM_F_SHIFT 0x0 +#define PLL_REGN_MASK 0x000001FE +#define PLL_REGN_SHIFT 0x1 +#define PLL_SELFREQDCO_MASK 0x0000000E +#define PLL_SELFREQDCO_SHIFT 0x1 +#define PLL_SD_MASK 0x0003FC00 +#define PLL_SD_SHIFT 0x9 +#define SET_PLL_GO 0x1 +#define PLL_TICOPWDN 0x10000 +#define PLL_LOCK 0x2 +#define PLL_IDLE 0x1 + +#define USB3_PWRCTL_CLK_CMD_MASK 0x3FE000 +#define USB3_PWRCTL_CLK_FREQ_MASK 0xFFC +#define USB3_PHY_PARTIAL_RX_POWERON (1 << 6) +#define USB3_PHY_RX_POWERON (1 << 14) +#define USB3_PHY_TX_POWERON (1 << 15) +#define USB3_PHY_TX_RX_POWERON (USB3_PHY_RX_POWERON | USB3_PHY_TX_POWERON) +#define USB3_PWRCTL_CLK_CMD_SHIFT 14 +#define USB3_PWRCTL_CLK_FREQ_SHIFT 22 + +/* USBOTGSS_WRAPPER definitions */ +#define USBOTGSS_WRAPRESET (1 << 17) +#define USBOTGSS_DMADISABLE (1 << 16) +#define USBOTGSS_STANDBYMODE_NO_STANDBY (1 << 4) +#define USBOTGSS_STANDBYMODE_SMRT (1 << 5) +#define USBOTGSS_STANDBYMODE_SMRT_WKUP (0x3 << 4) +#define USBOTGSS_IDLEMODE_NOIDLE (1 << 2) +#define USBOTGSS_IDLEMODE_SMRT (1 << 3) +#define USBOTGSS_IDLEMODE_SMRT_WKUP (0x3 << 2) + +/* USBOTGSS_IRQENABLE_SET_0 bit */ +#define USBOTGSS_COREIRQ_EN (1 << 0) + +/* USBOTGSS_IRQENABLE_SET_1 bits */ +#define USBOTGSS_IRQ_SET_1_IDPULLUP_FALL_EN (1 << 0) +#define USBOTGSS_IRQ_SET_1_DISCHRGVBUS_FALL_EN (1 << 3) +#define USBOTGSS_IRQ_SET_1_CHRGVBUS_FALL_EN (1 << 4) +#define USBOTGSS_IRQ_SET_1_DRVVBUS_FALL_EN (1 << 5) +#define USBOTGSS_IRQ_SET_1_IDPULLUP_RISE_EN (1 << 8) +#define USBOTGSS_IRQ_SET_1_DISCHRGVBUS_RISE_EN (1 << 11) +#define USBOTGSS_IRQ_SET_1_CHRGVBUS_RISE_EN (1 << 12) +#define USBOTGSS_IRQ_SET_1_DRVVBUS_RISE_EN (1 << 13) +#define 
USBOTGSS_IRQ_SET_1_OEVT_EN (1 << 16) +#define USBOTGSS_IRQ_SET_1_DMADISABLECLR_EN (1 << 17) + +/* + * USBOTGSS_WRAPPER registers + */ +struct omap_dwc_wrapper { + u32 revision; + + u32 reserve_1[3]; + + u32 sysconfig; /* offset of 0x10 */ + + u32 reserve_2[3]; + u16 reserve_3; + + u32 irqstatus_raw_0; /* offset of 0x24 */ + u32 irqstatus_0; + u32 irqenable_set_0; + u32 irqenable_clr_0; + + u32 irqstatus_raw_1; /* offset of 0x34 */ + u32 irqstatus_1; + u32 irqenable_set_1; + u32 irqenable_clr_1; + + u32 reserve_4[15]; + + u32 utmi_otg_ctrl; /* offset of 0x80 */ + u32 utmi_otg_status; + + u32 reserve_5[30]; + + u32 mram_offset; /* offset of 0x100 */ + u32 fladj; + u32 dbg_config; + u32 dbg_data; + u32 dev_ebc_en; +}; + +/* XHCI PHY register structure */ +struct omap_usb3_phy { + u32 reserve1; + u32 pll_status; + u32 pll_go; + u32 pll_config_1; + u32 pll_config_2; + u32 pll_config_3; + u32 pll_ssc_config_1; + u32 pll_ssc_config_2; + u32 pll_config_4; +}; + +struct omap_xhci { + struct omap_dwc_wrapper *otg_wrapper; + struct omap_usb3_phy *usb3_phy; + struct xhci_hccr *hcd; + struct dwc3 *dwc3_reg; +}; + +/* USB PHY functions */ +void omap_enable_phy(struct omap_xhci *omap); +void omap_reset_usb_phy(struct dwc3 *dwc3_reg); +void usb_phy_power(int on); + +#endif /* _ASM_ARCH_XHCI_OMAP_H_ */ diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h new file mode 100644 index 0000000..85feb67 --- /dev/null +++ b/include/linux/xxhash.h @@ -0,0 +1,229 @@ +/* SPDX-License-Identifier: (GPL-2.0 or BSD-2-Clause) */ +/* + * xxHash - Extremely Fast Hash algorithm + * Copyright (C) 2012-2016, Yann Collet. + * + * You can contact the author at: + * - xxHash homepage: http://cyan4973.github.io/xxHash/ + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ + +/* + * Notice extracted from xxHash homepage: + * + * xxHash is an extremely fast Hash algorithm, running at RAM speed limits. + * It also successfully passes all tests from the SMHasher suite. + * + * Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 + * Duo @3GHz) + * + * Name Speed Q.Score Author + * xxHash 5.4 GB/s 10 + * CrapWow 3.2 GB/s 2 Andrew + * MumurHash 3a 2.7 GB/s 10 Austin Appleby + * SpookyHash 2.0 GB/s 10 Bob Jenkins + * SBox 1.4 GB/s 9 Bret Mulvey + * Lookup3 1.2 GB/s 9 Bob Jenkins + * SuperFastHash 1.2 GB/s 1 Paul Hsieh + * CityHash64 1.05 GB/s 10 Pike & Alakuijala + * FNV 0.55 GB/s 5 Fowler, Noll, Vo + * CRC32 0.43 GB/s 9 + * MD5-32 0.33 GB/s 10 Ronald L. Rivest + * SHA1-32 0.28 GB/s 10 + * + * Q.Score is a measure of quality of the hash function. + * It depends on successfully passing SMHasher test set. + * 10 is a perfect score. + * + * A 64-bits version, named xxh64 offers much better speed, + * but for 64-bits applications only. + * Name Speed on 64 bits Speed on 32 bits + * xxh64 13.8 GB/s 1.9 GB/s + * xxh32 6.8 GB/s 6.0 GB/s + */ + +#ifndef XXHASH_H +#define XXHASH_H + +#include + +/*-**************************** + * Simple Hash Functions + *****************************/ + +/** + * xxh32() - calculate the 32-bit hash of the input with a given seed. + * + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s + * + * Return: The 32-bit hash of the data. + */ +uint32_t xxh32(const void *input, size_t length, uint32_t seed); + +/** + * xxh64() - calculate the 64-bit hash of the input with a given seed. 
+ * + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * This function runs 2x faster on 64-bit systems, but slower on 32-bit systems. + * + * Return: The 64-bit hash of the data. + */ +uint64_t xxh64(const void *input, size_t length, uint64_t seed); + +/** + * xxhash() - calculate wordsize hash of the input with a given seed + * @input: The data to hash. + * @length: The length of the data to hash. + * @seed: The seed can be used to alter the result predictably. + * + * If the hash does not need to be comparable between machines with + * different word sizes, this function will call whichever of xxh32() + * or xxh64() is faster. + * + * Return: wordsize hash of the data. + */ + +static inline unsigned long xxhash(const void *input, size_t length, + uint64_t seed) +{ +#if BITS_PER_LONG == 64 + return xxh64(input, length, seed); +#else + return xxh32(input, length, seed); +#endif +} + +/*-**************************** + * Streaming Hash Functions + *****************************/ + +/* + * These definitions are only meant to allow allocation of XXH state + * statically, on stack, or in a struct for example. + * Do not use members directly. + */ + +/** + * struct xxh32_state - private xxh32 state, do not use members directly + */ +struct xxh32_state { + uint32_t total_len_32; + uint32_t large_len; + uint32_t v1; + uint32_t v2; + uint32_t v3; + uint32_t v4; + uint32_t mem32[4]; + uint32_t memsize; +}; + +/** + * struct xxh64_state - private xxh64 state, do not use members directly + */ +struct xxh64_state { + uint64_t total_len; + uint64_t v1; + uint64_t v2; + uint64_t v3; + uint64_t v4; + uint64_t mem64[4]; + uint32_t memsize; +}; + +/** + * xxh32_reset() - reset the xxh32 state to start a new hashing operation + * + * @state: The xxh32 state to reset. + * @seed: Initialize the hash state with this seed. + * + * Call this function on any xxh32_state to prepare for a new hashing operation. + */ +void xxh32_reset(struct xxh32_state *state, uint32_t seed); + +/** + * xxh32_update() - hash the data given and update the xxh32 state + * + * @state: The xxh32 state to update. + * @input: The data to hash. + * @length: The length of the data to hash. + * + * After calling xxh32_reset() call xxh32_update() as many times as necessary. + * + * Return: Zero on success, otherwise an error code. + */ +int xxh32_update(struct xxh32_state *state, const void *input, size_t length); + +/** + * xxh32_digest() - produce the current xxh32 hash + * + * @state: Produce the current xxh32 hash of this state. + * + * A hash value can be produced at any time. It is still possible to continue + * inserting input into the hash state after a call to xxh32_digest(), and + * generate new hashes later on, by calling xxh32_digest() again. + * + * Return: The xxh32 hash stored in the state. + */ +uint32_t xxh32_digest(const struct xxh32_state *state); + +/** + * xxh64_reset() - reset the xxh64 state to start a new hashing operation + * + * @state: The xxh64 state to reset. + * @seed: Initialize the hash state with this seed. + */ +void xxh64_reset(struct xxh64_state *state, uint64_t seed); + +/** + * xxh64_update() - hash the data given and update the xxh64 state + * @state: The xxh64 state to update. + * @input: The data to hash. + * @length: The length of the data to hash. + * + * After calling xxh64_reset() call xxh64_update() as many times as necessary. + * + * Return: Zero on success, otherwise an error code.
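+ *
+ * A short usage sketch of the streaming interface (the chunk pointers,
+ * lengths, and my_hash_two_chunks() itself are placeholders):
+ *
+ *    static int my_hash_two_chunks(const void *c1, size_t l1,
+ *            const void *c2, size_t l2, uint64_t *out)
+ *    {
+ *        struct xxh64_state state;
+ *
+ *        xxh64_reset(&state, 0);
+ *        if (xxh64_update(&state, c1, l1) || xxh64_update(&state, c2, l2))
+ *            return -EINVAL;    /* illustrative error handling */
+ *        *out = xxh64_digest(&state);
+ *        return 0;
+ *    }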
+ */ +int xxh64_update(struct xxh64_state *state, const void *input, size_t length); + +/** + * xxh64_digest() - produce the current xxh64 hash + * + * @state: Produce the current xxh64 hash of this state. + * + * A hash value can be produced at any time. It is still possible to continue + * inserting input into the hash state after a call to xxh64_digest(), and + * generate new hashes later on, by calling xxh64_digest() again. + * + * Return: The xxh64 hash stored in the state. + */ +uint64_t xxh64_digest(const struct xxh64_state *state); + +/*-************************** + * Utils + ***************************/ + +/** + * xxh32_copy_state() - copy the source state into the destination state + * + * @src: The source xxh32 state. + * @dst: The destination xxh32 state. + */ +void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src); + +/** + * xxh64_copy_state() - copy the source state into the destination state + * + * @src: The source xxh64 state. + * @dst: The destination xxh64 state. + */ +void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src); + +#endif /* XXHASH_H */ diff --git a/include/linux/zstd.h b/include/linux/zstd.h new file mode 100644 index 0000000..724f693 --- /dev/null +++ b/include/linux/zstd.h @@ -0,0 +1,1147 @@ +/* SPDX-License-Identifier: (GPL-2.0 or BSD-3-Clause-Clear) */ +/* + * Copyright (c) 2016-present, Yann Collet, Facebook, Inc. + * All rights reserved. + */ + +#ifndef ZSTD_H +#define ZSTD_H + +/* ====== Dependency ======*/ +#include /* size_t */ + + +/*-***************************************************************************** + * Introduction + * + * zstd, short for Zstandard, is a fast lossless compression algorithm, + * targeting real-time compression scenarios at zlib-level and better + * compression ratios. The zstd compression library provides in-memory + * compression and decompression functions. The library supports compression + * levels from 1 up to ZSTD_maxCLevel() which is 22. Levels >= 20, labeled + * ultra, should be used with caution, as they require more memory. + * Compression can be done in: + * - a single step, reusing a context (described as Explicit memory management) + * - unbounded multiple steps (described as Streaming compression) + * The compression ratio achievable on small data can be highly improved using + * compression with a dictionary in: + * - a single step (described as Simple dictionary API) + * - a single step, reusing a dictionary (described as Fast dictionary API) + ******************************************************************************/ + +/*====== Helper functions ======*/ + +/** + * enum ZSTD_ErrorCode - zstd error codes + * + * Functions that return size_t can be checked for errors using ZSTD_isError() + * and the ZSTD_ErrorCode can be extracted using ZSTD_getErrorCode(). 
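+ *
+ * A typical checking pattern might look like this (the compression call and
+ * its arguments are only illustrative):
+ *
+ *    size_t ret = ZSTD_compressCCtx(cctx, dst, dstCapacity,
+ *            src, srcSize, params);
+ *
+ *    if (ZSTD_isError(ret)) {
+ *        ZSTD_ErrorCode err = ZSTD_getErrorCode(ret);
+ *        /* map err onto the caller's error reporting */
+ *    }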
+ */ +typedef enum { + ZSTD_error_no_error, + ZSTD_error_GENERIC, + ZSTD_error_prefix_unknown, + ZSTD_error_version_unsupported, + ZSTD_error_parameter_unknown, + ZSTD_error_frameParameter_unsupported, + ZSTD_error_frameParameter_unsupportedBy32bits, + ZSTD_error_frameParameter_windowTooLarge, + ZSTD_error_compressionParameter_unsupported, + ZSTD_error_init_missing, + ZSTD_error_memory_allocation, + ZSTD_error_stage_wrong, + ZSTD_error_dstSize_tooSmall, + ZSTD_error_srcSize_wrong, + ZSTD_error_corruption_detected, + ZSTD_error_checksum_wrong, + ZSTD_error_tableLog_tooLarge, + ZSTD_error_maxSymbolValue_tooLarge, + ZSTD_error_maxSymbolValue_tooSmall, + ZSTD_error_dictionary_corrupted, + ZSTD_error_dictionary_wrong, + ZSTD_error_dictionaryCreation_failed, + ZSTD_error_maxCode +} ZSTD_ErrorCode; + +/** + * ZSTD_maxCLevel() - maximum compression level available + * + * Return: Maximum compression level available. + */ +int ZSTD_maxCLevel(void); +/** + * ZSTD_compressBound() - maximum compressed size in worst case scenario + * @srcSize: The size of the data to compress. + * + * Return: The maximum compressed size in the worst case scenario. + */ +size_t ZSTD_compressBound(size_t srcSize); +/** + * ZSTD_isError() - tells if a size_t function result is an error code + * @code: The function result to check for error. + * + * Return: Non-zero iff the code is an error. + */ +static __attribute__((unused)) unsigned int ZSTD_isError(size_t code) +{ + return code > (size_t)-ZSTD_error_maxCode; +} +/** + * ZSTD_getErrorCode() - translates an error function result to a ZSTD_ErrorCode + * @functionResult: The result of a function for which ZSTD_isError() is true. + * + * Return: The ZSTD_ErrorCode corresponding to the functionResult or 0 + * if the functionResult isn't an error. + */ +static __attribute__((unused)) ZSTD_ErrorCode ZSTD_getErrorCode( + size_t functionResult) +{ + if (!ZSTD_isError(functionResult)) + return (ZSTD_ErrorCode)0; + return (ZSTD_ErrorCode)(0 - functionResult); +} + +/** + * enum ZSTD_strategy - zstd compression search strategy + * + * From faster to stronger. + */ +typedef enum { + ZSTD_fast, + ZSTD_dfast, + ZSTD_greedy, + ZSTD_lazy, + ZSTD_lazy2, + ZSTD_btlazy2, + ZSTD_btopt, + ZSTD_btopt2 +} ZSTD_strategy; + +/** + * struct ZSTD_compressionParameters - zstd compression parameters + * @windowLog: Log of the largest match distance. Larger means more + * compression, and more memory needed during decompression. + * @chainLog: Fully searched segment. Larger means more compression, slower, + * and more memory (useless for fast). + * @hashLog: Dispatch table. Larger means more compression, + * slower, and more memory. + * @searchLog: Number of searches. Larger means more compression and slower. + * @searchLength: Match length searched. Larger means faster decompression, + * sometimes less compression. + * @targetLength: Acceptable match size for optimal parser (only). Larger means + * more compression, and slower. + * @strategy: The zstd compression strategy. + */ +typedef struct { + unsigned int windowLog; + unsigned int chainLog; + unsigned int hashLog; + unsigned int searchLog; + unsigned int searchLength; + unsigned int targetLength; + ZSTD_strategy strategy; +} ZSTD_compressionParameters; + +/** + * struct ZSTD_frameParameters - zstd frame parameters + * @contentSizeFlag: Controls whether content size will be present in the frame + * header (when known). + * @checksumFlag: Controls whether a 32-bit checksum is generated at the end + * of the frame for error detection. 
+ * @noDictIDFlag: Controls whether dictID will be saved into the frame header + * when using dictionary compression. + * + * The default value is all fields set to 0. + */ +typedef struct { + unsigned int contentSizeFlag; + unsigned int checksumFlag; + unsigned int noDictIDFlag; +} ZSTD_frameParameters; + +/** + * struct ZSTD_parameters - zstd parameters + * @cParams: The compression parameters. + * @fParams: The frame parameters. + */ +typedef struct { + ZSTD_compressionParameters cParams; + ZSTD_frameParameters fParams; +} ZSTD_parameters; + +/** + * ZSTD_getCParams() - returns ZSTD_compressionParameters for selected level + * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). + * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. + * @dictSize: The dictionary size or 0 if a dictionary isn't being used. + * + * Return: The selected ZSTD_compressionParameters. + */ +ZSTD_compressionParameters ZSTD_getCParams(int compressionLevel, + unsigned long long estimatedSrcSize, size_t dictSize); + +/** + * ZSTD_getParams() - returns ZSTD_parameters for selected level + * @compressionLevel: The compression level from 1 to ZSTD_maxCLevel(). + * @estimatedSrcSize: The estimated source size to compress or 0 if unknown. + * @dictSize: The dictionary size or 0 if a dictionary isn't being used. + * + * The same as ZSTD_getCParams() except also selects the default frame + * parameters (all zero). + * + * Return: The selected ZSTD_parameters. + */ +ZSTD_parameters ZSTD_getParams(int compressionLevel, + unsigned long long estimatedSrcSize, size_t dictSize); + +/*-************************************* + * Explicit memory management + **************************************/ + +/** + * ZSTD_CCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_CCtx + * @cParams: The compression parameters to be used for compression. + * + * If multiple compression parameters might be used, the caller must call + * ZSTD_CCtxWorkspaceBound() for each set of parameters and use the maximum + * size. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initCCtx(). + */ +size_t ZSTD_CCtxWorkspaceBound(ZSTD_compressionParameters cParams); + +/** + * struct ZSTD_CCtx - the zstd compression context + * + * When compressing many times it is recommended to allocate a context just once + * and reuse it for each successive compression operation. + */ +typedef struct ZSTD_CCtx_s ZSTD_CCtx; +/** + * ZSTD_initCCtx() - initialize a zstd compression context + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. Use ZSTD_CCtxWorkspaceBound() to + * determine how large the workspace must be. + * + * Return: A compression context emplaced into workspace. + */ +ZSTD_CCtx *ZSTD_initCCtx(void *workspace, size_t workspaceSize); + +/** + * ZSTD_compressCCtx() - compress src into dst + * @ctx: The context. Must have been initialized with a workspace at + * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). + * @dst: The buffer to compress src into. + * @dstCapacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @srcSize: The size of the data to compress. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * + * Return: The compressed size or an error, which can be checked using + * ZSTD_isError(). 
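+ *
+ * Example: one-shot compression into a caller-provided workspace (an
+ * illustrative sketch; malloc()/free() stand in for whatever allocator the
+ * caller uses, level 3 is an arbitrary choice, and `dst' is assumed to hold
+ * at least ZSTD_compressBound(srcSize) bytes):
+ *
+ *     size_t compress_once(void *dst, size_t dstCapacity,
+ *                          const void *src, size_t srcSize)
+ *     {
+ *             ZSTD_parameters params = ZSTD_getParams(3, srcSize, 0);
+ *             size_t wksp_size = ZSTD_CCtxWorkspaceBound(params.cParams);
+ *             void *wksp = malloc(wksp_size);
+ *             ZSTD_CCtx *cctx = wksp ? ZSTD_initCCtx(wksp, wksp_size) : NULL;
+ *             size_t csize;
+ *
+ *             if (!cctx) {
+ *                     free(wksp);
+ *                     return 0;
+ *             }
+ *             csize = ZSTD_compressCCtx(cctx, dst, dstCapacity,
+ *                                       src, srcSize, params);
+ *             free(wksp);
+ *             return ZSTD_isError(csize) ? 0 : csize;
+ *     }
+ * Returning 0 as the failure marker works because a successful compression
+ * never produces zero bytes.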
+ */ +size_t ZSTD_compressCCtx(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, ZSTD_parameters params); + +/** + * ZSTD_DCtxWorkspaceBound() - amount of memory needed to initialize a ZSTD_DCtx + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initDCtx(). + */ +size_t ZSTD_DCtxWorkspaceBound(void); + +/** + * struct ZSTD_DCtx - the zstd decompression context + * + * When decompressing many times it is recommended to allocate a context just + * once and reuse it for each successive decompression operation. + */ +typedef struct ZSTD_DCtx_s ZSTD_DCtx; +/** + * ZSTD_initDCtx() - initialize a zstd decompression context + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. Use ZSTD_DCtxWorkspaceBound() to + * determine how large the workspace must be. + * + * Return: A decompression context emplaced into workspace. + */ +ZSTD_DCtx *ZSTD_initDCtx(void *workspace, size_t workspaceSize); + +/** + * ZSTD_decompressDCtx() - decompress zstd compressed src into dst + * @ctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dstCapacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @srcSize: The exact size of the data to decompress. + * + * Return: The decompressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_decompressDCtx(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); + +/*-************************ + * Simple dictionary API + **************************/ + +/** + * ZSTD_compress_usingDict() - compress src into dst using a dictionary + * @ctx: The context. Must have been initialized with a workspace at + * least as large as ZSTD_CCtxWorkspaceBound(params.cParams). + * @dst: The buffer to compress src into. + * @dstCapacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @srcSize: The size of the data to compress. + * @dict: The dictionary to use for compression. + * @dictSize: The size of the dictionary. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * + * Compression using a predefined dictionary. The same dictionary must be used + * during decompression. + * + * Return: The compressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compress_usingDict(ZSTD_CCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, const void *dict, size_t dictSize, + ZSTD_parameters params); + +/** + * ZSTD_decompress_usingDict() - decompress src into dst using a dictionary + * @ctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dstCapacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @srcSize: The exact size of the data to decompress. + * @dict: The dictionary to use for decompression. 
The same dictionary + * must've been used to compress the data. + * @dictSize: The size of the dictionary. + * + * Return: The decompressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_decompress_usingDict(ZSTD_DCtx *ctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, const void *dict, size_t dictSize); + +/*-************************** + * Fast dictionary API + ***************************/ + +/** + * ZSTD_CDictWorkspaceBound() - memory needed to initialize a ZSTD_CDict + * @cParams: The compression parameters to be used for compression. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initCDict(). + */ +size_t ZSTD_CDictWorkspaceBound(ZSTD_compressionParameters cParams); + +/** + * struct ZSTD_CDict - a digested dictionary to be used for compression + */ +typedef struct ZSTD_CDict_s ZSTD_CDict; + +/** + * ZSTD_initCDict() - initialize a digested dictionary for compression + * @dictBuffer: The dictionary to digest. The buffer is referenced by the + * ZSTD_CDict so it must outlive the returned ZSTD_CDict. + * @dictSize: The size of the dictionary. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * @workspace: The workspace. It must outlive the returned ZSTD_CDict. + * @workspaceSize: The workspace size. Must be at least + * ZSTD_CDictWorkspaceBound(params.cParams). + * + * When compressing multiple messages / blocks with the same dictionary it is + * recommended to load it just once. The ZSTD_CDict merely references the + * dictBuffer, so it must outlive the returned ZSTD_CDict. + * + * Return: The digested dictionary emplaced into workspace. + */ +ZSTD_CDict *ZSTD_initCDict(const void *dictBuffer, size_t dictSize, + ZSTD_parameters params, void *workspace, size_t workspaceSize); + +/** + * ZSTD_compress_usingCDict() - compress src into dst using a ZSTD_CDict + * @ctx: The context. Must have been initialized with a workspace at + * least as large as ZSTD_CCtxWorkspaceBound(cParams) where + * cParams are the compression parameters used to initialize the + * cdict. + * @dst: The buffer to compress src into. + * @dstCapacity: The size of the destination buffer. May be any size, but + * ZSTD_compressBound(srcSize) is guaranteed to be large enough. + * @src: The data to compress. + * @srcSize: The size of the data to compress. + * @cdict: The digested dictionary to use for compression. + * @params: The parameters to use for compression. See ZSTD_getParams(). + * + * Compression using a digested dictionary. The same dictionary must be used + * during decompression. + * + * Return: The compressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compress_usingCDict(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize, const ZSTD_CDict *cdict); + + +/** + * ZSTD_DDictWorkspaceBound() - memory needed to initialize a ZSTD_DDict + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initDDict(). + */ +size_t ZSTD_DDictWorkspaceBound(void); + +/** + * struct ZSTD_DDict - a digested dictionary to be used for decompression + */ +typedef struct ZSTD_DDict_s ZSTD_DDict; + +/** + * ZSTD_initDDict() - initialize a digested dictionary for decompression + * @dictBuffer: The dictionary to digest. The buffer is referenced by the + * ZSTD_DDict so it must outlive the returned ZSTD_DDict. + * @dictSize: The size of the dictionary. + * @workspace: The workspace. It must outlive the returned ZSTD_DDict. 
+ * @workspaceSize: The workspace size. Must be at least + * ZSTD_DDictWorkspaceBound(). + * + * When decompressing multiple messages / blocks with the same dictionary it is + * recommended to load it just once. The ZSTD_DDict merely references the + * dictBuffer, so it must outlive the returned ZSTD_DDict. + * + * Return: The digested dictionary emplaced into workspace. + */ +ZSTD_DDict *ZSTD_initDDict(const void *dictBuffer, size_t dictSize, + void *workspace, size_t workspaceSize); + +/** + * ZSTD_decompress_usingDDict() - decompress src into dst using a ZSTD_DDict + * @ctx: The decompression context. + * @dst: The buffer to decompress src into. + * @dstCapacity: The size of the destination buffer. Must be at least as large + * as the decompressed size. If the caller cannot upper bound the + * decompressed size, then it's better to use the streaming API. + * @src: The zstd compressed data to decompress. Multiple concatenated + * frames and skippable frames are allowed. + * @srcSize: The exact size of the data to decompress. + * @ddict: The digested dictionary to use for decompression. The same + * dictionary must've been used to compress the data. + * + * Return: The decompressed size or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_decompress_usingDDict(ZSTD_DCtx *dctx, void *dst, + size_t dstCapacity, const void *src, size_t srcSize, + const ZSTD_DDict *ddict); + + +/*-************************** + * Streaming + ***************************/ + +/** + * struct ZSTD_inBuffer - input buffer for streaming + * @src: Start of the input buffer. + * @size: Size of the input buffer. + * @pos: Position where reading stopped. Will be updated. + * Necessarily 0 <= pos <= size. + */ +typedef struct ZSTD_inBuffer_s { + const void *src; + size_t size; + size_t pos; +} ZSTD_inBuffer; + +/** + * struct ZSTD_outBuffer - output buffer for streaming + * @dst: Start of the output buffer. + * @size: Size of the output buffer. + * @pos: Position where writing stopped. Will be updated. + * Necessarily 0 <= pos <= size. + */ +typedef struct ZSTD_outBuffer_s { + void *dst; + size_t size; + size_t pos; +} ZSTD_outBuffer; + + + +/*-***************************************************************************** + * Streaming compression - HowTo + * + * A ZSTD_CStream object is required to track streaming operation. + * Use ZSTD_initCStream() to initialize a ZSTD_CStream object. + * ZSTD_CStream objects can be reused multiple times on consecutive compression + * operations. It is recommended to re-use ZSTD_CStream in situations where many + * streaming operations will be achieved consecutively. Use one separate + * ZSTD_CStream per thread for parallel execution. + * + * Use ZSTD_compressStream() repetitively to consume input stream. + * The function will automatically update both `pos` fields. + * Note that it may not consume the entire input, in which case `pos < size`, + * and it's up to the caller to present again remaining data. + * It returns a hint for the preferred number of bytes to use as an input for + * the next function call. + * + * At any moment, it's possible to flush whatever data remains within internal + * buffer, using ZSTD_flushStream(). `output->pos` will be updated. There might + * still be some content left within the internal buffer if `output->size` is + * too small. It returns the number of bytes left in the internal buffer and + * must be called until it returns 0. + * + * ZSTD_endStream() instructs to finish a frame. 
It will perform a flush and + * write frame epilogue. The epilogue is required for decoders to consider a + * frame completed. Similar to ZSTD_flushStream(), it may not be able to flush + * the full content if `output->size` is too small. In which case, call again + * ZSTD_endStream() to complete the flush. It returns the number of bytes left + * in the internal buffer and must be called until it returns 0. + ******************************************************************************/ + +/** + * ZSTD_CStreamWorkspaceBound() - memory needed to initialize a ZSTD_CStream + * @cParams: The compression parameters to be used for compression. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initCStream() and ZSTD_initCStream_usingCDict(). + */ +size_t ZSTD_CStreamWorkspaceBound(ZSTD_compressionParameters cParams); + +/** + * struct ZSTD_CStream - the zstd streaming compression context + */ +typedef struct ZSTD_CStream_s ZSTD_CStream; + +/*===== ZSTD_CStream management functions =====*/ +/** + * ZSTD_initCStream() - initialize a zstd streaming compression context + * @params: The zstd compression parameters. + * @pledgedSrcSize: If params.fParams.contentSizeFlag == 1 then the caller must + * pass the source size (zero means empty source). Otherwise, + * the caller may optionally pass the source size, or zero if + * unknown. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. + * Use ZSTD_CStreamWorkspaceBound(params.cParams) to determine + * how large the workspace must be. + * + * Return: The zstd streaming compression context. + */ +ZSTD_CStream *ZSTD_initCStream(ZSTD_parameters params, + unsigned long long pledgedSrcSize, void *workspace, + size_t workspaceSize); + +/** + * ZSTD_initCStream_usingCDict() - initialize a streaming compression context + * @cdict: The digested dictionary to use for compression. + * @pledgedSrcSize: Optionally the source size, or zero if unknown. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. Call ZSTD_CStreamWorkspaceBound() + * with the cParams used to initialize the cdict to determine + * how large the workspace must be. + * + * Return: The zstd streaming compression context. + */ +ZSTD_CStream *ZSTD_initCStream_usingCDict(const ZSTD_CDict *cdict, + unsigned long long pledgedSrcSize, void *workspace, + size_t workspaceSize); + +/*===== Streaming compression functions =====*/ +/** + * ZSTD_resetCStream() - reset the context using parameters from creation + * @zcs: The zstd streaming compression context to reset. + * @pledgedSrcSize: Optionally the source size, or zero if unknown. + * + * Resets the context using the parameters from creation. Skips dictionary + * loading, since it can be reused. If `pledgedSrcSize` is non-zero the frame + * content size is always written into the frame header. + * + * Return: Zero or an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_resetCStream(ZSTD_CStream *zcs, unsigned long long pledgedSrcSize); +/** + * ZSTD_compressStream() - streaming compress some of input into output + * @zcs: The zstd streaming compression context. + * @output: Destination buffer. `output->pos` is updated to indicate how much + * compressed data was written. + * @input: Source buffer. `input->pos` is updated to indicate how much data was + * read. 
Note that it may not consume the entire input, in which case + * `input->pos < input->size`, and it's up to the caller to present + * remaining data again. + * + * The `input` and `output` buffers may be any size. Guaranteed to make some + * forward progress if `input` and `output` are not empty. + * + * Return: A hint for the number of bytes to use as the input for the next + * function call or an error, which can be checked using + * ZSTD_isError(). + */ +size_t ZSTD_compressStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output, + ZSTD_inBuffer *input); +/** + * ZSTD_flushStream() - flush internal buffers into output + * @zcs: The zstd streaming compression context. + * @output: Destination buffer. `output->pos` is updated to indicate how much + * compressed data was written. + * + * ZSTD_flushStream() must be called until it returns 0, meaning all the data + * has been flushed. Since ZSTD_flushStream() causes a block to be ended, + * calling it too often will degrade the compression ratio. + * + * Return: The number of bytes still present within internal buffers or an + * error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_flushStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); +/** + * ZSTD_endStream() - flush internal buffers into output and end the frame + * @zcs: The zstd streaming compression context. + * @output: Destination buffer. `output->pos` is updated to indicate how much + * compressed data was written. + * + * ZSTD_endStream() must be called until it returns 0, meaning all the data has + * been flushed and the frame epilogue has been written. + * + * Return: The number of bytes still present within internal buffers or an + * error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_endStream(ZSTD_CStream *zcs, ZSTD_outBuffer *output); + +/** + * ZSTD_CStreamInSize() - recommended size for the input buffer + * + * Return: The recommended size for the input buffer. + */ +size_t ZSTD_CStreamInSize(void); +/** + * ZSTD_CStreamOutSize() - recommended size for the output buffer + * + * When the output buffer is at least this large, it is guaranteed to be large + * enough to flush at least one complete compressed block. + * + * Return: The recommended size for the output buffer. + */ +size_t ZSTD_CStreamOutSize(void); + + + +/*-***************************************************************************** + * Streaming decompression - HowTo + * + * A ZSTD_DStream object is required to track streaming operations. + * Use ZSTD_initDStream() to initialize a ZSTD_DStream object. + * ZSTD_DStream objects can be re-used multiple times. + * + * Use ZSTD_decompressStream() repetitively to consume your input. + * The function will update both `pos` fields. + * If `input->pos < input->size`, some input has not been consumed. + * It's up to the caller to present again remaining data. + * If `output->pos < output->size`, decoder has flushed everything it could. + * Returns 0 iff a frame is completely decoded and fully flushed. + * Otherwise it returns a suggested next input size that will never load more + * than the current frame. + ******************************************************************************/ + +/** + * ZSTD_DStreamWorkspaceBound() - memory needed to initialize a ZSTD_DStream + * @maxWindowSize: The maximum window size allowed for compressed frames. + * + * Return: A lower bound on the size of the workspace that is passed to + * ZSTD_initDStream() and ZSTD_initDStream_usingDDict(). 
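+ *
+ * Example: sizing the workspace for a streaming decompression context (an
+ * illustrative sketch; the 1 MiB window limit is an arbitrary caller policy
+ * and malloc() stands in for the caller's allocator):
+ *
+ *     size_t max_window = 1UL << 20;
+ *     size_t wksp_size = ZSTD_DStreamWorkspaceBound(max_window);
+ *     void *wksp = malloc(wksp_size);
+ *     ZSTD_DStream *zds = wksp ? ZSTD_initDStream(max_window, wksp, wksp_size)
+ *                              : NULL;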
+ */ +size_t ZSTD_DStreamWorkspaceBound(size_t maxWindowSize); + +/** + * struct ZSTD_DStream - the zstd streaming decompression context + */ +typedef struct ZSTD_DStream_s ZSTD_DStream; +/*===== ZSTD_DStream management functions =====*/ +/** + * ZSTD_initDStream() - initialize a zstd streaming decompression context + * @maxWindowSize: The maximum window size allowed for compressed frames. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. + * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine + * how large the workspace must be. + * + * Return: The zstd streaming decompression context. + */ +ZSTD_DStream *ZSTD_initDStream(size_t maxWindowSize, void *workspace, + size_t workspaceSize); +/** + * ZSTD_initDStream_usingDDict() - initialize streaming decompression context + * @maxWindowSize: The maximum window size allowed for compressed frames. + * @ddict: The digested dictionary to use for decompression. + * @workspace: The workspace to emplace the context into. It must outlive + * the returned context. + * @workspaceSize: The size of workspace. + * Use ZSTD_DStreamWorkspaceBound(maxWindowSize) to determine + * how large the workspace must be. + * + * Return: The zstd streaming decompression context. + */ +ZSTD_DStream *ZSTD_initDStream_usingDDict(size_t maxWindowSize, + const ZSTD_DDict *ddict, void *workspace, size_t workspaceSize); + +/*===== Streaming decompression functions =====*/ +/** + * ZSTD_resetDStream() - reset the context using parameters from creation + * @zds: The zstd streaming decompression context to reset. + * + * Resets the context using the parameters from creation. Skips dictionary + * loading, since it can be reused. + * + * Return: Zero or an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_resetDStream(ZSTD_DStream *zds); +/** + * ZSTD_decompressStream() - streaming decompress some of input into output + * @zds: The zstd streaming decompression context. + * @output: Destination buffer. `output.pos` is updated to indicate how much + * decompressed data was written. + * @input: Source buffer. `input.pos` is updated to indicate how much data was + * read. Note that it may not consume the entire input, in which case + * `input.pos < input.size`, and it's up to the caller to present + * remaining data again. + * + * The `input` and `output` buffers may be any size. Guaranteed to make some + * forward progress if `input` and `output` are not empty. + * ZSTD_decompressStream() will not consume the last byte of the frame until + * the entire frame is flushed. + * + * Return: Returns 0 iff a frame is completely decoded and fully flushed. + * Otherwise returns a hint for the number of bytes to use as the input + * for the next function call or an error, which can be checked using + * ZSTD_isError(). The size hint will never load more than the frame. + */ +size_t ZSTD_decompressStream(ZSTD_DStream *zds, ZSTD_outBuffer *output, + ZSTD_inBuffer *input); + +/** + * ZSTD_DStreamInSize() - recommended size for the input buffer + * + * Return: The recommended size for the input buffer. + */ +size_t ZSTD_DStreamInSize(void); +/** + * ZSTD_DStreamOutSize() - recommended size for the output buffer + * + * When the output buffer is at least this large, it is guaranteed to be large + * enough to flush at least one complete decompressed block. + * + * Return: The recommended size for the output buffer. 
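+ *
+ * Example: a single-frame streaming decompression loop (an illustrative
+ * sketch; `zds' is a context prepared as in the ZSTD_DStreamWorkspaceBound()
+ * example, read_input() and write_output() are placeholder I/O helpers,
+ * malloc() stands in for the caller's allocator, and error cleanup is
+ * omitted):
+ *
+ *     char *in = malloc(ZSTD_DStreamInSize());
+ *     char *out = malloc(ZSTD_DStreamOutSize());
+ *     size_t ret = 1;
+ *
+ *     while (ret != 0) {
+ *             ZSTD_inBuffer input = { in, read_input(in, ZSTD_DStreamInSize()), 0 };
+ *
+ *             if (input.size == 0)
+ *                     break;
+ *             while (input.pos < input.size) {
+ *                     ZSTD_outBuffer output = { out, ZSTD_DStreamOutSize(), 0 };
+ *
+ *                     ret = ZSTD_decompressStream(zds, &output, &input);
+ *                     if (ZSTD_isError(ret))
+ *                             return ret;
+ *                     write_output(out, output.pos);
+ *             }
+ *     }
+ * The loop ends once ZSTD_decompressStream() returns 0, i.e. a frame has been
+ * completely decoded and flushed; leaving early on empty input means the
+ * stream was truncated.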
+ */ +size_t ZSTD_DStreamOutSize(void); + + +/* --- Constants ---*/ +#define ZSTD_MAGICNUMBER 0xFD2FB528 /* >= v0.8.0 */ +#define ZSTD_MAGIC_SKIPPABLE_START 0x184D2A50U + +#define ZSTD_CONTENTSIZE_UNKNOWN (0ULL - 1) +#define ZSTD_CONTENTSIZE_ERROR (0ULL - 2) + +#define ZSTD_WINDOWLOG_MAX_32 27 +#define ZSTD_WINDOWLOG_MAX_64 27 +#define ZSTD_WINDOWLOG_MAX \ + ((unsigned int)(sizeof(size_t) == 4 \ + ? ZSTD_WINDOWLOG_MAX_32 \ + : ZSTD_WINDOWLOG_MAX_64)) +#define ZSTD_WINDOWLOG_MIN 10 +#define ZSTD_HASHLOG_MAX ZSTD_WINDOWLOG_MAX +#define ZSTD_HASHLOG_MIN 6 +#define ZSTD_CHAINLOG_MAX (ZSTD_WINDOWLOG_MAX+1) +#define ZSTD_CHAINLOG_MIN ZSTD_HASHLOG_MIN +#define ZSTD_HASHLOG3_MAX 17 +#define ZSTD_SEARCHLOG_MAX (ZSTD_WINDOWLOG_MAX-1) +#define ZSTD_SEARCHLOG_MIN 1 +/* only for ZSTD_fast, other strategies are limited to 6 */ +#define ZSTD_SEARCHLENGTH_MAX 7 +/* only for ZSTD_btopt, other strategies are limited to 4 */ +#define ZSTD_SEARCHLENGTH_MIN 3 +#define ZSTD_TARGETLENGTH_MIN 4 +#define ZSTD_TARGETLENGTH_MAX 999 + +/* for static allocation */ +#define ZSTD_FRAMEHEADERSIZE_MAX 18 +#define ZSTD_FRAMEHEADERSIZE_MIN 6 +static const size_t ZSTD_frameHeaderSize_prefix = 5; +static const size_t ZSTD_frameHeaderSize_min = ZSTD_FRAMEHEADERSIZE_MIN; +static const size_t ZSTD_frameHeaderSize_max = ZSTD_FRAMEHEADERSIZE_MAX; +/* magic number + skippable frame length */ +static const size_t ZSTD_skippableHeaderSize = 8; + + +/*-************************************* + * Compressed size functions + **************************************/ + +/** + * ZSTD_findFrameCompressedSize() - returns the size of a compressed frame + * @src: Source buffer. It should point to the start of a zstd encoded frame + * or a skippable frame. + * @srcSize: The size of the source buffer. It must be at least as large as the + * size of the frame. + * + * Return: The compressed size of the frame pointed to by `src` or an error, + * which can be check with ZSTD_isError(). + * Suitable to pass to ZSTD_decompress() or similar functions. + */ +size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize); + +/*-************************************* + * Decompressed size functions + **************************************/ +/** + * ZSTD_getFrameContentSize() - returns the content size in a zstd frame header + * @src: It should point to the start of a zstd encoded frame. + * @srcSize: The size of the source buffer. It must be at least as large as the + * frame header. `ZSTD_frameHeaderSize_max` is always large enough. + * + * Return: The frame content size stored in the frame header if known. + * `ZSTD_CONTENTSIZE_UNKNOWN` if the content size isn't stored in the + * frame header. `ZSTD_CONTENTSIZE_ERROR` on invalid input. + */ +unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize); + +/** + * ZSTD_findDecompressedSize() - returns decompressed size of a series of frames + * @src: It should point to the start of a series of zstd encoded and/or + * skippable frames. + * @srcSize: The exact size of the series of frames. + * + * If any zstd encoded frame in the series doesn't have the frame content size + * set, `ZSTD_CONTENTSIZE_UNKNOWN` is returned. But frame content size is always + * set when using ZSTD_compress(). The decompressed size can be very large. + * If the source is untrusted, the decompressed size could be wrong or + * intentionally modified. Always ensure the result fits within the + * application's authorized limits. 
ZSTD_findDecompressedSize() handles multiple + * frames, and so it must traverse the input to read each frame header. This is + * efficient as most of the data is skipped, however it does mean that all frame + * data must be present and valid. + * + * Return: Decompressed size of all the data contained in the frames if known. + * `ZSTD_CONTENTSIZE_UNKNOWN` if the decompressed size is unknown. + * `ZSTD_CONTENTSIZE_ERROR` if an error occurred. + */ +unsigned long long ZSTD_findDecompressedSize(const void *src, size_t srcSize); + +/*-************************************* + * Advanced compression functions + **************************************/ +/** + * ZSTD_checkCParams() - ensure parameter values remain within authorized range + * @cParams: The zstd compression parameters. + * + * Return: Zero or an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_checkCParams(ZSTD_compressionParameters cParams); + +/** + * ZSTD_adjustCParams() - optimize parameters for a given srcSize and dictSize + * @srcSize: Optionally the estimated source size, or zero if unknown. + * @dictSize: Optionally the estimated dictionary size, or zero if unknown. + * + * Return: The optimized parameters. + */ +ZSTD_compressionParameters ZSTD_adjustCParams( + ZSTD_compressionParameters cParams, unsigned long long srcSize, + size_t dictSize); + +/*--- Advanced decompression functions ---*/ + +/** + * ZSTD_isFrame() - returns true iff the buffer starts with a valid frame + * @buffer: The source buffer to check. + * @size: The size of the source buffer, must be at least 4 bytes. + * + * Return: True iff the buffer starts with a zstd or skippable frame identifier. + */ +unsigned int ZSTD_isFrame(const void *buffer, size_t size); + +/** + * ZSTD_getDictID_fromDict() - returns the dictionary id stored in a dictionary + * @dict: The dictionary buffer. + * @dictSize: The size of the dictionary buffer. + * + * Return: The dictionary id stored within the dictionary or 0 if the + * dictionary is not a zstd dictionary. If it returns 0 the + * dictionary can still be loaded as a content-only dictionary. + */ +unsigned int ZSTD_getDictID_fromDict(const void *dict, size_t dictSize); + +/** + * ZSTD_getDictID_fromDDict() - returns the dictionary id stored in a ZSTD_DDict + * @ddict: The ddict to find the id of. + * + * Return: The dictionary id stored within `ddict` or 0 if the dictionary is not + * a zstd dictionary. If it returns 0 `ddict` will be loaded as a + * content-only dictionary. + */ +unsigned int ZSTD_getDictID_fromDDict(const ZSTD_DDict *ddict); + +/** + * ZSTD_getDictID_fromFrame() - returns the dictionary id stored in a zstd frame + * @src: Source buffer. It must be a zstd encoded frame. + * @srcSize: The size of the source buffer. It must be at least as large as the + * frame header. `ZSTD_frameHeaderSize_max` is always large enough. + * + * Return: The dictionary id required to decompress the frame stored within + * `src` or 0 if the dictionary id could not be decoded. It can return + * 0 if the frame does not require a dictionary, the dictionary id + * wasn't stored in the frame, `src` is not a zstd frame, or `srcSize` + * is too small. + */ +unsigned int ZSTD_getDictID_fromFrame(const void *src, size_t srcSize); + +/** + * struct ZSTD_frameParams - zstd frame parameters stored in the frame header + * @frameContentSize: The frame content size, or 0 if not present. + * @windowSize: The window size, or 0 if the frame is a skippable frame. + * @dictID: The dictionary id, or 0 if not present. 
+ * @checksumFlag: Whether a checksum was used. + */ +typedef struct { + unsigned long long frameContentSize; + unsigned int windowSize; + unsigned int dictID; + unsigned int checksumFlag; +} ZSTD_frameParams; + +/** + * ZSTD_getFrameParams() - extracts parameters from a zstd or skippable frame + * @fparamsPtr: On success the frame parameters are written here. + * @src: The source buffer. It must point to a zstd or skippable frame. + * @srcSize: The size of the source buffer. `ZSTD_frameHeaderSize_max` is + * always large enough to succeed. + * + * Return: 0 on success. If more data is required it returns how many bytes + * must be provided to make forward progress. Otherwise it returns + * an error, which can be checked using ZSTD_isError(). + */ +size_t ZSTD_getFrameParams(ZSTD_frameParams *fparamsPtr, const void *src, + size_t srcSize); + +/*-***************************************************************************** + * Buffer-less and synchronous inner streaming functions + * + * This is an advanced API, giving full control over buffer management, for + * users which need direct control over memory. + * But it's also a complex one, with many restrictions (documented below). + * Prefer using normal streaming API for an easier experience + ******************************************************************************/ + +/*-***************************************************************************** + * Buffer-less streaming compression (synchronous mode) + * + * A ZSTD_CCtx object is required to track streaming operations. + * Use ZSTD_initCCtx() to initialize a context. + * ZSTD_CCtx object can be re-used multiple times within successive compression + * operations. + * + * Start by initializing a context. + * Use ZSTD_compressBegin(), or ZSTD_compressBegin_usingDict() for dictionary + * compression, + * or ZSTD_compressBegin_advanced(), for finer parameter control. + * It's also possible to duplicate a reference context which has already been + * initialized, using ZSTD_copyCCtx() + * + * Then, consume your input using ZSTD_compressContinue(). + * There are some important considerations to keep in mind when using this + * advanced function : + * - ZSTD_compressContinue() has no internal buffer. It uses externally provided + * buffer only. + * - Interface is synchronous : input is consumed entirely and produce 1+ + * (or more) compressed blocks. + * - Caller must ensure there is enough space in `dst` to store compressed data + * under worst case scenario. Worst case evaluation is provided by + * ZSTD_compressBound(). + * ZSTD_compressContinue() doesn't guarantee recover after a failed + * compression. + * - ZSTD_compressContinue() presumes prior input ***is still accessible and + * unmodified*** (up to maximum distance size, see WindowLog). + * It remembers all previous contiguous blocks, plus one separated memory + * segment (which can itself consists of multiple contiguous blocks) + * - ZSTD_compressContinue() detects that prior input has been overwritten when + * `src` buffer overlaps. In which case, it will "discard" the relevant memory + * section from its history. + * + * Finish a frame with ZSTD_compressEnd(), which will write the last block(s) + * and optional checksum. It's possible to use srcSize==0, in which case, it + * will write a final empty block to end the frame. Without last block mark, + * frames will be considered unfinished (corrupted) by decoders. + * + * `ZSTD_CCtx` object can be re-used (ZSTD_compressBegin()) to compress some new + * frame. 
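+ *
+ * Example: pushing one contiguous source buffer through the buffer-less
+ * interface (an illustrative sketch; CHUNK_SIZE, min(), write_output() and
+ * `dst' sized to at least ZSTD_compressBound(CHUNK_SIZE) are assumptions of
+ * this sketch, level 3 is an arbitrary choice, and `cctx' is an initialized
+ * ZSTD_CCtx):
+ *
+ *     const char *p = src;
+ *     size_t remaining = srcSize;
+ *     size_t ret = ZSTD_compressBegin(cctx, 3);
+ *
+ *     if (ZSTD_isError(ret))
+ *             return ret;
+ *     while (remaining > 0) {
+ *             size_t chunk = min(remaining, (size_t)CHUNK_SIZE);
+ *
+ *             ret = ZSTD_compressContinue(cctx, dst, dstCapacity, p, chunk);
+ *             if (ZSTD_isError(ret))
+ *                     return ret;
+ *             write_output(dst, ret);
+ *             p += chunk;
+ *             remaining -= chunk;
+ *     }
+ *     ret = ZSTD_compressEnd(cctx, dst, dstCapacity, NULL, 0);
+ *     if (ZSTD_isError(ret))
+ *             return ret;
+ *     write_output(dst, ret);
+ * Because every slice points into the same buffer, the prior-input rule above
+ * (previous input must stay accessible and unmodified) is satisfied.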
+ ******************************************************************************/ + +/*===== Buffer-less streaming compression functions =====*/ +size_t ZSTD_compressBegin(ZSTD_CCtx *cctx, int compressionLevel); +size_t ZSTD_compressBegin_usingDict(ZSTD_CCtx *cctx, const void *dict, + size_t dictSize, int compressionLevel); +size_t ZSTD_compressBegin_advanced(ZSTD_CCtx *cctx, const void *dict, + size_t dictSize, ZSTD_parameters params, + unsigned long long pledgedSrcSize); +size_t ZSTD_copyCCtx(ZSTD_CCtx *cctx, const ZSTD_CCtx *preparedCCtx, + unsigned long long pledgedSrcSize); +size_t ZSTD_compressBegin_usingCDict(ZSTD_CCtx *cctx, const ZSTD_CDict *cdict, + unsigned long long pledgedSrcSize); +size_t ZSTD_compressContinue(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +size_t ZSTD_compressEnd(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); + + + +/*-***************************************************************************** + * Buffer-less streaming decompression (synchronous mode) + * + * A ZSTD_DCtx object is required to track streaming operations. + * Use ZSTD_initDCtx() to initialize a context. + * A ZSTD_DCtx object can be re-used multiple times. + * + * First typical operation is to retrieve frame parameters, using + * ZSTD_getFrameParams(). It fills a ZSTD_frameParams structure which provide + * important information to correctly decode the frame, such as the minimum + * rolling buffer size to allocate to decompress data (`windowSize`), and the + * dictionary ID used. + * Note: content size is optional, it may not be present. 0 means unknown. + * Note that these values could be wrong, either because of data malformation, + * or because an attacker is spoofing deliberate false information. As a + * consequence, check that values remain within valid application range, + * especially `windowSize`, before allocation. Each application can set its own + * limit, depending on local restrictions. For extended interoperability, it is + * recommended to support at least 8 MB. + * Frame parameters are extracted from the beginning of the compressed frame. + * Data fragment must be large enough to ensure successful decoding, typically + * `ZSTD_frameHeaderSize_max` bytes. + * Result: 0: successful decoding, the `ZSTD_frameParams` structure is filled. + * >0: `srcSize` is too small, provide at least this many bytes. + * errorCode, which can be tested using ZSTD_isError(). + * + * Start decompression, with ZSTD_decompressBegin() or + * ZSTD_decompressBegin_usingDict(). Alternatively, you can copy a prepared + * context, using ZSTD_copyDCtx(). + * + * Then use ZSTD_nextSrcSizeToDecompress() and ZSTD_decompressContinue() + * alternatively. + * ZSTD_nextSrcSizeToDecompress() tells how many bytes to provide as 'srcSize' + * to ZSTD_decompressContinue(). + * ZSTD_decompressContinue() requires this _exact_ amount of bytes, or it will + * fail. + * + * The result of ZSTD_decompressContinue() is the number of bytes regenerated + * within 'dst' (necessarily <= dstCapacity). It can be zero, which is not an + * error; it just means ZSTD_decompressContinue() has decoded some metadata + * item. It can also be an error code, which can be tested with ZSTD_isError(). + * + * ZSTD_decompressContinue() needs previous data blocks during decompression, up + * to `windowSize`. They should preferably be located contiguously, prior to + * current block. Alternatively, a round buffer of sufficient size is also + * possible. 
Sufficient size is determined by frame parameters. + * ZSTD_decompressContinue() is very sensitive to contiguity, if 2 blocks don't + * follow each other, make sure that either the compressor breaks contiguity at + * the same place, or that previous contiguous segment is large enough to + * properly handle maximum back-reference. + * + * A frame is fully decoded when ZSTD_nextSrcSizeToDecompress() returns zero. + * Context can then be reset to start a new decompression. + * + * Note: it's possible to know if next input to present is a header or a block, + * using ZSTD_nextInputType(). This information is not required to properly + * decode a frame. + * + * == Special case: skippable frames == + * + * Skippable frames allow integration of user-defined data into a flow of + * concatenated frames. Skippable frames will be ignored (skipped) by a + * decompressor. The format of skippable frames is as follows: + * a) Skippable frame ID - 4 Bytes, Little endian format, any value from + * 0x184D2A50 to 0x184D2A5F + * b) Frame Size - 4 Bytes, Little endian format, unsigned 32-bits + * c) Frame Content - any content (User Data) of length equal to Frame Size + * For skippable frames ZSTD_decompressContinue() always returns 0. + * For skippable frames ZSTD_getFrameParams() returns fparamsPtr->windowLog==0 + * what means that a frame is skippable. + * Note: If fparamsPtr->frameContentSize==0, it is ambiguous: the frame might + * actually be a zstd encoded frame with no content. For purposes of + * decompression, it is valid in both cases to skip the frame using + * ZSTD_findFrameCompressedSize() to find its size in bytes. + * It also returns frame size as fparamsPtr->frameContentSize. + ******************************************************************************/ + +/*===== Buffer-less streaming decompression functions =====*/ +size_t ZSTD_decompressBegin(ZSTD_DCtx *dctx); +size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx *dctx, const void *dict, + size_t dictSize); +void ZSTD_copyDCtx(ZSTD_DCtx *dctx, const ZSTD_DCtx *preparedDCtx); +size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx *dctx); +size_t ZSTD_decompressContinue(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +typedef enum { + ZSTDnit_frameHeader, + ZSTDnit_blockHeader, + ZSTDnit_block, + ZSTDnit_lastBlock, + ZSTDnit_checksum, + ZSTDnit_skippableFrame +} ZSTD_nextInputType_e; +ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx *dctx); + +/*-***************************************************************************** + * Block functions + * + * Block functions produce and decode raw zstd blocks, without frame metadata. + * Frame metadata cost is typically ~18 bytes, which can be non-negligible for + * very small blocks (< 100 bytes). User will have to take in charge required + * information to regenerate data, such as compressed and content sizes. + * + * A few rules to respect: + * - Compressing and decompressing require a context structure + * + Use ZSTD_initCCtx() and ZSTD_initDCtx() + * - It is necessary to init context before starting + * + compression : ZSTD_compressBegin() + * + decompression : ZSTD_decompressBegin() + * + variants _usingDict() are also allowed + * + copyCCtx() and copyDCtx() work too + * - Block size is limited, it must be <= ZSTD_getBlockSizeMax() + * + If you need to compress more, cut data into multiple blocks + * + Consider using the regular ZSTD_compress() instead, as frame metadata + * costs become negligible when source size is large. 
+ * - When a block is considered not compressible enough, ZSTD_compressBlock() + * result will be zero. In which case, nothing is produced into `dst`. + * + User must test for such outcome and deal directly with uncompressed data + * + ZSTD_decompressBlock() doesn't accept uncompressed data as input!!! + * + In case of multiple successive blocks, decoder must be informed of + * uncompressed block existence to follow proper history. Use + * ZSTD_insertBlock() in such a case. + ******************************************************************************/ + +/* Define for static allocation */ +#define ZSTD_BLOCKSIZE_ABSOLUTEMAX (128 * 1024) +/*===== Raw zstd block functions =====*/ +size_t ZSTD_getBlockSizeMax(ZSTD_CCtx *cctx); +size_t ZSTD_compressBlock(ZSTD_CCtx *cctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +size_t ZSTD_decompressBlock(ZSTD_DCtx *dctx, void *dst, size_t dstCapacity, + const void *src, size_t srcSize); +size_t ZSTD_insertBlock(ZSTD_DCtx *dctx, const void *blockStart, + size_t blockSize); + +#endif /* ZSTD_H */ -- cgit v1.2.3