author     Jeremy Compostella <jeremy.compostella@intel.com>   2017-07-11 14:08:37 -0700
committer  Julius Werner <jwerner@chromium.org>                2017-07-13 18:16:15 +0000
commit     5b8987ae46512b5550d15fafd54b270ec913422d (patch)
tree       4f65b2ff44bbbd736ba976d75390679b5a4acdd8 /payloads/libpayload/libc
parent     4dc9fb026cf1834d5b99baee8f265ad6cebf8fc6 (diff)
libpayload: Support unaligned pointer for memcpy, memmove and memcmp
The memcpy(), memmove() and memcmp() functions use word-by-word operations
regardless of pointer alignment. Depending on the platform, this can lead
to a crash. This patch makes memcpy(), memmove() and memcmp() operate byte
by byte when they are supplied with unaligned pointers.

Change-Id: I0b668739b7b58d47266f10f2dff2dc9cbf38577e
Signed-off-by: Jeremy Compostella <jeremy.compostella@intel.com>
Reviewed-on: https://review.coreboot.org/20535
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Nico Huber <nico.h@gmx.de>
Reviewed-by: Julius Werner <jwerner@chromium.org>
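For illustration only (not part of the commit): a minimal sketch of the alignment
guard this patch introduces, assuming IS_ALIGNED is the usual power-of-two
alignment check. The macro definition and the copy_sketch() name below are
assumptions made for a self-contained example, not libpayload code.

#include <stddef.h>
#include <stdint.h>

/* Assumed helper: true when x is a multiple of the power-of-two a. */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Illustrative copy: use word accesses only when both pointers are
 * word-aligned, otherwise (and for the tail) fall back to byte copies. */
static void *copy_sketch(void *dst, const void *src, size_t n)
{
	size_t i = 0;

	if (IS_ALIGNED((uintptr_t)dst, sizeof(unsigned long)) &&
	    IS_ALIGNED((uintptr_t)src, sizeof(unsigned long))) {
		for (i = 0; i < n / sizeof(unsigned long); i++)
			((unsigned long *)dst)[i] =
				((const unsigned long *)src)[i];
		i *= sizeof(unsigned long);	/* bytes already copied */
	}

	for (; i < n; i++)
		((uint8_t *)dst)[i] = ((const uint8_t *)src)[i];

	return dst;
}

On platforms that trap unaligned word loads or stores, the previous
unconditional word loop could fault; guarding it as above restricts
word-sized accesses to aligned addresses and copies any remainder byte
by byte, which is the behavior the commit message describes.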
Diffstat (limited to 'payloads/libpayload/libc')
-rw-r--r--  payloads/libpayload/libc/memory.c  34
1 file changed, 24 insertions(+), 10 deletions(-)
diff --git a/payloads/libpayload/libc/memory.c b/payloads/libpayload/libc/memory.c
index 1adfb3207f..2c44764edb 100644
--- a/payloads/libpayload/libc/memory.c
+++ b/payloads/libpayload/libc/memory.c
@@ -66,13 +66,17 @@ static void *default_memcpy(void *dst, const void *src, size_t n)
 	size_t i;
 	void *ret = dst;
 
-	for(i = 0; i < n / sizeof(unsigned long); i++)
-		((unsigned long *)dst)[i] = ((unsigned long *)src)[i];
+	if (IS_ALIGNED((uintptr_t)dst, sizeof(unsigned long)) &&
+	    IS_ALIGNED((uintptr_t)src, sizeof(unsigned long))) {
+		for (i = 0; i < n / sizeof(unsigned long); i++)
+			((unsigned long *)dst)[i] = ((unsigned long *)src)[i];
 
-	src += i * sizeof(unsigned long);
-	dst += i * sizeof(unsigned long);
+		src += i * sizeof(unsigned long);
+		dst += i * sizeof(unsigned long);
+		n -= i * sizeof(unsigned long);
+	}
 
-	for(i = 0; i < n % sizeof(unsigned long); i++)
+	for (i = 0; i < n; i++)
 		((u8 *)dst)[i] = ((u8 *)src)[i];
 
 	return ret;
@@ -89,6 +93,13 @@ static void *default_memmove(void *dst, const void *src, size_t n)
 	if (src > dst)
 		return memcpy(dst, src, n);
 
+	if (!IS_ALIGNED((uintptr_t)dst, sizeof(unsigned long)) ||
+	    !IS_ALIGNED((uintptr_t)src, sizeof(unsigned long))) {
+		for (i = n - 1; i >= 0; i--)
+			((u8 *)dst)[i] = ((u8 *)src)[i];
+		return dst;
+	}
+
 	offs = n - (n % sizeof(unsigned long));
 
 	for (i = (n % sizeof(unsigned long)) - 1; i >= 0; i--)
@@ -116,11 +127,14 @@ void *memmove(void *dst, const void *src, size_t n)
 
 static int default_memcmp(const void *s1, const void *s2, size_t n)
 {
-	size_t i;
-
-	for (i = 0; i < n / sizeof(unsigned long); i++)
-		if (((unsigned long *)s1)[i] != ((unsigned long *)s2)[i])
-			break; /* fall through to find differing byte */
+	size_t i = 0;
+	const unsigned long *w1 = s1, *w2 = s2;
+
+	if (IS_ALIGNED((uintptr_t)s1, sizeof(unsigned long)) &&
+	    IS_ALIGNED((uintptr_t)s2, sizeof(unsigned long)))
+		for (; i < n / sizeof(unsigned long); i++)
+			if (w1[i] != w2[i])
+				break; /* fall through to find differing byte */
 
 	for (i *= sizeof(unsigned long); i < n; i++)
 		if (((u8 *)s1)[i] != ((u8 *)s2)[i])