author     Arnd Bergmann <arnd@arndb.de>  2021-05-08 00:07:57 +0200
committer  Arnd Bergmann <arnd@arndb.de>  2021-05-17 13:30:29 +0200
commit     803f4e1eab7a8938ba3a3c30dd4eb5e9eeef5e63 (patch)
tree       4a7096939a4a914eedad1fba212cee0e88ba3ec5 /include/asm-generic/unaligned.h
parent     d40d8179482c330df5b9049797fe94c2e8eb4f6e (diff)
download   linux-803f4e1eab7a8938ba3a3c30dd4eb5e9eeef5e63.tar.gz
asm-generic: simplify asm/unaligned.h
The get_unaligned()/put_unaligned() implementations are much more complex
than necessary, now that all architectures use the same code. Move
everything into one file and use a much more compact way to express the
same logic.

I've compared the binary output using gcc-11 across defconfig builds for
all architectures and found this patch to make no difference, except for
a single function on powerpc that needs two additional register moves
because of random differences in register allocation.

There are a handful of callers of the low-level __get_unaligned_cpu32(),
so leave that in place for the time being, even though the common code no
longer uses it.

This adds a warning for any caller of get_unaligned()/put_unaligned()
that passes in a single-byte pointer, but I've sent patches for all
instances that show up in x86 and randconfig builds.

It would be nice to change the arguments of the endian-specific accessors
to take the matching __be16/__be32/__be64/__le16/__le32/__le64 types
instead of a void pointer, but that requires more changes to the rest of
the kernel.

This new version also allows passing aggregate types to get_unaligned(),
which was not the original goal but might come in handy.

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
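The compact implementation works because a member of a __packed struct
has alignment 1, so the compiler must emit an access that is legal at any
address. The following is a minimal userspace sketch of the same trick;
una_u32, load_unaligned_u32() and store_unaligned_u32() are illustrative
names, not kernel identifiers:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Sketch of the packed-struct technique this patch uses (GCC/Clang):
 * wrapping the type in a packed struct drops its alignment to 1.
 */
struct una_u32 { uint32_t x; } __attribute__((packed));

static inline uint32_t load_unaligned_u32(const void *p)
{
	/* x has alignment 1, so this load is legal at any address */
	return ((const struct una_u32 *)p)->x;
}

static inline void store_unaligned_u32(uint32_t val, void *p)
{
	((struct una_u32 *)p)->x = val;
}

int main(void)
{
	unsigned char buf[8] = { 0 };

	store_unaligned_u32(0x11223344, buf + 1);	/* misaligned store */
	printf("0x%" PRIx32 "\n", load_unaligned_u32(buf + 1));	/* 0x11223344 */
	return 0;
}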
Diffstat (limited to 'include/asm-generic/unaligned.h')
-rw-r--r--  include/asm-generic/unaligned.h  130
1 file changed, 117 insertions(+), 13 deletions(-)
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 36bf03aaa674..1c4242416c9f 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -6,20 +6,124 @@
* This is the most generic implementation of unaligned accesses
* and should work almost anywhere.
*/
+#include <linux/unaligned/packed_struct.h>
#include <asm/byteorder.h>
-#if defined(__LITTLE_ENDIAN)
-# include <linux/unaligned/le_struct.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_le
-# define put_unaligned __put_unaligned_le
-#elif defined(__BIG_ENDIAN)
-# include <linux/unaligned/be_struct.h>
-# include <linux/unaligned/generic.h>
-# define get_unaligned __get_unaligned_be
-# define put_unaligned __put_unaligned_be
-#else
-# error need to define endianess
-#endif
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
+#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+ return le16_to_cpu(__get_unaligned_t(__le16, p));
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+ return le32_to_cpu(__get_unaligned_t(__le32, p));
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+ return le64_to_cpu(__get_unaligned_t(__le64, p));
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+ __put_unaligned_t(__le16, cpu_to_le16(val), p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+ __put_unaligned_t(__le32, cpu_to_le32(val), p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+ __put_unaligned_t(__le64, cpu_to_le64(val), p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+ return be16_to_cpu(__get_unaligned_t(__be16, p));
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+ return be32_to_cpu(__get_unaligned_t(__be32, p));
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+ return be64_to_cpu(__get_unaligned_t(__be64, p));
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+ __put_unaligned_t(__be16, cpu_to_be16(val), p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ __put_unaligned_t(__be32, cpu_to_be32(val), p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+ __put_unaligned_t(__be64, cpu_to_be64(val), p);
+}
+
+static inline u32 __get_unaligned_be24(const u8 *p)
+{
+ return p[0] << 16 | p[1] << 8 | p[2];
+}
+
+static inline u32 get_unaligned_be24(const void *p)
+{
+ return __get_unaligned_be24(p);
+}
+
+static inline u32 __get_unaligned_le24(const u8 *p)
+{
+ return p[0] | p[1] << 8 | p[2] << 16;
+}
+
+static inline u32 get_unaligned_le24(const void *p)
+{
+ return __get_unaligned_le24(p);
+}
+
+static inline void __put_unaligned_be24(const u32 val, u8 *p)
+{
+ *p++ = val >> 16;
+ *p++ = val >> 8;
+ *p++ = val;
+}
+
+static inline void put_unaligned_be24(const u32 val, void *p)
+{
+ __put_unaligned_be24(val, p);
+}
+
+static inline void __put_unaligned_le24(const u32 val, u8 *p)
+{
+ *p++ = val;
+ *p++ = val >> 8;
+ *p++ = val >> 16;
+}
+
+static inline void put_unaligned_le24(const u32 val, void *p)
+{
+ __put_unaligned_le24(val, p);
+}
#endif /* __ASM_GENERIC_UNALIGNED_H */
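For reference, this is how the endian-specific helpers are typically
used. The snippet below is a hypothetical example, not part of this
patch; the wire format and parse_hdr() are made up for illustration. It
reads a little-endian header from a buffer with no alignment guarantee,
exercising the 16-, 24- and 32-bit accessors:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical wire layout, for illustration only:
 *   offset 0: __le16 type
 *   offset 2: 24-bit little-endian length
 *   offset 5: __le32 checksum
 * None of the fields past offset 0 is naturally aligned.
 */
static void parse_hdr(const u8 *buf, u16 *type, u32 *len, u32 *csum)
{
	*type = get_unaligned_le16(buf);
	*len  = get_unaligned_le24(buf + 2);
	*csum = get_unaligned_le32(buf + 5);
}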