/* DANGER!!!! here be dragons!
 *
 * Leave these functions as byte accesses because that is safe
 * across architectures. Clever use of 32-bit access
 * will create problems on some hosts.
 *
 * Note that the "buf" pointer in memory is probably unaligned.
 *
 * Were these functions to be re-written to take a 32-bit-wide or 16-bit-wide
 * memory access shortcut, then on some CPUs, e.g. the ARM7, the 2 least-significant
 * bits of the address are ignored for 32-bit access, whereas on other CPUs a
 * 32-bit-wide unaligned memory access will cause an exception, and lastly on x86,
 * an unaligned "greater than byte-wide" memory access works as if aligned. So what
 * follows below will work on all platforms and gives the compiler leeway to do its
 * own platform-specific optimizations.
 *
 * Again, note that the "buf" pointer in memory is probably unaligned.
 */
+
+
/**
 * Read a 32-bit little-endian value from (possibly unaligned) memory.
 *
 * Deliberately uses byte accesses: they are safe on every architecture,
 * unlike a direct 32-bit load through an unaligned pointer.
 *
 * @param buf pointer to 4 bytes, least-significant byte first
 * @return the decoded value in host order
 */
static inline uint32_t le_to_h_u32(const uint8_t* buf)
{
	/* Widen each byte to uint32_t BEFORE shifting: without the cast,
	 * buf[3] is promoted to (signed) int, and "<< 24" can shift a set
	 * bit into the sign bit, which is undefined behavior (C11 6.5.7). */
	return (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
	       ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
}
+
/**
 * Read a 16-bit little-endian value from (possibly unaligned) memory.
 *
 * Byte accesses keep this safe on all architectures regardless of the
 * alignment of @a buf.
 *
 * @param buf pointer to 2 bytes, least-significant byte first
 * @return the decoded value in host order
 */
static inline uint16_t le_to_h_u16(const uint8_t* buf)
{
	uint16_t lo = buf[0];
	uint16_t hi = buf[1];
	return (uint16_t)(lo | (hi << 8));
}
+
/**
 * Read a 32-bit big-endian value from (possibly unaligned) memory.
 *
 * Deliberately uses byte accesses: they are safe on every architecture,
 * unlike a direct 32-bit load through an unaligned pointer.
 *
 * @param buf pointer to 4 bytes, most-significant byte first
 * @return the decoded value in host order
 */
static inline uint32_t be_to_h_u32(const uint8_t* buf)
{
	/* Widen each byte to uint32_t BEFORE shifting: without the cast,
	 * buf[0] is promoted to (signed) int, and "<< 24" can shift a set
	 * bit into the sign bit, which is undefined behavior (C11 6.5.7). */
	return (uint32_t)buf[3] | ((uint32_t)buf[2] << 8) |
	       ((uint32_t)buf[1] << 16) | ((uint32_t)buf[0] << 24);
}
+
/**
 * Read a 16-bit big-endian value from (possibly unaligned) memory.
 *
 * Byte accesses keep this safe on all architectures regardless of the
 * alignment of @a buf.
 *
 * @param buf pointer to 2 bytes, most-significant byte first
 * @return the decoded value in host order
 */
static inline uint16_t be_to_h_u16(const uint8_t* buf)
{
	uint16_t hi = buf[0];
	uint16_t lo = buf[1];
	return (uint16_t)((hi << 8) | lo);
}
+
/**
 * Store a 32-bit value into memory as little-endian bytes.
 *
 * Byte stores keep the (probably unaligned) destination safe on every
 * architecture.
 *
 * @param buf destination for 4 bytes, least-significant byte first
 * @param val host-order value to store
 */
static inline void h_u32_to_le(uint8_t* buf, int val)
{
	/* Shift an unsigned copy: right-shifting a negative int is
	 * implementation-defined (C11 6.5.7), unsigned shifts are fully
	 * defined and produce the same bytes on two's-complement hosts. */
	uint32_t v = (uint32_t)val;
	buf[3] = (uint8_t)(v >> 24);
	buf[2] = (uint8_t)(v >> 16);
	buf[1] = (uint8_t)(v >> 8);
	buf[0] = (uint8_t)(v >> 0);
}
+
/**
 * Store a 32-bit value into memory as big-endian bytes.
 *
 * Byte stores keep the (probably unaligned) destination safe on every
 * architecture.
 *
 * @param buf destination for 4 bytes, most-significant byte first
 * @param val host-order value to store
 */
static inline void h_u32_to_be(uint8_t* buf, int val)
{
	/* Shift an unsigned copy: right-shifting a negative int is
	 * implementation-defined (C11 6.5.7), unsigned shifts are fully
	 * defined and produce the same bytes on two's-complement hosts. */
	uint32_t v = (uint32_t)val;
	buf[0] = (uint8_t)(v >> 24);
	buf[1] = (uint8_t)(v >> 16);
	buf[2] = (uint8_t)(v >> 8);
	buf[3] = (uint8_t)(v >> 0);
}
+
/**
 * Store a 16-bit value into memory as little-endian bytes.
 *
 * Byte stores keep the (probably unaligned) destination safe on every
 * architecture.
 *
 * @param buf destination for 2 bytes, least-significant byte first
 * @param val host-order value to store (only the low 16 bits are used)
 */
static inline void h_u16_to_le(uint8_t* buf, int val)
{
	/* Shift an unsigned copy: right-shifting a negative int is
	 * implementation-defined (C11 6.5.7). */
	unsigned int v = (unsigned int)val;
	buf[1] = (uint8_t)(v >> 8);
	buf[0] = (uint8_t)(v >> 0);
}
+
/**
 * Store a 16-bit value into memory as big-endian bytes.
 *
 * Byte stores keep the (probably unaligned) destination safe on every
 * architecture.
 *
 * @param buf destination for 2 bytes, most-significant byte first
 * @param val host-order value to store (only the low 16 bits are used)
 */
static inline void h_u16_to_be(uint8_t* buf, int val)
{
	/* Shift an unsigned copy: right-shifting a negative int is
	 * implementation-defined (C11 6.5.7). */
	unsigned int v = (unsigned int)val;
	buf[0] = (uint8_t)(v >> 8);
	buf[1] = (uint8_t)(v >> 0);
}