author     Karl Williamson <khw@cpan.org>  2019-04-20 00:17:42 -0600
committer  Karl Williamson <khw@cpan.org>  2019-04-20 00:33:19 -0600
commit     94250c4fc8bd70e90985c18c6eeb4257b56420e9 (patch)
tree       f09ee39c25fa9f26a9624a4d179885f2d75822a4 /handy.h
parent     b9893ba58d9656b9b5f2a5a6998b77dc16542ef7 (diff)
handy.h: Make inRANGE more robust
It turns out that in order to make this work, or to assert if it won't, it
needs to be more complicated.  The problem has to do with signed and unsigned
operands.  It now special-cases where the item being checked for inclusion in
the range is a char, and casts that to a U8.  Otherwise, there is a problem
when that item has a negative signed type and the range is above the maximum
for that type.  An assert is added to detect this rare event (not rare for
chars, hence the special case).

The use of sizeof() for this case means that it will be resolved at compile
time and not generate extra code.  I did an experiment, and gcc even under
-O0 compiled away the sizeof() clause.

As an example of why this is needed, suppose we have a signed char, -127, and
we want to see if it is in the range 128-130, expressed as ints.  Without
casting that -127 to an unsigned char, the macro would subtract 128 from
-127, yielding -255, which when cast to an unsigned int would be wrong.  But
casting the -127 to U8 first yields 129, and gives the correct result.

The same issue could happen with ints if the range is above INT_MAX, but that
is a much rarer case, and this macro asserts against it.
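To make the arithmetic above concrete, here is a minimal standalone sketch of
the failure and the fix.  The typedefs are stand-ins for Perl's real U8 and
WIDEST_UTYPE (which is wider in perl itself), and the variable names are
illustrative only, not taken from the patch:

    #include <stdio.h>

    typedef unsigned char U8;            /* stand-in for Perl's U8 */
    typedef unsigned int  WIDEST_UTYPE;  /* stand-in; perl uses its widest unsigned type */

    int main(void) {
        signed char c = -127;
        int l = 128, u = 130;

        /* Naive check: c promotes to int, so (c - l) is -255; cast to
         * unsigned that becomes a huge value, far above (u - l) == 2,
         * so the test gives the wrong answer for this bit pattern. */
        int naive = (WIDEST_UTYPE) (c - l) <= (WIDEST_UTYPE) (u - l);

        /* Casting to U8 first reinterprets -127 as its bit pattern, 129;
         * then 129 - 128 == 1 <= 2, the correct "in range" result. */
        int fixed = (WIDEST_UTYPE) ((U8) c - l) <= (WIDEST_UTYPE) (u - l);

        printf("naive: %d, with U8 cast: %d\n", naive, fixed);
        return 0;
    }

Compiled with gcc, this prints "naive: 0, with U8 cast: 1", matching the
-127 / 128-130 example in the message above.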
Diffstat (limited to 'handy.h')
-rw-r--r--  handy.h  |  26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/handy.h b/handy.h
index edc01075d5..51f79efcf3 100644
--- a/handy.h
+++ b/handy.h
@@ -1094,11 +1094,27 @@ patched there. The file as of this writing is cpan/Devel-PPPort/parts/inc/misc
#define FITS_IN_8_BITS(c) (1)
#endif
-/* Returns true if c is in the range l..u
- * Written with the cast so it only needs one conditional test
- */
-#define inRANGE(c, l, u) (__ASSERT_((u) >= (l)) \
- ((WIDEST_UTYPE) (((c) - (l)) | 0) <= ((WIDEST_UTYPE) ((u) - (l)))))
+/* Returns true if c is in the range l..u, where 'l' is non-negative
+ * Written this way so that after optimization, only one conditional test is
+ * needed.
+ *
+ * This isn't fully general, except for the special cased 'signed char' (which
+ * should be resolved at compile time): It won't work if 'c' is negative, and
+ * 'l' is larger than the max for that signed type. Thus if 'c' is a negative
+ * int, and 'l' is larger than INT_MAX, it will fail. To protect against this
+ * happening, there is an assert that will generate a warning if c is larger
+ * than e.g. INT_MAX if it is an 'unsigned int'. This could be a false
+ * positive, but khw couldn't figure out a way to make it better. It's good
+ * enough so far */
+#define inRANGE(c, l, u) (__ASSERT_((l) >= 0) __ASSERT_((u) >= (l))           \
+   ((sizeof(c) == 1)                                                          \
+    ? (((WIDEST_UTYPE) ((((U8) (c))|0) - (l))) <= ((WIDEST_UTYPE) ((u) - (l)))) \
+    : (__ASSERT_( (((WIDEST_UTYPE) 1) << (CHARBITS * sizeof(c) - 1) & (c))    \
+                              /* sign bit of c is 0 */ == 0                   \
+                 || (((~ ((WIDEST_UTYPE) 1) << ((CHARBITS * sizeof(c) - 1) - 1))\
+                /* l not larger than largest value in c's signed type */      \
+                      & ~ ((WIDEST_UTYPE) 0)) & (l)) == 0)                    \
+       ((WIDEST_UTYPE) (((c) - (l)) | 0) <= ((WIDEST_UTYPE) ((u) - (l)))))))
#ifdef EBCDIC
# ifndef _ALL_SOURCE
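For background, the "one conditional test" mentioned in the removed comment
refers to the classic unsigned range-check idiom that inRANGE builds on.  A
minimal sketch, assuming 0 <= l <= u and an already-unsigned operand (the
function name is hypothetical, not part of the patch):

    /* "l <= c && c <= u" collapses into a single unsigned comparison:
     * if c < l, then (c - l) wraps around to a value above (u - l),
     * so one test covers both range bounds. */
    static int in_range(unsigned int c, unsigned int l, unsigned int u)
    {
        return (c - l) <= (u - l);
    }

The patch's complication is exactly that this idiom is only safe once the
signed-operand cases described in the commit message are handled first.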