author     Siddhesh Poyarekar <siddhesh@redhat.com>    2013-10-30 16:13:37 +0530
committer  Siddhesh Poyarekar <siddhesh@redhat.com>    2013-10-30 16:19:40 +0530
commit     977f4b31b7ca4a4e498c397f3fd70510694bbd86 (patch)
tree       4f53a0fdb7ea94f487d26f9df0b658e0b14ff64b /nss/nss_files
parent     66925c47793852d1a8423cd25ab78d7dabdf5924 (diff)
download   glibc-977f4b31b7ca4a4e498c397f3fd70510694bbd86.tar.gz
Fix reads for sizes larger than INT_MAX in AF_INET lookup
Currently for AF_INET lookups from the hosts file, buffer sizes larger than INT_MAX silently overflow and may result in access beyond the bounds of a buffer.  This happens when the number of results for an AF_INET lookup in /etc/hosts is very large.

There are two aspects to the problem.  One is that the size computed from the buffer size is stored into an int, which overflows for large sizes.  Additionally, even if this size were widened, the function used to read content into the buffer (fgets) accepts only int sizes.  The fix is therefore to wrap fgets in a function that calls it multiple times with int-sized chunks if necessary.
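
The heart of the fix is a wrapper that feeds fgets at most INT_MAX bytes per call.  The following is a minimal standalone sketch of that idea; the name read_line_chunked and the use of plain fgets are illustrative only, while the actual patch below uses fgets_unlocked inside the nss_files backend:

/* Sketch of the chunked-read idea; not the patch itself.  */
#include <limits.h>
#include <stdio.h>

/* Read one line of at most LEN bytes into LINEBUF, calling fgets in
   chunks of at most INT_MAX because fgets takes an int size.
   Returns 0 on success, -1 on EOF/read error, 1 if LINEBUF was too small.  */
static int
read_line_chunked (char *linebuf, size_t len, FILE *stream)
{
  size_t remaining = len;
  char *cur = linebuf;

  do
    {
      int chunk = remaining > (size_t) INT_MAX ? INT_MAX : (int) remaining;

      /* Sentinel byte: if it survives the read, the line fit in this chunk.  */
      ((unsigned char *) cur)[chunk - 1] = 0xff;

      if (fgets (cur, chunk, stream) == NULL)
        return -1;

      if (((unsigned char *) cur)[chunk - 1] == 0xff)
        return 0;

      /* Chunk filled up; continue on top of its terminating '\0'.  */
      remaining -= chunk - 1;
      cur += chunk - 1;
    }
  while (remaining > 1);

  return 1;
}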
Diffstat (limited to 'nss/nss_files')
-rw-r--r--   nss/nss_files/files-XXX.c   59
1 file changed, 51 insertions(+), 8 deletions(-)
diff --git a/nss/nss_files/files-XXX.c b/nss/nss_files/files-XXX.c
index 082d1ea2b7..b62208c324 100644
--- a/nss/nss_files/files-XXX.c
+++ b/nss/nss_files/files-XXX.c
@@ -179,8 +179,51 @@ CONCAT(_nss_files_end,ENTNAME) (void)
return NSS_STATUS_SUCCESS;
}
-/* Parsing the database file into `struct STRUCTURE' data structures. */
+typedef enum
+{
+ gcr_ok = 0,
+ gcr_error = -1,
+ gcr_overflow = -2
+} get_contents_ret;
+
+/* Hack around the fact that fgets only accepts int sizes. */
+static get_contents_ret
+get_contents (char *linebuf, size_t len, FILE *stream)
+{
+ size_t remaining_len = len;
+ char *curbuf = linebuf;
+
+ do
+ {
+ int curlen = ((remaining_len > (size_t) INT_MAX) ? INT_MAX
+ : remaining_len);
+ /* Terminate the line so that we can test for overflow. */
+ ((unsigned char *) curbuf)[curlen - 1] = 0xff;
+
+ char *p = fgets_unlocked (curbuf, curlen, stream);
+
+ /* EOF or read error. */
+ if (p == NULL)
+ return gcr_error;
+
+ /* Done reading in the line. */
+ if (((unsigned char *) curbuf)[curlen - 1] == 0xff)
+ return gcr_ok;
+
+ /* Drop the terminating '\0'. */
+ remaining_len -= curlen - 1;
+ curbuf += curlen - 1;
+ }
+ /* fgets copies one less than the input length. Our last iteration reads
+ REMAINING_LEN bytes; once that is done, REMAINING_LEN is decremented by
+ REMAINING_LEN - 1, leaving the result as 1. */
+ while (remaining_len > 1);
+
+ /* This means that the current buffer was not large enough. */
+ return gcr_overflow;
+}
+
+/* Parsing the database file into `struct STRUCTURE' data structures. */
static enum nss_status
internal_getent (struct STRUCTURE *result,
char *buffer, size_t buflen, int *errnop H_ERRNO_PROTO
@@ -188,7 +231,7 @@ internal_getent (struct STRUCTURE *result,
{
char *p;
struct parser_data *data = (void *) buffer;
- int linebuflen = buffer + buflen - data->linebuffer;
+ size_t linebuflen = buffer + buflen - data->linebuffer;
int parse_result;
if (buflen < sizeof *data + 2)
@@ -200,17 +243,16 @@ internal_getent (struct STRUCTURE *result,
do
{
- /* Terminate the line so that we can test for overflow. */
- ((unsigned char *) data->linebuffer)[linebuflen - 1] = '\xff';
+ get_contents_ret r = get_contents (data->linebuffer, linebuflen, stream);
- p = fgets_unlocked (data->linebuffer, linebuflen, stream);
- if (p == NULL)
+ if (r == gcr_error)
{
/* End of file or read error. */
H_ERRNO_SET (HOST_NOT_FOUND);
return NSS_STATUS_NOTFOUND;
}
- else if (((unsigned char *) data->linebuffer)[linebuflen - 1] != 0xff)
+
+ if (r == gcr_overflow)
{
/* The line is too long. Give the user the opportunity to
enlarge the buffer. */
@@ -219,7 +261,8 @@ internal_getent (struct STRUCTURE *result,
return NSS_STATUS_TRYAGAIN;
}
- /* Skip leading blanks. */
+ /* Everything OK. Now skip leading blanks. */
+ p = data->linebuffer;
while (isspace (*p))
++p;
}
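
When internal_getent returns NSS_STATUS_TRYAGAIN with ERANGE, callers are expected to enlarge their buffer and retry.  For context, a minimal caller-side sketch (not part of this patch) of that retry loop using gethostbyname_r:

/* Illustrative only: retry a lookup with a larger buffer on ERANGE.  */
#define _GNU_SOURCE
#include <errno.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  size_t buflen = 1024;
  char *buf = malloc (buflen);
  struct hostent he, *res = NULL;
  int herr = 0;

  while (buf != NULL
         && gethostbyname_r ("localhost", &he, buf, buflen, &res, &herr)
            == ERANGE)
    {
      /* The result did not fit; enlarge the buffer and retry.  */
      char *newbuf = realloc (buf, buflen * 2);
      if (newbuf == NULL)
        break;
      buf = newbuf;
      buflen *= 2;
    }

  if (res != NULL)
    printf ("resolved: %s\n", res->h_name);

  free (buf);
  return 0;
}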