author     Daniel Stenberg <daniel@haxx.se>    2018-08-31 10:17:40 +0200
committer  Daniel Stenberg <daniel@haxx.se>    2018-09-01 10:40:42 +0200
commit     1a890997a47d4d22df58b5183181685e49ed6e61 (patch)
tree       d6510eb7592ca21eb554c23795c12daacccf1694
parent     9dda13bbac1938c13ddf0a9cc4d9dd0302ff0331 (diff)
download   curl-1a890997a47d4d22df58b5183181685e49ed6e61.tar.gz
all: s/int/size_t cleanup
Assisted-by: Rikard Falkeborn
Closes #2922
-rw-r--r--  lib/cookie.c      |  8
-rw-r--r--  lib/dict.c        |  4
-rw-r--r--  lib/file.c        |  4
-rw-r--r--  lib/gopher.c      |  4
-rw-r--r--  lib/smb.c         |  4
-rw-r--r--  lib/socks.c       |  6
-rw-r--r--  lib/ssh-libssh.c  |  8
-rw-r--r--  lib/ssh.c         | 30
-rw-r--r--  lib/ssh.h         |  4
-rw-r--r--  lib/tftp.c        |  6
-rw-r--r--  lib/transfer.c    | 25
-rw-r--r--  lib/transfer.h    |  3
12 files changed, 53 insertions, 53 deletions
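
Note: the whole patch follows one pattern. Byte counts that can never be negative move from int to size_t, so the (int)/(size_t) casts around them disappear; the Curl_fillreadbuffer() prototype change in lib/transfer.h below is the central case. A minimal stand-alone sketch of that pattern, with hypothetical names rather than curl's own code:

    #include <stddef.h>
    #include <string.h>

    /* hypothetical reader standing in for a read callback; was
       int fill_buffer(char *buf, int len) before a cleanup like this one */
    static size_t fill_buffer(char *buf, size_t len)
    {
      const char data[] = "example";
      size_t n = sizeof(data) - 1;
      if(n > len)
        n = len;
      memcpy(buf, data, n);
      return n;                        /* no (int) cast on the way out */
    }

    int main(void)
    {
      char buf[64];
      size_t nread = fill_buffer(buf, sizeof(buf)); /* no (size_t) cast on the way in */
      return nread > sizeof(buf);      /* sizes compare without casts or sign warnings */
    }
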
diff --git a/lib/cookie.c b/lib/cookie.c
index 5a8e4fc65..fd7341f0b 100644
--- a/lib/cookie.c
+++ b/lib/cookie.c
@@ -250,9 +250,9 @@ static const char *get_top_domain(const char * const domain, size_t *outlen)
len = strlen(domain);
last = memrchr(domain, '.', len);
if(last) {
- first = memrchr(domain, '.', (size_t) (last - domain));
+ first = memrchr(domain, '.', (last - domain));
if(first)
- len -= (size_t) (++first - domain);
+ len -= (++first - domain);
}
if(outlen)
@@ -717,9 +717,9 @@ Curl_cookie_add(struct Curl_easy *data,
if(!queryp)
endslash = strrchr(path, '/');
else
- endslash = memrchr(path, '/', (size_t)(queryp - path));
+ endslash = memrchr(path, '/', (queryp - path));
if(endslash) {
- size_t pathlen = (size_t)(endslash-path + 1); /* include end slash */
+ size_t pathlen = (endslash-path + 1); /* include end slash */
co->path = malloc(pathlen + 1); /* one extra for the zero byte */
if(co->path) {
memcpy(co->path, path, pathlen);
diff --git a/lib/dict.c b/lib/dict.c
index c26d6d34c..408d57b92 100644
--- a/lib/dict.c
+++ b/lib/dict.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -101,7 +101,7 @@ static char *unescape_word(struct Curl_easy *data, const char *inputbuff)
if(!newp || result)
return NULL;
- dictp = malloc(((size_t)len)*2 + 1); /* add one for terminating zero */
+ dictp = malloc(len*2 + 1); /* add one for terminating zero */
if(dictp) {
char *ptr;
char ch;
diff --git a/lib/file.c b/lib/file.c
index 77fcf2536..e50e98876 100644
--- a/lib/file.c
+++ b/lib/file.c
@@ -306,7 +306,7 @@ static CURLcode file_upload(struct connectdata *conn)
while(!result) {
size_t nread;
size_t nwrite;
- int readcount;
+ size_t readcount;
result = Curl_fillreadbuffer(conn, (int)data->set.buffer_size, &readcount);
if(result)
break;
@@ -314,7 +314,7 @@ static CURLcode file_upload(struct connectdata *conn)
if(readcount <= 0) /* fix questionable compare error. curlvms */
break;
- nread = (size_t)readcount;
+ nread = readcount;
/*skip bytes before resume point*/
if(data->state.resume_from) {
diff --git a/lib/gopher.c b/lib/gopher.c
index d65049969..3ecee9bdc 100644
--- a/lib/gopher.c
+++ b/lib/gopher.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -89,7 +89,7 @@ static CURLcode gopher_do(struct connectdata *conn, bool *done)
/* Create selector. Degenerate cases: / and /1 => convert to "" */
if(strlen(path) <= 2) {
sel = (char *)"";
- len = (int)strlen(sel);
+ len = strlen(sel);
}
else {
char *newp;
diff --git a/lib/smb.c b/lib/smb.c
index 32f0ac889..e4b18fcf5 100644
--- a/lib/smb.c
+++ b/lib/smb.c
@@ -610,8 +610,8 @@ static CURLcode smb_send_and_recv(struct connectdata *conn, void **msg)
/* Check if there is data in the transfer buffer */
if(!smbc->send_size && smbc->upload_size) {
- int nread = smbc->upload_size > UPLOAD_BUFSIZE ? UPLOAD_BUFSIZE :
- (int) smbc->upload_size;
+ size_t nread = smbc->upload_size > UPLOAD_BUFSIZE ? UPLOAD_BUFSIZE :
+ smbc->upload_size;
conn->data->req.upload_fromhere = conn->data->state.ulbuf;
result = Curl_fillreadbuffer(conn, nread, &nread);
if(result && result != CURLE_AGAIN)
diff --git a/lib/socks.c b/lib/socks.c
index 73326e5c2..81f3eda28 100644
--- a/lib/socks.c
+++ b/lib/socks.c
@@ -5,7 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -222,8 +222,8 @@ CURLcode Curl_SOCKS4(const char *proxy_user,
ssize_t actualread;
ssize_t written;
ssize_t hostnamelen = 0;
- int packetsize = 9 +
- (int)strlen((char *)socksreq + 8); /* size including NUL */
+ ssize_t packetsize = 9 +
+ strlen((char *)socksreq + 8); /* size including NUL */
/* If SOCKS4a, set special invalid IP address 0.0.0.x */
if(protocol4a) {
diff --git a/lib/ssh-libssh.c b/lib/ssh-libssh.c
index 035f39fa7..002e1d96f 100644
--- a/lib/ssh-libssh.c
+++ b/lib/ssh-libssh.c
@@ -1290,7 +1290,7 @@ static CURLcode myssh_statemach_act(struct connectdata *conn, bool *block)
if(sshc->readdir_attrs) {
sshc->readdir_filename = sshc->readdir_attrs->name;
sshc->readdir_longentry = sshc->readdir_attrs->longname;
- sshc->readdir_len = (int)strlen(sshc->readdir_filename);
+ sshc->readdir_len = strlen(sshc->readdir_filename);
if(data->set.ftp_list_only) {
char *tmpLine;
@@ -1321,7 +1321,7 @@ static CURLcode myssh_statemach_act(struct connectdata *conn, bool *block)
}
}
else {
- sshc->readdir_currLen = (int)strlen(sshc->readdir_longentry);
+ sshc->readdir_currLen = strlen(sshc->readdir_longentry);
sshc->readdir_totalLen = 80 + sshc->readdir_currLen;
sshc->readdir_line = calloc(sshc->readdir_totalLen, 1);
if(!sshc->readdir_line) {
@@ -1382,12 +1382,12 @@ static CURLcode myssh_statemach_act(struct connectdata *conn, bool *block)
if(sshc->readdir_filename == NULL)
sshc->readdir_len = 0;
else
- sshc->readdir_len = (int)strlen(sshc->readdir_tmp);
+ sshc->readdir_len = strlen(sshc->readdir_tmp);
sshc->readdir_longentry = NULL;
sshc->readdir_filename = sshc->readdir_tmp;
}
else {
- sshc->readdir_len = (int)strlen(sshc->readdir_link_attrs->name);
+ sshc->readdir_len = strlen(sshc->readdir_link_attrs->name);
sshc->readdir_filename = sshc->readdir_link_attrs->name;
sshc->readdir_longentry = sshc->readdir_link_attrs->longname;
}
diff --git a/lib/ssh.c b/lib/ssh.c
index 5d81f649c..0aaba9a6a 100644
--- a/lib/ssh.c
+++ b/lib/ssh.c
@@ -1933,17 +1933,17 @@ static CURLcode ssh_statemach_act(struct connectdata *conn, bool *block)
break;
case SSH_SFTP_READDIR:
- sshc->readdir_len = libssh2_sftp_readdir_ex(sshc->sftp_handle,
- sshc->readdir_filename,
- PATH_MAX,
- sshc->readdir_longentry,
- PATH_MAX,
- &sshc->readdir_attrs);
- if(sshc->readdir_len == LIBSSH2_ERROR_EAGAIN) {
- rc = LIBSSH2_ERROR_EAGAIN;
+ rc = libssh2_sftp_readdir_ex(sshc->sftp_handle,
+ sshc->readdir_filename,
+ PATH_MAX,
+ sshc->readdir_longentry,
+ PATH_MAX,
+ &sshc->readdir_attrs);
+ if(rc == LIBSSH2_ERROR_EAGAIN) {
break;
}
- if(sshc->readdir_len > 0) {
+ if(rc > 0) {
+ sshc->readdir_len = (size_t) rc;
sshc->readdir_filename[sshc->readdir_len] = '\0';
if(data->set.ftp_list_only) {
@@ -1974,7 +1974,7 @@ static CURLcode ssh_statemach_act(struct connectdata *conn, bool *block)
}
}
else {
- sshc->readdir_currLen = (int)strlen(sshc->readdir_longentry);
+ sshc->readdir_currLen = strlen(sshc->readdir_longentry);
sshc->readdir_totalLen = 80 + sshc->readdir_currLen;
sshc->readdir_line = calloc(sshc->readdir_totalLen, 1);
if(!sshc->readdir_line) {
@@ -2008,13 +2008,13 @@ static CURLcode ssh_statemach_act(struct connectdata *conn, bool *block)
break;
}
}
- else if(sshc->readdir_len == 0) {
+ else if(rc == 0) {
Curl_safefree(sshc->readdir_filename);
Curl_safefree(sshc->readdir_longentry);
state(conn, SSH_SFTP_READDIR_DONE);
break;
}
- else if(sshc->readdir_len <= 0) {
+ else if(rc < 0) {
err = sftp_libssh2_last_error(sshc->sftp_session);
result = sftp_libssh2_error_to_CURLE(err);
sshc->actualcode = result?result:CURLE_SSH;
@@ -2029,16 +2029,16 @@ static CURLcode ssh_statemach_act(struct connectdata *conn, bool *block)
break;
case SSH_SFTP_READDIR_LINK:
- sshc->readdir_len =
+ rc =
libssh2_sftp_symlink_ex(sshc->sftp_session,
sshc->readdir_linkPath,
curlx_uztoui(strlen(sshc->readdir_linkPath)),
sshc->readdir_filename,
PATH_MAX, LIBSSH2_SFTP_READLINK);
- if(sshc->readdir_len == LIBSSH2_ERROR_EAGAIN) {
- rc = LIBSSH2_ERROR_EAGAIN;
+ if(rc == LIBSSH2_ERROR_EAGAIN) {
break;
}
+ sshc->readdir_len = (size_t) rc;
Curl_safefree(sshc->readdir_linkPath);
/* get room for the filename and extra output */
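
Note: the lib/ssh.c hunks above differ slightly from the plain int-to-size_t swaps, because libssh2_sftp_readdir_ex() and libssh2_sftp_symlink_ex() return a signed value that can be a negative error code (e.g. LIBSSH2_ERROR_EAGAIN). The return value therefore stays in the signed rc and is copied into the size_t readdir_len only after it is known to be positive. A simplified, hypothetical sketch of that split (illustrative names, not libssh2's API):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    #define ERR_EAGAIN (-37)                 /* stand-in for LIBSSH2_ERROR_EAGAIN */

    /* hypothetical directory reader: returns entry length, 0 at the end,
       or a negative error code -- the same shape as libssh2's readdir */
    static int read_entry(char *buf, size_t buflen)
    {
      const char name[] = "file.txt";
      if(sizeof(name) - 1 > buflen)
        return -1;
      memcpy(buf, name, sizeof(name) - 1);
      return (int)(sizeof(name) - 1);
    }

    int main(void)
    {
      char buf[64];
      size_t len;                            /* unsigned, like readdir_len after the patch */
      int rc = read_entry(buf, sizeof(buf) - 1);

      if(rc == ERR_EAGAIN)
        return 2;                            /* would retry later */
      if(rc > 0) {
        len = (size_t)rc;                    /* cast only once rc is known positive */
        buf[len] = '\0';
        printf("%s\n", buf);
        return 0;
      }
      return rc < 0;                         /* 0: end of listing, 1: error */
    }
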
diff --git a/lib/ssh.h b/lib/ssh.h
index 1c1355077..0620aac32 100644
--- a/lib/ssh.h
+++ b/lib/ssh.h
@@ -7,7 +7,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2015, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2018, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
@@ -131,7 +131,7 @@ struct ssh_conn {
quote command fails) */
char *homedir; /* when doing SFTP we figure out home dir in the
connect phase */
- int readdir_len, readdir_totalLen, readdir_currLen;
+ size_t readdir_len, readdir_totalLen, readdir_currLen;
char *readdir_line;
char *readdir_linkPath;
/* end of READDIR stuff */
diff --git a/lib/tftp.c b/lib/tftp.c
index 61a3fef06..e5bc80b02 100644
--- a/lib/tftp.c
+++ b/lib/tftp.c
@@ -712,7 +712,7 @@ static CURLcode tftp_tx(tftp_state_data_t *state, tftp_event_t event)
ssize_t sbytes;
CURLcode result = CURLE_OK;
struct SingleRequest *k = &data->req;
- int cb; /* Bytes currently read */
+ size_t cb; /* Bytes currently read */
switch(event) {
@@ -765,7 +765,7 @@ static CURLcode tftp_tx(tftp_state_data_t *state, tftp_event_t event)
state->retries = 0;
setpacketevent(&state->spacket, TFTP_EVENT_DATA);
setpacketblock(&state->spacket, state->block);
- if(state->block > 1 && state->sbytes < (int)state->blksize) {
+ if(state->block > 1 && state->sbytes < state->blksize) {
state->state = TFTP_STATE_FIN;
return CURLE_OK;
}
@@ -781,7 +781,7 @@ static CURLcode tftp_tx(tftp_state_data_t *state, tftp_event_t event)
&cb);
if(result)
return result;
- state->sbytes += cb;
+ state->sbytes += (int)cb;
state->conn->data->req.upload_fromhere += cb;
} while(state->sbytes < state->blksize && cb != 0);
diff --git a/lib/transfer.c b/lib/transfer.c
index 298208703..7159d5c82 100644
--- a/lib/transfer.c
+++ b/lib/transfer.c
@@ -120,11 +120,12 @@ CURLcode Curl_get_upload_buffer(struct Curl_easy *data)
* This function will call the read callback to fill our buffer with data
* to upload.
*/
-CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
+CURLcode Curl_fillreadbuffer(struct connectdata *conn, size_t bytes,
+ size_t *nreadp)
{
struct Curl_easy *data = conn->data;
- size_t buffersize = (size_t)bytes;
- int nread;
+ size_t buffersize = bytes;
+ size_t nread;
#ifdef CURL_DOES_CONVERSIONS
bool sending_http_headers = FALSE;
@@ -144,11 +145,9 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
}
- /* this function returns a size_t, so we typecast to int to prevent warnings
- with picky compilers */
Curl_set_in_callback(data, true);
- nread = (int)data->state.fread_func(data->req.upload_fromhere, 1,
- buffersize, data->state.in);
+ nread = data->state.fread_func(data->req.upload_fromhere, 1,
+ buffersize, data->state.in);
Curl_set_in_callback(data, false);
if(nread == CURL_READFUNC_ABORT) {
@@ -177,7 +176,7 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
return CURLE_OK; /* nothing was read */
}
- else if((size_t)nread > buffersize) {
+ else if(nread > buffersize) {
/* the read function returned a too large value */
*nreadp = 0;
failf(data, "read function returned funny value");
@@ -236,13 +235,13 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
#ifdef CURL_DOES_CONVERSIONS
{
CURLcode result;
- int length;
+ size_t length;
if(data->set.prefer_ascii)
/* translate the protocol and data */
length = nread;
else
/* just translate the protocol portion */
- length = (int)strlen(hexbuffer);
+ length = strlen(hexbuffer);
result = Curl_convert_to_network(data, data->req.upload_fromhere,
length);
/* Curl_convert_to_network calls failf if unsuccessful */
@@ -257,7 +256,7 @@ CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
infof(data, "Signaling end of chunked upload via terminating chunk.\n");
}
- nread += (int)strlen(endofline_native); /* for the added end of line */
+ nread += strlen(endofline_native); /* for the added end of line */
}
#ifdef CURL_DOES_CONVERSIONS
else if((data->set.prefer_ascii) && (!sending_http_headers)) {
@@ -933,7 +932,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
if(!k->upload_done) {
/* HTTP pollution, this should be written nicer to become more
protocol agnostic. */
- int fillcount;
+ size_t fillcount;
struct HTTP *http = k->protop;
if((k->exp100 == EXP100_SENDING_REQUEST) &&
@@ -964,7 +963,7 @@ static CURLcode readwrite_upload(struct Curl_easy *data,
if(result)
return result;
- nread = (ssize_t)fillcount;
+ nread = fillcount;
}
else
nread = 0; /* we're done uploading/reading */
diff --git a/lib/transfer.h b/lib/transfer.h
index df75f9a97..9263e5b69 100644
--- a/lib/transfer.h
+++ b/lib/transfer.h
@@ -51,7 +51,8 @@ int Curl_single_getsock(const struct connectdata *conn,
curl_socket_t *socks,
int numsocks);
CURLcode Curl_readrewind(struct connectdata *conn);
-CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp);
+CURLcode Curl_fillreadbuffer(struct connectdata *conn, size_t bytes,
+ size_t *nreadp);
CURLcode Curl_retry_request(struct connectdata *conn, char **url);
bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc);
CURLcode Curl_get_upload_buffer(struct Curl_easy *data);