/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
***************************************************************************/
#include "setup.h"

/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>
#include "strequal.h"
#ifdef WIN32
#include <time.h>
#include <io.h>
#else
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_NETDB_H
#include <netdb.h>
#endif
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NET_IF_H
#include <net/if.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#include <signal.h>
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifndef HAVE_SOCKET
#error "We can't compile without socket() support!"
#endif
#endif /* WIN32 */
#include "urldata.h"
#include <curl/curl.h>
#include "netrc.h"
#include "transfer.h"
#include "sendf.h"
#include "speedcheck.h"
#include "progress.h"
#include "url.h"
#include "getinfo.h"
#include "sslgen.h"
#include "http_digest.h"
#include "http_ntlm.h"
#include "http_negotiate.h"
#include "memory.h"
#include "multiif.h"
#include "easyif.h" /* for Curl_convert_to_network prototype */
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
/* The last #include file should be: */
#include "memdebug.h"
#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
/*
* This function will call the read callback to fill our buffer with data
* to upload.
*/
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
{
struct SessionHandle *data = conn->data;
size_t buffersize = (size_t)bytes;
int nread;
if(data->req.upload_chunky) {
/* if chunked Transfer-Encoding */
buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
data->req.upload_fromhere += 10; /* 32bit hex + CRLF */
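/* The read callback thus fills the buffer starting 10 bytes in, leaving
   room for the hex chunk-size prefix to be copied in front of the data
   later on and a CRLF to be appended after it, without moving the data. */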
}
/* this function returns a size_t, so we typecast to int to prevent warnings
with picky compilers */
nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
buffersize, conn->fread_in);
if(nread == CURL_READFUNC_ABORT) {
failf(data, "operation aborted by callback");
return CURLE_ABORTED_BY_CALLBACK;
}
else if(nread == CURL_READFUNC_PAUSE) {
struct SingleRequest *k = &data->req;
k->keepon |= KEEP_READ_PAUSE; /* mark reading as paused */
*nreadp = 0; /* nothing was read */
return CURLE_OK;
}
else if((size_t)nread > buffersize)
/* the read function returned a too large value */
return CURLE_READ_ERROR;
if(!data->req.forbidchunk && data->req.upload_chunky) {
/* if chunked Transfer-Encoding */
char hexbuffer[11];
int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
"%x\r\n", nread);
/* move buffer pointer */
data->req.upload_fromhere -= hexlen;
nread += hexlen;
/* copy the prefix to the buffer */
memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
/* always append CRLF to the data */
memcpy(data->req.upload_fromhere + nread, "\r\n", 2);
if((nread - hexlen) == 0) {
/* mark this as done once this chunk is transferred */
data->req.upload_done = TRUE;
}
nread+=2; /* for the added CRLF */
}
*nreadp = nread;
#ifdef CURL_DOES_CONVERSIONS
if(data->set.prefer_ascii) {
CURLcode res;
res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
/* Curl_convert_to_network calls failf if unsuccessful */
if(res != CURLE_OK) {
return(res);
}
}
#endif /* CURL_DOES_CONVERSIONS */
return CURLE_OK;
}
/*
* checkhttpprefix()
*
* Returns TRUE if member of the list matches prefix of string
*/
static bool
checkhttpprefix(struct SessionHandle *data,
const char *s)
{
struct curl_slist *head = data->set.http200aliases;
bool rc = FALSE;
#ifdef CURL_DOES_CONVERSIONS
/* convert from the network encoding using a scratch area */
char *scratch = calloc(1, strlen(s)+1);
if(NULL == scratch) {
failf (data, "Failed to calloc memory for conversion!");
return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
}
strcpy(scratch, s);
if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
/* Curl_convert_from_network calls failf if unsuccessful */
free(scratch);
return FALSE; /* can't return CURLE_foobar so return FALSE */
}
s = scratch;
#endif /* CURL_DOES_CONVERSIONS */
while(head) {
if(checkprefix(head->data, s)) {
rc = TRUE;
break;
}
head = head->next;
}

if((rc != TRUE) && (checkprefix("HTTP/", s))) {
rc = TRUE;
}
#ifdef CURL_DOES_CONVERSIONS
free(scratch);
#endif /* CURL_DOES_CONVERSIONS */
return rc;
}

/*
* Curl_readrewind() rewinds the read stream. This is typically used for HTTP
* POST/PUT with multi-pass authentication when a sending was denied and a
* resend is necessary.
*/
CURLcode Curl_readrewind(struct connectdata *conn)
{
struct SessionHandle *data = conn->data;
conn->bits.rewindaftersend = FALSE; /* we rewind now */
/* explicitly switch off sending data on this connection now since we are
about to restart a new transfer and thus we want to avoid inadvertently
sending more data on the existing connection until the next transfer
starts */
data->req.keepon &= ~KEEP_WRITE;
/* We have sent away data. If not using CURLOPT_POSTFIELDS or
CURLOPT_HTTPPOST, call app to rewind
*/
if(data->set.postfields ||
(data->set.httpreq == HTTPREQ_POST_FORM))
; /* do nothing */
else {
if(data->set.seek_func) {
int err;
err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
if(err) {
failf(data, "seek callback returned error %d", (int)err);
return CURLE_SEND_FAIL_REWIND;
}
}
else if(data->set.ioctl_func) {
curlioerr err;
err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
data->set.ioctl_client);
infof(data, "the ioctl callback returned %d\n", (int)err);
if(err) {
/* FIXME: convert to a human readable error message */
failf(data, "ioctl callback returned error %d", (int)err);
return CURLE_SEND_FAIL_REWIND;
}
}
else {
/* If no CURLOPT_READFUNCTION is used, we know that we operate on a
given FILE * stream and we can actually attempt to rewind that
ourself with fseek() */
if(data->set.fread_func == (curl_read_callback)fread) {
if(-1 != fseek(data->set.in, 0, SEEK_SET))
/* successful rewind */
return CURLE_OK;
}
/* no callback set or failure above, makes us fail at once */
failf(data, "necessary data rewind wasn't possible");
return CURLE_SEND_FAIL_REWIND;
}
}
return CURLE_OK;
}
static int data_pending(const struct connectdata *conn)
{
/* in the case of libssh2, we can never be really sure that we have emptied
its internal buffers so we MUST always try until we get EAGAIN back */
return conn->protocol&(PROT_SCP|PROT_SFTP) ||
Curl_ssl_data_pending(conn, FIRSTSOCKET);
}
#ifndef MIN
#define MIN(a,b) ((a) < (b) ? (a) : (b))
#endif
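/* read_rewind() "un-reads" data by moving the connection's read position
   backwards in the master buffer, so that a later read hands out the same
   bytes again (used when more was read than this request could consume). */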
static void read_rewind(struct connectdata *conn,
size_t thismuch)
{
conn->read_pos -= thismuch;
conn->bits.stream_was_rewound = TRUE;
#ifdef CURLDEBUG
{
char buf[512 + 1];
size_t show;
show = MIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
if(conn->master_buffer) {
memcpy(buf, conn->master_buffer + conn->read_pos, show);
buf[show] = '\0';
}
else {
buf[0] = '\0';
}
DEBUGF(infof(conn->data,
"Buffer after stream rewind (read_pos = %d): [%s]",
conn->read_pos, buf));
}
#endif
}
/*
* Curl_readwrite() is the low-level function to be called when data is to
* be read and written to/from the connection.
*/
CURLcode Curl_readwrite(struct connectdata *conn,
bool *done)
{
struct SessionHandle *data = conn->data;
struct SingleRequest *k = &data->req;
CURLcode result;
ssize_t nread; /* number of bytes read */
int didwhat=0;
curl_socket_t fd_read;
curl_socket_t fd_write;
curl_off_t contentlength;
int select_res = conn->cselect_bits;
conn->cselect_bits = 0;
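/* cselect_bits holds socket read/write/error status already detected for
   this transfer before entering here; it is cleared so it only gets acted
   upon once, and it lets us skip the select()/poll() check further down. */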
/* only use the proper socket if the *_HOLD bit is not set simultaneously as
then we are in rate limiting state in that transfer direction */
if((k->keepon & KEEP_READBITS) == KEEP_READ) {
fd_read = conn->sockfd;
#if defined(USE_LIBSSH2)
if(conn->protocol & (PROT_SCP|PROT_SFTP))
select_res |= CURL_CSELECT_IN;
#endif /* USE_LIBSSH2 */
} else
fd_read = CURL_SOCKET_BAD;

if((k->keepon & KEEP_WRITEBITS) == KEEP_WRITE)
fd_write = conn->writesockfd;
else
fd_write = CURL_SOCKET_BAD;
if(!select_res) { /* Call for select()/poll() only, if read/write/error
status is not known. */
select_res = Curl_socket_ready(fd_read, fd_write, 0);
}
if(select_res == CURL_CSELECT_ERR) {
failf(data, "select/poll returned error");
return CURLE_SEND_ERROR;
}
/* We go ahead and do a read if we have a readable socket or if
the stream was rewound (in which case we have data in a
buffer) */
if((k->keepon & KEEP_READ) &&
((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
/* read */
bool is_empty_data = FALSE;
/* This is where we loop until we have read everything there is to
read or we get a EWOULDBLOCK */
do {
size_t buffersize = data->set.buffer_size?
data->set.buffer_size : BUFSIZE;
size_t bytestoread = buffersize;
int readrc;
if(k->size != -1 && !k->header) {
/* make sure we don't read "too much" if we can help it since we
might be pipelining and then someone else might want to read what
follows! */
curl_off_t totalleft = k->size - k->bytecount;
if(totalleft < (curl_off_t)bytestoread)
bytestoread = (size_t)totalleft;
}
if(bytestoread) {
/* receive data from the network! */
readrc = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
/* subzero, this would've blocked */
if(0 > readrc)
break; /* get out of loop */
/* get the CURLcode from the int */
result = (CURLcode)readrc;
if(result>0)
return result;
}
else {
/* read nothing but since we wanted nothing we consider this an OK
situation to proceed from */
nread = 0;
result = CURLE_OK;
}
if((k->bytecount == 0) && (k->writebytecount == 0)) {
Curl_pgrsTime(data, TIMER_STARTTRANSFER);
if(k->exp100 > EXP100_SEND_DATA)
/* set time stamp to compare with when waiting for the 100 */
k->start100 = Curl_tvnow();
}
didwhat |= KEEP_READ;
/* indicates data of zero size, i.e. empty file */
is_empty_data = (bool)((nread == 0) && (k->bodywrites == 0));
/* NULL terminate, allowing string ops to be used */
if(0 < nread || is_empty_data) {
k->buf[nread] = 0;
}
else if(0 >= nread) {
/* if we receive 0 or less here, the server closed the connection
and we bail out from this! */
DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
k->keepon &= ~KEEP_READ;
break;
}
/* Default buffer to use when we write the buffer, it may be changed
in the flow below before the actual storing is done. */
k->str = k->buf;
/* Since this is a two-state thing, we check if we are parsing
headers at the moment or not. */
if(k->header) {
/* we are in parse-the-header-mode */
bool stop_reading = FALSE;
/* header line within buffer loop */
do {
size_t hbufp_index;
size_t rest_length;
size_t full_length;
int writetype;
/* str_start is start of line within buf */
k->str_start = k->str;
/* data is in network encoding so use 0x0a instead of '\n' */
k->end_ptr = memchr(k->str_start, 0x0a, nread);
if(!k->end_ptr) {
/* Not a complete header line within buffer, append the data to
the end of the headerbuff. */
if(k->hbuflen + nread >= data->state.headersize) {
/* We enlarge the header buffer as it is too small */
char *newbuff;
size_t newsize=CURLMAX((k->hbuflen+nread)*3/2,
data->state.headersize*2);
hbufp_index = k->hbufp - data->state.headerbuff;
newbuff = (char *)realloc(data->state.headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return CURLE_OUT_OF_MEMORY;
}
data->state.headersize=newsize;
data->state.headerbuff = newbuff;
k->hbufp = data->state.headerbuff + hbufp_index;
}
memcpy(k->hbufp, k->str, nread);
k->hbufp += nread;
k->hbuflen += nread;
if(!k->headerline && (k->hbuflen>5)) {
/* make a first check that this looks like a HTTP header */
if(!checkhttpprefix(data, data->state.headerbuff)) {
/* this is not the beginning of a HTTP first header line */
k->header = FALSE;
k->badheader = HEADER_ALLBAD;
break;
}
}
break; /* read more and try again */
}
/* decrease the size of the remaining (supposed) header line */
rest_length = (k->end_ptr - k->str)+1;
nread -= (ssize_t)rest_length;
k->str = k->end_ptr + 1; /* move past new line */
full_length = k->str - k->str_start;
/*
* We're about to copy a chunk of data to the end of the
* already received header. We make sure that the full string
* fit in the allocated header buffer, or else we enlarge
* it.
*/
if(k->hbuflen + full_length >=
data->state.headersize) {
char *newbuff;
size_t newsize=CURLMAX((k->hbuflen+full_length)*3/2,
data->state.headersize*2);
hbufp_index = k->hbufp - data->state.headerbuff;
newbuff = (char *)realloc(data->state.headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return CURLE_OUT_OF_MEMORY;
}
data->state.headersize= newsize;
data->state.headerbuff = newbuff;
k->hbufp = data->state.headerbuff + hbufp_index;
}
/* copy to end of line */
memcpy(k->hbufp, k->str_start, full_length);
k->hbufp += full_length;
k->hbuflen += full_length;
*k->hbufp = 0;
k->end_ptr = k->hbufp;
k->p = data->state.headerbuff;
/****
* We now have a FULL header line that p points to
*****/
if(!k->headerline) {
/* the first read header */
if((k->hbuflen>5) &&
!checkhttpprefix(data, data->state.headerbuff)) {
/* this is not the beginning of a HTTP first header line */
k->header = FALSE;
if(nread)
/* since there's more, this is a partial bad header */
k->badheader = HEADER_PARTHEADER;
else {
/* this was all we read so it's all a bad header */
k->badheader = HEADER_ALLBAD;
nread = (ssize_t)rest_length;
}
break;
}
}
/* headers are in network encoding so
use 0x0a and 0x0d instead of '\n' and '\r' */
if((0x0a == *k->p) || (0x0d == *k->p)) {
size_t headerlen;
/* Zero-length header line means end of headers! */
#ifdef CURL_DOES_CONVERSIONS
if(0x0d == *k->p) {
*k->p = '\r'; /* replace with CR in host encoding */
k->p++; /* pass the CR byte */
}
if(0x0a == *k->p) {
*k->p = '\n'; /* replace with LF in host encoding */
k->p++; /* pass the LF byte */
}
#else
if('\r' == *k->p)
k->p++; /* pass the \r byte */
if('\n' == *k->p)
k->p++; /* pass the \n byte */
#endif /* CURL_DOES_CONVERSIONS */
if(100 == k->httpcode) {
/*
* We have made a HTTP PUT or POST and this is 1.1-lingo
* that tells us that the server is OK with this and ready
* to receive the data.
* However, we'll get more headers now so we must get
* back into the header-parsing state!
*/
k->header = TRUE;
k->headerline = 0; /* restart the header line counter */
/* if we did wait for this do enable write now! */
if(k->exp100) {
k->exp100 = EXP100_SEND_DATA;
k->keepon |= KEEP_WRITE;
}
}
else {
k->header = FALSE; /* no more header to parse! */
if((k->size == -1) && !k->chunk && !conn->bits.close &&
(k->httpversion >= 11) ) {
/* On HTTP 1.1, when connection is not to get closed, but no
Content-Length nor Content-Encoding chunked have been
received, according to RFC2616 section 4.4 point 5, we
assume that the server will close the connection to
signal the end of the document. */
infof(data, "no chunk, no close, no size. Assume close to "
"signal end\n");
conn->bits.close = TRUE;
}
}
if(417 == k->httpcode) {
/*
* we got: "417 Expectation Failed" this means:
* we have made a HTTP call and our Expect Header
* seems to cause a problem => abort the write operations
* (or prevent them from starting).
*/
k->exp100 = EXP100_FAILED;
k->keepon &= ~KEEP_WRITE;
}
#ifndef CURL_DISABLE_HTTP
/*
* When all the headers have been parsed, see if we should give
* up and return an error.
*/
if(Curl_http_should_fail(conn)) {
failf (data, "The requested URL returned error: %d",
k->httpcode);
return CURLE_HTTP_RETURNED_ERROR;
}
#endif /* CURL_DISABLE_HTTP */
/* now, only output this if the header AND body are requested:
*/
writetype = CLIENTWRITE_HEADER;
if(data->set.include_header)
writetype |= CLIENTWRITE_BODY;
headerlen = k->p - data->state.headerbuff;
result = Curl_client_write(conn, writetype,
data->state.headerbuff,
headerlen);
if(result)
return result;
data->info.header_size += (long)headerlen;
data->req.headerbytecount += (long)headerlen;
data->req.deductheadercount =
(100 == k->httpcode)?data->req.headerbytecount:0;
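/* Remember how many header bytes an interim "100 Continue" response used,
   so they can be deducted from the counters when the real response headers
   arrive. */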
if(data->state.resume_from &&
(data->set.httpreq==HTTPREQ_GET) &&
(k->httpcode == 416)) {
/* "Requested Range Not Satisfiable" */
stop_reading = TRUE;
}
#ifndef CURL_DISABLE_HTTP
if(!stop_reading) {
/* Curl_http_auth_act() checks what authentication methods
* that are available and decides which one (if any) to
* use. It will set 'newurl' if an auth method was picked. */
result = Curl_http_auth_act(conn);
if(result)
return result;
if(conn->bits.rewindaftersend) {
/* We rewind after a complete send, so thus we continue
sending now */
infof(data, "Keep sending data to get tossed away!\n");
k->keepon |= KEEP_WRITE;
}
}
#endif /* CURL_DISABLE_HTTP */
if(!k->header) {
/*
* really end-of-headers.
*
* If we requested a "no body", this is a good time to get
* out and return home.
*/
if(data->set.opt_no_body)
stop_reading = TRUE;
else {
/* If we know the expected size of this document, we set the
maximum download size to the size of the expected
document or else, we won't know when to stop reading!
Note that we set the download maximum even if we read a
"Connection: close" header, to make sure that
"Content-Length: 0" still prevents us from attempting to
read the (missing) response-body.
*/
/* According to RFC2616 section 4.4, we MUST ignore
Content-Length: headers if we are now receiving data
using chunked Transfer-Encoding.
*/
if(k->chunk)
k->size=-1;
}
if(-1 != k->size) {
/* We do this operation even if no_body is true, since this
data might be retrieved later with curl_easy_getinfo()
and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
Curl_pgrsSetDownloadSize(data, k->size);
k->maxdownload = k->size;
}
/* If max download size is *zero* (nothing) we already
have nothing and can safely return ok now! */
if(0 == k->maxdownload)
stop_reading = TRUE;
if(stop_reading) {
/* we make sure that this socket isn't read more now */
k->keepon &= ~KEEP_READ;
}
if(data->set.verbose)
Curl_debug(data, CURLINFO_HEADER_IN,
k->str_start, headerlen, conn);
break; /* exit header line loop */
}

/* We continue reading headers, so reset the line-based
header parsing variables hbufp && hbuflen */
k->hbufp = data->state.headerbuff;
k->hbuflen = 0;
continue;
}
/*
* Checks for special headers coming up.
*/
if(!k->headerline++) {
/* This is the first header, it MUST be the error code line
or else we consider this to be the body right away! */
int httpversion_major;
int nc;
#ifdef CURL_DOES_CONVERSIONS
#define HEADER1 scratch
#define SCRATCHSIZE 21
CURLcode res;
char scratch[SCRATCHSIZE+1]; /* "HTTP/major.minor 123" */
/* We can't really convert this yet because we
don't know if it's the 1st header line or the body.
So we do a partial conversion into a scratch area,
leaving the data at k->p as-is.
*/
strncpy(&scratch[0], k->p, SCRATCHSIZE);
scratch[SCRATCHSIZE] = 0; /* null terminate */
res = Curl_convert_from_network(data,
&scratch[0],
SCRATCHSIZE);
if(CURLE_OK != res) {
/* Curl_convert_from_network calls failf if unsuccessful */
return res;
}
#else
#define HEADER1 k->p /* no conversion needed, just use k->p */
#endif /* CURL_DOES_CONVERSIONS */
nc = sscanf(HEADER1,
" HTTP/%d.%d %3d",
&httpversion_major,
&k->httpversion,
&k->httpcode);
if(nc==3) {
k->httpversion += 10 * httpversion_major;
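/* k->httpversion now holds major*10 + minor, so "HTTP/1.1" is stored
   as 11 and "HTTP/1.0" as 10 */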
}
else {
/* this is the real world, not a Nirvana
NCSA 1.5.x returns this crap when asked for HTTP/1.1
*/
nc=sscanf(HEADER1, " HTTP %3d", &k->httpcode);
k->httpversion = 10;
/* If user has set option HTTP200ALIASES,
compare header line against list of aliases
*/
if(!nc) {
if(checkhttpprefix(data, k->p)) {
nc = 1;
k->httpcode = 200;
k->httpversion = 10;
}
}
}
if(nc) {
data->info.httpcode = k->httpcode;
data->info.httpversion = k->httpversion;
/*
* This code executes as part of processing the header. As a
* result, it's not totally clear how to interpret the
* response code yet as that depends on what other headers may
* be present. 401 and 407 may be errors, but may be OK
* depending on how authentication is working. Other codes
* are definitely errors, so give up here.
*/
if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
((k->httpcode != 401) || !conn->bits.user_passwd) &&
((k->httpcode != 407) || !conn->bits.proxy_user_passwd) ) {
if(data->state.resume_from &&
(data->set.httpreq==HTTPREQ_GET) &&
(k->httpcode == 416)) {
/* "Requested Range Not Satisfiable", just proceed and
pretend this is no error */
}
else {
/* serious error, go home! */
failf (data, "The requested URL returned error: %d",
k->httpcode);
return CURLE_HTTP_RETURNED_ERROR;
}
}
if(k->httpversion == 10) {
/* Default action for HTTP/1.0 must be to close, unless
we get one of those fancy headers that tell us the
server keeps it open for us! */
infof(data, "HTTP 1.0, assume close after body\n");
conn->bits.close = TRUE;
}
else if(k->httpversion >= 11 &&
!conn->bits.close) {
/* If HTTP version is >= 1.1 and connection is persistent
server supports pipelining. */
DEBUGF(infof(data,
"HTTP 1.1 or later with persistent connection, "
"pipelining supported\n"));
conn->server_supports_pipelining = TRUE;
}
switch(k->httpcode) {
case 204:
/* (quote from RFC2616, section 10.2.5): The server has
* fulfilled the request but does not need to return an
* entity-body ... The 204 response MUST NOT include a
* message-body, and thus is always terminated by the first
* empty line after the header fields. */
/* FALLTHROUGH */
case 416: /* Requested Range Not Satisfiable, it has the
Content-Length: set as the "real" document but no
actual response is sent. */
case 304:
/* (quote from RFC2616, section 10.3.5): The 304 response
* MUST NOT contain a message-body, and thus is always
* terminated by the first empty line after the header
* fields. */
k->size=0;
k->maxdownload=0;
k->ignorecl = TRUE; /* ignore Content-Length headers */
break;
default:
/* nothing */
break;
}
}
else {
k->header = FALSE; /* this is not a header line */
break;
}
}
#ifdef CURL_DOES_CONVERSIONS
/* convert from the network encoding */
result = Curl_convert_from_network(data, k->p, strlen(k->p));
if(CURLE_OK != result) {
return(result);
}
/* Curl_convert_from_network calls failf if unsuccessful */
#endif /* CURL_DOES_CONVERSIONS */
/* Check for Content-Length: header lines to get size. Ignore
the header completely if we get a 416 response as then we're
resuming a document that we don't get, and this header contains
info about the true size of the document we didn't get now. */
if(!k->ignorecl && !data->set.ignorecl &&
checkprefix("Content-Length:", k->p)) {
contentlength = curlx_strtoofft(k->p+15, NULL, 10);
if(data->set.max_filesize &&
contentlength > data->set.max_filesize) {
failf(data, "Maximum file size exceeded");
return CURLE_FILESIZE_EXCEEDED;
}
if(contentlength >= 0) {
k->size = contentlength;
k->maxdownload = k->size;
/* we set the progress download size already at this point
just to make it easier for apps/callbacks to extract this
info as soon as possible */
Curl_pgrsSetDownloadSize(data, k->size);
}
else {
/* Negative Content-Length is really odd, and we know it
happens for example when older Apache servers send large
files */
conn->bits.close = TRUE;
infof(data, "Negative content-length: %" FORMAT_OFF_T
", closing after transfer\n", contentlength);
}
}
/* check for Content-Type: header lines to get the mime-type */
else if(checkprefix("Content-Type:", k->p)) {
char *start;
char *end;
size_t len;
/* Find the first non-space letter */
for(start=k->p+13;
*start && ISSPACE(*start);
start++)
; /* empty loop */
/* data is now in the host encoding so
use '\r' and '\n' instead of 0x0d and 0x0a */
end = strchr(start, '\r');
if(!end)
end = strchr(start, '\n');
if(end) {
/* skip all trailing space letters */
for(; ISSPACE(*end) && (end > start); end--)
; /* empty loop */
/* get length of the type */
len = end-start+1;
/* allocate memory of a cloned copy */
Curl_safefree(data->info.contenttype);
data->info.contenttype = malloc(len + 1);
if(NULL == data->info.contenttype)
return CURLE_OUT_OF_MEMORY;
/* copy the content-type string */
memcpy(data->info.contenttype, start, len);
data->info.contenttype[len] = 0; /* zero terminate */
}
}
#ifndef CURL_DISABLE_HTTP
else if((k->httpversion == 10) &&
conn->bits.httpproxy &&
Curl_compareheader(k->p,
"Proxy-Connection:", "keep-alive")) {
/*
* When a HTTP/1.0 reply comes when using a proxy, the
* 'Proxy-Connection: keep-alive' line tells us the
* connection will be kept alive for our pleasure.
* Default action for 1.0 is to close.
*/
conn->bits.close = FALSE; /* don't close when done */
infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
}
else if((k->httpversion == 11) &&
conn->bits.httpproxy &&
Curl_compareheader(k->p,
"Proxy-Connection:", "close")) {
/*
* We get a HTTP/1.1 response from a proxy and it says it'll
* close down after this transfer.
*/
conn->bits.close = TRUE; /* close when done */
infof(data, "HTTP/1.1 proxy connection set close!\n");
}
else if((k->httpversion == 10) &&
Curl_compareheader(k->p, "Connection:", "keep-alive")) {
/*
* A HTTP/1.0 reply with the 'Connection: keep-alive' line
* tells us the connection will be kept alive for our
* pleasure. Default action for 1.0 is to close.
*
* [RFC2068, section 19.7.1] */
conn->bits.close = FALSE; /* don't close when done */
infof(data, "HTTP/1.0 connection set to keep alive!\n");
}
else if(Curl_compareheader(k->p, "Connection:", "close")) {
/*