/* (removed web-viewer scrape artifacts: "Newer" / "Older" navigation links) */
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2004, Daniel Stenberg, <daniel@haxx.se>, et al.
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
***************************************************************************/
/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#include <sys/stat.h>
#include <errno.h>
#include "strequal.h"
#if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
#include <time.h>
#include <io.h>
#else
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#ifdef HAVE_NETINET_IN_H
#include <netinet/in.h>
#endif
#include <sys/time.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <netdb.h>
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NET_IF_H
#include <net/if.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif
#include <signal.h>
#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif
#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif
#ifndef HAVE_SELECT
#error "We can't compile without select() support!"
#endif
#ifndef HAVE_SOCKET
#error "We can't compile without socket() support!"
#endif
#endif
#include "urldata.h"
#include <curl/curl.h>
#include "netrc.h"
#include "transfer.h"
#include "sendf.h"
#include "speedcheck.h"
#include "progress.h"
#include "getdate.h"
#include "url.h"
#include "getinfo.h"
#include "ssluse.h"
#include "http_digest.h"
#include "http_ntlm.h"
#include "http_negotiate.h"
#include "memory.h"
#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>
/* The last #include file should be: */
#include "memdebug.h"
/* how long to wait at most for a 100-continue response before proceeding,
   counted in milliseconds */
#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */

/* flags for the transfer keeper's 'keepon' field; KEEP_READ (1) and
   KEEP_WRITE (2) are used as individual bits (or:ed in and masked out) */
enum {
KEEP_NONE,
KEEP_READ,
KEEP_WRITE
};
/* We keep this static and global since this is read-only and NEVER
changed. It should just remain a blanked-out timeout value. */
static struct timeval notimeout={0,0};
/* (removed commit-metadata scrape artifact) */
/*
* This function will call the read callback to fill our buffer with data
* to upload.
*/
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
Daniel Stenberg
committed
{
struct SessionHandle *data = conn->data;
Daniel Stenberg
committed
int buffersize = bytes;
int nread;
if(conn->bits.upload_chunky) {
/* if chunked Transfer-Encoding */
buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
conn->upload_fromhere += 10; /* 32bit hex + CRLF */
}
Daniel Stenberg
committed
nread = conn->fread(conn->upload_fromhere, 1,
buffersize, conn->fread_in);
if(nread == CURL_READFUNC_ABORT) {
failf(data, "operation aborted by callback\n");
return CURLE_ABORTED_BY_CALLBACK;
}
Daniel Stenberg
committed
if(!conn->bits.forbidchunk && conn->bits.upload_chunky) {
/* if chunked Transfer-Encoding */
char hexbuffer[11];
int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
"%x\r\n", nread);
/* move buffer pointer */
conn->upload_fromhere -= hexlen;
nread += hexlen;
/* copy the prefix to the buffer */
memcpy(conn->upload_fromhere, hexbuffer, hexlen);
/* always append CRLF to the data */
memcpy(conn->upload_fromhere + nread, "\r\n", 2);
if((nread - hexlen) == 0) {
Daniel Stenberg
committed
/* mark this as done once this chunk is transfered */
conn->keep.upload_done = TRUE;
}
nread+=2; /* for the added CRLF */
Daniel Stenberg
committed
}
*nreadp = nread;
return CURLE_OK;
Daniel Stenberg
committed
}
/*
 * checkhttpprefix()
 *
 * Returns TRUE when the string starts with "HTTP/" or with any of the
 * user-provided HTTP 200 aliases (CURLOPT_HTTP200ALIASES), FALSE otherwise.
 */
static bool
checkhttpprefix(struct SessionHandle *data,
                const char *s)
{
  struct curl_slist *alias;

  /* first check the user's alias list, if one was set */
  for(alias = data->set.http200aliases; alias; alias = alias->next) {
    if(checkprefix(alias->data, s))
      return TRUE;
  }

  /* then the standard HTTP status-line prefix */
  return checkprefix("HTTP/", s) ? TRUE : FALSE;
}
/* (removed commit-metadata scrape artifact) */
/*
* Curl_readwrite() is the low-level function to be called when data is to
* be read and written to/from the connection.
*/
CURLcode Curl_readwrite(struct connectdata *conn,
bool *done)
struct Curl_transfer_keeper *k = &conn->keep;
struct SessionHandle *data = conn->data;
ssize_t nread; /* number of bytes read */
int didwhat=0;
/* These two are used only if no other select() or _fdset() have been
invoked before this. This typicly happens if you use the multi interface
and call curl_multi_perform() without calling curl_multi_fdset()
first. */
fd_set extrareadfd;
fd_set extrawritefd;
fd_set *readfdp = k->readfdp;
fd_set *writefdp = k->writefdp;
Daniel Stenberg
committed
curl_off_t contentlength;
if((k->keepon & KEEP_READ) && !readfdp) {
/* reading is requested, but no socket descriptor pointer was set */
FD_ZERO(&extrareadfd);
FD_SET(conn->sockfd, &extrareadfd);
readfdp = &extrareadfd;
/* no write, no exceptions, no timeout */
select(conn->sockfd+1, readfdp, NULL, NULL, ¬imeout);
}
if((k->keepon & KEEP_WRITE) && !writefdp) {
/* writing is requested, but no socket descriptor pointer was set */
FD_ZERO(&extrawritefd);
FD_SET(conn->writesockfd, &extrawritefd);
writefdp = &extrawritefd;
/* no read, no exceptions, no timeout */
select(conn->writesockfd+1, NULL, writefdp, NULL, ¬imeout);
}
Daniel Stenberg
committed
/* If we still have reading to do, we check if we have a readable
socket. Sometimes the reafdp is NULL, if no fd_set was done using
Daniel Stenberg
committed
the multi interface and then we can do nothing but to attempt a
read to be sure. */
if((k->keepon & KEEP_READ) &&
(!readfdp || FD_ISSET(conn->sockfd, readfdp))) {
Daniel Stenberg
committed
bool readdone = TRUE;
Daniel Stenberg
committed
/* This is where we loop until we have read everything there is to
read or we get a EWOULDBLOCK */
do {
Daniel Stenberg
committed
int buffersize = data->set.buffer_size?
data->set.buffer_size:BUFSIZE -1;
Daniel Stenberg
committed
/* receive data from the network! */
int readrc = Curl_read(conn, conn->sockfd, k->buf, buffersize, &nread);
Daniel Stenberg
committed
/* subzero, this would've blocked */
if(0>readrc)
Daniel Stenberg
committed
break; /* get out of loop */
/* get the CURLcode from the int */
result = (CURLcode)readrc;
Daniel Stenberg
committed
if(result>0)
return result;
if ((k->bytecount == 0) && (k->writebytecount == 0)) {
Daniel Stenberg
committed
Curl_pgrsTime(data, TIMER_STARTTRANSFER);
if(k->wait100_after_headers)
/* set time stamp to compare with when waiting for the 100 */
k->start100 = Curl_tvnow();
}
Daniel Stenberg
committed
didwhat |= KEEP_READ;
/* NULL terminate, allowing string ops to be used */
if (0 < nread)
k->buf[nread] = 0;
/* if we receive 0 or less here, the server closed the connection and
we bail out from this! */
else if (0 >= nread) {
k->keepon &= ~KEEP_READ;
FD_ZERO(&k->rkeepfd);
readdone = TRUE;
break;
}
/* Default buffer to use when we write the buffer, it may be changed
in the flow below before the actual storing is done. */
k->str = k->buf;
/* Since this is a two-state thing, we check if we are parsing
headers at the moment or not. */
Daniel Stenberg
committed
if (k->header) {
/* we are in parse-the-header-mode */
bool stop_reading = FALSE;
Daniel Stenberg
committed
/* header line within buffer loop */
do {
int hbufp_index;
int rest_length;
Daniel Stenberg
committed
/* str_start is start of line within buf */
k->str_start = k->str;
Daniel Stenberg
committed
k->end_ptr = strchr (k->str_start, '\n');
Daniel Stenberg
committed
if (!k->end_ptr) {
/* Not a complete header line within buffer, append the data to
the end of the headerbuff. */
if (k->hbuflen + nread >= data->state.headersize) {
/* We enlarge the header buffer as it is too small */
char *newbuff;
size_t newsize=CURLMAX((k->hbuflen+nread)*3/2,
data->state.headersize*2);
Daniel Stenberg
committed
hbufp_index = k->hbufp - data->state.headerbuff;
newbuff = (char *)realloc(data->state.headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return CURLE_OUT_OF_MEMORY;
}
data->state.headersize=newsize;
data->state.headerbuff = newbuff;
k->hbufp = data->state.headerbuff + hbufp_index;
}
memcpy(k->hbufp, k->str, nread);
k->hbufp += nread;
k->hbuflen += nread;
if (!k->headerline && (k->hbuflen>5)) {
/* make a first check that this looks like a HTTP header */
if(!checkhttpprefix(data, data->state.headerbuff)) {
Daniel Stenberg
committed
/* this is not the beginning of a HTTP first header line */
k->header = FALSE;
k->badheader = HEADER_ALLBAD;
break;
}
}
Daniel Stenberg
committed
}
/* decrease the size of the remaining (supposed) header line */
rest_length = (k->end_ptr - k->str)+1;
nread -= rest_length;
Daniel Stenberg
committed
Daniel Stenberg
committed
k->str = k->end_ptr + 1; /* move past new line */
full_length = k->str - k->str_start;
Daniel Stenberg
committed
/*
* We're about to copy a chunk of data to the end of the
* already received header. We make sure that the full string
* fit in the allocated header buffer, or else we enlarge
Daniel Stenberg
committed
* it.
*/
if (k->hbuflen + full_length >=
Daniel Stenberg
committed
data->state.headersize) {
size_t newsize=CURLMAX((k->hbuflen+full_length)*3/2,
data->state.headersize*2);
hbufp_index = k->hbufp - data->state.headerbuff;
newbuff = (char *)realloc(data->state.headerbuff, newsize);
if(!newbuff) {
failf (data, "Failed to alloc memory for big header!");
return CURLE_OUT_OF_MEMORY;
Daniel Stenberg
committed
data->state.headersize= newsize;
data->state.headerbuff = newbuff;
k->hbufp = data->state.headerbuff + hbufp_index;
}
Daniel Stenberg
committed
/* copy to end of line */
strncpy (k->hbufp, k->str_start, full_length);
k->hbufp += full_length;
k->hbuflen += full_length;
Daniel Stenberg
committed
*k->hbufp = 0;
k->end_ptr = k->hbufp;
Daniel Stenberg
committed
k->p = data->state.headerbuff;
Daniel Stenberg
committed
/****
* We now have a FULL header line that p points to
*****/
if(!k->headerline) {
/* the first read header */
if((k->hbuflen>5) &&
!checkhttpprefix(data, data->state.headerbuff)) {
Daniel Stenberg
committed
/* this is not the beginning of a HTTP first header line */
k->header = FALSE;
if(nread)
/* since there's more, this is a partial bad header */
k->badheader = HEADER_PARTHEADER;
else {
/* this was all we read so its all a bad header */
k->badheader = HEADER_ALLBAD;
nread = rest_length;
}
Daniel Stenberg
committed
break;
}
}
Daniel Stenberg
committed
if (('\n' == *k->p) || ('\r' == *k->p)) {
int headerlen;
/* Zero-length header line means end of headers! */
if ('\r' == *k->p)
k->p++; /* pass the \r byte */
if ('\n' == *k->p)
k->p++; /* pass the \n byte */
if(100 == k->httpcode) {
/*
* We have made a HTTP PUT or POST and this is 1.1-lingo
* that tells us that the server is OK with this and ready
* to receive the data.
* However, we'll get more headers now so we must get
* back into the header-parsing state!
*/
k->header = TRUE;
k->headerline = 0; /* restart the header line counter */
/* if we did wait for this do enable write now! */
if (k->write_after_100_header) {
k->write_after_100_header = FALSE;
FD_SET (conn->writesockfd, &k->writefd); /* write */
k->keepon |= KEEP_WRITE;
k->wkeepfd = k->writefd;
}
}
else
k->header = FALSE; /* no more header to parse! */
if (417 == k->httpcode) {
/*
* we got: "417 Expectation Failed" this means:
* we have made a HTTP call and our Expect Header
* seems to cause a problem => abort the write operations
* (or prevent them from starting).
*/
k->write_after_100_header = FALSE;
k->keepon &= ~KEEP_WRITE;
FD_ZERO(&k->wkeepfd);
}
#ifndef CURL_DISABLE_HTTP
Daniel Stenberg
committed
/*
* When all the headers have been parsed, see if we should give
* up and return an error.
*/
Daniel Stenberg
committed
if (Curl_http_should_fail(conn)) {
failf (data, "The requested URL returned error: %d",
k->httpcode);
return CURLE_HTTP_RETURNED_ERROR;
}
#endif /* CURL_DISABLE_HTTP */
Daniel Stenberg
committed
Daniel Stenberg
committed
/* now, only output this if the header AND body are requested:
*/
if (data->set.include_header)
Daniel Stenberg
committed
headerlen = k->p - data->state.headerbuff;
Daniel Stenberg
committed
data->state.headerbuff,
headerlen);
if(result)
return result;
data->info.header_size += headerlen;
conn->headerbytecount += headerlen;
conn->deductheadercount =
(100 == k->httpcode)?conn->headerbytecount:0;
if (conn->resume_from &&
!k->content_range &&
(data->set.httpreq==HTTPREQ_GET)) {
if(k->httpcode == 416) {
/* "Requested Range Not Satisfiable" */
stop_reading = TRUE;
}
else {
/* we wanted to resume a download, although the server
* doesn't seem to support this and we did this with a GET
* (if it wasn't a GET we did a POST or PUT resume) */
failf (data, "HTTP server doesn't seem to support "
"byte ranges. Cannot resume.");
return CURLE_HTTP_RANGE_ERROR;
}
}
#ifndef CURL_DISABLE_HTTP
if(!stop_reading) {
/* Curl_http_auth_act() checks what authentication methods
* that are available and decides which one (if any) to
* use. It will set 'newurl' if an auth metod was picked. */
result = Curl_http_auth_act(conn);
if(result)
return result;
}
#endif /* CURL_DISABLE_HTTP */
Daniel Stenberg
committed
if(!k->header) {
/*
* really end-of-headers.
*
* If we requested a "no body", this is a good time to get
* out and return home.
*/
Daniel Stenberg
committed
if(conn->bits.no_body)
Daniel Stenberg
committed
stop_reading = TRUE;
Daniel Stenberg
committed
else {
/* If we know the expected size of this document, we set the
maximum download size to the size of the expected
document or else, we won't know when to stop reading!
Note that we set the download maximum even if we read a
"Connection: close" header, to make sure that
"Content-Length: 0" still prevents us from attempting to
read the (missing) response-body.
*/
/* According to RFC2616 section 4.4, we MUST ignore
Content-Length: headers if we are now receiving data
using chunked Transfer-Encoding.
*/
if(conn->bits.chunk)
conn->size=-1;
}
if(-1 != conn->size) {
/* We do this operation even if no_body is true, since this
data might be retrieved later with curl_easy_getinfo()
and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
Curl_pgrsSetDownloadSize(data, conn->size);
conn->maxdownload = conn->size;
Daniel Stenberg
committed
}
/* If max download size is *zero* (nothing) we already
have nothing and can safely return ok now! */
if(0 == conn->maxdownload)
stop_reading = TRUE;
Daniel Stenberg
committed
if(stop_reading) {
/* we make sure that this socket isn't read more now */
k->keepon &= ~KEEP_READ;
FD_ZERO(&k->rkeepfd);
}
Daniel Stenberg
committed
Daniel Stenberg
committed
}
Daniel Stenberg
committed
/* We continue reading headers, so reset the line-based
header parsing variables hbufp && hbuflen */
k->hbufp = data->state.headerbuff;
k->hbuflen = 0;
continue;
Daniel Stenberg
committed
/*
* Checks for special headers coming up.
*/
Daniel Stenberg
committed
if (!k->headerline++) {
/* This is the first header, it MUST be the error code line
or else we consiser this to be the body right away! */
int httpversion_major;
Daniel Stenberg
committed
int nc=sscanf(k->p, " HTTP/%d.%d %3d",
&httpversion_major,
&k->httpversion,
&k->httpcode);
Daniel Stenberg
committed
if (nc==3) {
k->httpversion += 10 * httpversion_major;
}
else {
/* this is the real world, not a Nirvana
NCSA 1.5.x returns this crap when asked for HTTP/1.1
*/
Daniel Stenberg
committed
nc=sscanf(k->p, " HTTP %3d", &k->httpcode);
Daniel Stenberg
committed
k->httpversion = 10;
/* If user has set option HTTP200ALIASES,
compare header line against list of aliases
*/
if (!nc) {
if (checkhttpprefix(data, k->p)) {
nc = 1;
k->httpcode = 200;
k->httpversion =
(data->set.httpversion==CURL_HTTP_VERSION_1_0)? 10 : 11;
}
}
Daniel Stenberg
committed
}
Daniel Stenberg
committed
if (nc) {
data->info.httpcode = k->httpcode;
data->info.httpversion = k->httpversion;
Daniel Stenberg
committed
/*
* This code executes as part of processing the header. As a
* result, it's not totally clear how to interpret the
* response code yet as that depends on what other headers may
* be present. 401 and 407 may be errors, but may be OK
* depending on how authentication is working. Other codes
* are definitely errors, so give up here.
*/
Daniel Stenberg
committed
if (data->set.http_fail_on_error &&
Daniel Stenberg
committed
(k->httpcode >= 400) &&
(k->httpcode != 401) &&
(k->httpcode != 407)) {
Daniel Stenberg
committed
/* serious error, go home! */
failf (data, "The requested URL returned error: %d",
k->httpcode);
return CURLE_HTTP_RETURNED_ERROR;
Daniel Stenberg
committed
}
if(k->httpversion == 10)
/* Default action for HTTP/1.0 must be to close, unless
we get one of those fancy headers that tell us the
server keeps it open for us! */
conn->bits.close = TRUE;
switch(k->httpcode) {
case 204:
/* (quote from RFC2616, section 10.2.5): The server has
* fulfilled the request but does not need to return an
* entity-body ... The 204 response MUST NOT include a
* message-body, and thus is always terminated by the first
* empty line after the header fields. */
/* FALLTHROUGH */
Daniel Stenberg
committed
case 416: /* Requested Range Not Satisfiable, it has the
Content-Length: set as the "real" document but no
actual response is sent. */
Daniel Stenberg
committed
case 304:
Daniel Stenberg
committed
/* (quote from RFC2616, section 10.3.5): The 304 response
* MUST NOT contain a message-body, and thus is always
* terminated by the first empty line after the header
* fields. */
Daniel Stenberg
committed
conn->size=0;
conn->maxdownload=0;
break;
default:
/* nothing */
break;
}
}
else {
k->header = FALSE; /* this is not a header line */
break;
Daniel Stenberg
committed
/* Check for Content-Length: header lines to get size. Ignore
the header completely if we get a 416 response as then we're
resuming a document that we don't get, and this header contains
Daniel Stenberg
committed
info about the true size of the document we didn't get now. */
if ((k->httpcode != 416) &&
checkprefix("Content-Length:", k->p)) {
Daniel Stenberg
committed
contentlength = curlx_strtoofft(k->p+15, NULL, 10);
if (data->set.max_filesize && contentlength >
data->set.max_filesize) {
failf(data, "Maximum file size exceeded");
return CURLE_FILESIZE_EXCEEDED;
}
conn->size = contentlength;
Daniel Stenberg
committed
}
/* check for Content-Type: header lines to get the mime-type */
else if (checkprefix("Content-Type:", k->p)) {
char *start;
char *end;
Daniel Stenberg
committed
/* Find the first non-space letter */
for(start=k->p+13;
Daniel Stenberg
committed
*start && isspace((int)*start);
start++);
Daniel Stenberg
committed
end = strchr(start, '\r');
if(!end)
end = strchr(start, '\n');
if(end) {
/* skip all trailing space letters */
for(; isspace((int)*end) && (end > start); end--);
Daniel Stenberg
committed
/* get length of the type */
len = end-start+1;
Daniel Stenberg
committed
/* allocate memory of a cloned copy */
Daniel Stenberg
committed
Curl_safefree(data->info.contenttype);
Daniel Stenberg
committed
data->info.contenttype = malloc(len + 1);
if (NULL == data->info.contenttype)
return CURLE_OUT_OF_MEMORY;
Daniel Stenberg
committed
Daniel Stenberg
committed
/* copy the content-type string */
memcpy(data->info.contenttype, start, len);
data->info.contenttype[len] = 0; /* zero terminate */
}
Daniel Stenberg
committed
}
#ifndef CURL_DISABLE_HTTP
Daniel Stenberg
committed
else if((k->httpversion == 10) &&
conn->bits.httpproxy &&
Curl_compareheader(k->p,
"Proxy-Connection:", "keep-alive")) {
Daniel Stenberg
committed
* When a HTTP/1.0 reply comes when using a proxy, the
* 'Proxy-Connection: keep-alive' line tells us the
* connection will be kept alive for our pleasure.
* Default action for 1.0 is to close.
Daniel Stenberg
committed
conn->bits.close = FALSE; /* don't close when done */
infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
Daniel Stenberg
committed
else if((k->httpversion == 10) &&
Curl_compareheader(k->p, "Connection:", "keep-alive")) {
Daniel Stenberg
committed
* A HTTP/1.0 reply with the 'Connection: keep-alive' line
* tells us the connection will be kept alive for our
* pleasure. Default action for 1.0 is to close.
Daniel Stenberg
committed
* [RFC2068, section 19.7.1] */
conn->bits.close = FALSE; /* don't close when done */
infof(data, "HTTP/1.0 connection set to keep alive!\n");
}
else if (Curl_compareheader(k->p, "Connection:", "close")) {
/*
* [RFC 2616, section 8.1.2.1]
* "Connection: close" is HTTP/1.1 language and means that
* the connection will close when this request has been
* served.
Daniel Stenberg
committed
conn->bits.close = TRUE; /* close when done */
Daniel Stenberg
committed
else if (Curl_compareheader(k->p,
"Transfer-Encoding:", "chunked")) {
/*
* [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
* means that the server will send a series of "chunks". Each
* chunk starts with line with info (including size of the
* coming block) (terminated with CRLF), then a block of data
* with the previously mentioned size. There can be any amount
* of chunks, and a chunk-data set to zero signals the
* end-of-chunks. */
conn->bits.chunk = TRUE; /* chunks coming our way */
/* init our chunky engine */
Curl_httpchunk_init(conn);
Daniel Stenberg
committed
else if (checkprefix("Content-Encoding:", k->p) &&
data->set.encoding) {
/*
* Process Content-Encoding. Look for the values: identity,
* gzip, deflate, compress, x-gzip and x-compress. x-gzip and
Daniel Stenberg
committed
* x-compress are the same as gzip and compress. (Sec 3.5 RFC
* 2616). zlib cannot handle compress. However, errors are
* handled further down when the response body is processed
*/
Daniel Stenberg
committed
char *start;
/* Find the first non-space letter */
for(start=k->p+17;
*start && isspace((int)*start);
start++);
/* Record the content-encoding for later use */
Daniel Stenberg
committed
if (checkprefix("identity", start))
k->content_encoding = IDENTITY;
else if (checkprefix("deflate", start))
k->content_encoding = DEFLATE;
else if (checkprefix("gzip", start)
Daniel Stenberg
committed
|| checkprefix("x-gzip", start))
k->content_encoding = GZIP;
else if (checkprefix("compress", start)
Daniel Stenberg
committed
|| checkprefix("x-compress", start))
k->content_encoding = COMPRESS;
else if (Curl_compareheader(k->p, "Content-Range:", "bytes")) {
/* Content-Range: bytes [num]-
Content-Range: bytes: [num]-
The second format was added August 1st 2000 by Igor
Khristophorov since Sun's webserver JavaWebServer/1.1.1
obviously sends the header this way! :-( */
char *ptr = strstr(k->p, "bytes");
ptr+=5;
if(*ptr == ':')
/* stupid colon skip */
ptr++;
Daniel Stenberg
committed
k->offset = curlx_strtoofft(ptr, NULL, 10);
if (conn->resume_from == k->offset)
/* we asked for a resume and we got it */
k->content_range = TRUE;
Daniel Stenberg
committed
}
Daniel Stenberg
committed
else if(data->cookies &&
checkprefix("Set-Cookie:", k->p)) {
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
CURL_LOCK_ACCESS_SINGLE);
Curl_cookie_add(data,
data->cookies, TRUE, k->p+11,
/* If there is a custom-set Host: name, use it
here, or else use real peer host name. */
conn->allocptr.cookiehost?
conn->allocptr.cookiehost:conn->host.name,
Daniel Stenberg
committed
conn->path);
Daniel Stenberg
committed
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
Daniel Stenberg
committed
else if(checkprefix("Last-Modified:", k->p) &&
(data->set.timecondition || data->set.get_filetime) ) {
time_t secs=time(NULL);
k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
&secs);
if(data->set.get_filetime)
data->info.filetime = k->timeofdoc;
}
else if((checkprefix("WWW-Authenticate:", k->p) &&
(401 == k->httpcode)) ||
(checkprefix("Proxy-authenticate:", k->p) &&
(407 == k->httpcode))) {
result = Curl_http_input_auth(conn, k->httpcode, k->p);
if(result)
return result;
else if ((k->httpcode >= 300 && k->httpcode < 400) &&
checkprefix("Location:", k->p)) {
if(data->set.http_follow_location) {
/* this is the URL that the server advices us to get instead */
char *ptr;
char *start=k->p;
char backup;
start += 9; /* pass "Location:" */
/* Skip spaces and tabs. We do this to support multiple
white spaces after the "Location:" keyword. */
while(*start && isspace((int)*start ))
start++;
/* Scan through the string from the end to find the last
non-space. k->end_ptr points to the actual terminating zero
letter, move pointer one letter back and start from
there. This logic strips off trailing whitespace, but keeps
any embedded whitespace. */
ptr = k->end_ptr-1;
while((ptr>=start) && isspace((int)*ptr))
ptr--;
ptr++;
backup = *ptr; /* store the ending letter */
if(ptr != start) {
*ptr = '\0'; /* zero terminate */
conn->newurl = strdup(start); /* clone string */
*ptr = backup; /* restore ending letter */
if(!conn->newurl)
return CURLE_OUT_OF_MEMORY;
}
#endif /* CURL_DISABLE_HTTP */
/*
* End of header-checks. Write them to the client.
*/
if (data->set.include_header)
if(data->set.verbose)
Curl_debug(data, CURLINFO_HEADER_IN,
k->p, k->hbuflen, conn->host.dispname);
Daniel Stenberg
committed
result = Curl_client_write(data, writetype, k->p, k->hbuflen);
if(result)
return result;
data->info.header_size += k->hbuflen;
conn->headerbytecount += k->hbuflen;
/* reset hbufp pointer && hbuflen */
k->hbufp = data->state.headerbuff;
k->hbuflen = 0;
}
while (!stop_reading && *k->str); /* header line within buffer */
if(stop_reading)
/* We've stopped dealing with input, get out of the do-while loop */
break;
/* We might have reached the end of the header part here, but
there might be a non-header part left in the end of the read
buffer. */
/* This is not an 'else if' since it may be a rest from the header
parsing, where the beginning of the buffer is headers and the end
is non-headers. */
if (k->str && !k->header && (nread > 0)) {
if(0 == k->bodywrites) {
/* These checks are only made the first time we are about to
write a piece of the body */
if(conn->protocol&PROT_HTTP) {
/* HTTP-only checks */
if (conn->newurl) {
if(conn->bits.close) {
/* Abort after the headers if "follow Location" is set
and we're set to close anyway. */
k->keepon &= ~KEEP_READ;
FD_ZERO(&k->rkeepfd);
*done = TRUE;
return CURLE_OK;
}
/* We have a new url to load, but since we want to be able
to re-use this connection properly, we read the full
response in "ignore more" */
k->ignorebody = TRUE;
infof(data, "Ignoring the response-body\n");
}
if(data->set.timecondition && !conn->range) {
/* A time condition has been set AND no ranges have been
requested. This seems to be what chapter 13.3.4 of
RFC 2616 defines to be the correct action for a
HTTP/1.1 client */
if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
switch(data->set.timecondition) {
case CURL_TIMECOND_IFMODSINCE:
default:
if(k->timeofdoc < data->set.timevalue) {
infof(data,
"The requested document is not new enough\n");
*done = TRUE;
return CURLE_OK;
}
break;
case CURL_TIMECOND_IFUNMODSINCE:
if(k->timeofdoc > data->set.timevalue) {
infof(data,
"The requested document is not old enough\n");
*done = TRUE;
return CURLE_OK;
}
break;
} /* switch */
} /* two valid time strings */
} /* we have a time condition */
} /* this is HTTP */
} /* this is the first time we write a body part */
k->bodywrites++;
/* pass data to the debug function before it gets "dechunked" */
if(data->set.verbose) {
if(k->badheader) {
Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
k->hbuflen, conn->host.dispname);
if(k->badheader == HEADER_PARTHEADER)
Curl_debug(data, CURLINFO_DATA_IN, k->str, nread,
conn->host.dispname);
else
Curl_debug(data, CURLINFO_DATA_IN, k->str, nread,
conn->host.dispname);
}
Daniel Stenberg
committed
#ifndef CURL_DISABLE_HTTP
if(conn->bits.chunk) {
/*
* Bless me father for I have sinned. Here comes a chunked
* transfer flying and we need to decode this properly. While
* the name says read, this function both reads and writes away
* the data. The returned 'nread' holds the number of actual
* data it wrote to the client. */
CHUNKcode res =
Curl_httpchunk_read(conn, k->str, nread, &nread);
if(CHUNKE_OK < res) {
if(CHUNKE_WRITE_ERROR == res) {
failf(data, "Failed writing data");
return CURLE_WRITE_ERROR;
}
Daniel Stenberg
committed
failf(data, "Received problem %d in the chunky parser", res);
return CURLE_RECV_ERROR;