/* transfer.c */
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at http://curl.haxx.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * $Id$
 ***************************************************************************/
/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>

#include "strtoofft.h"
#ifdef WIN32
#include <time.h>
#include <io.h>
#else
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <netinet/in.h>
#include <sys/time.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NET_IF_H
#include <net/if.h>
#endif
#include <sys/ioctl.h>
#include <signal.h>

#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif

#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#ifndef HAVE_SOCKET
#error "We can't compile without socket() support!"
#endif


#include "urldata.h"
#include <curl/curl.h>
#include "netrc.h"

#include "content_encoding.h"
#include "transfer.h"
#include "sendf.h"
#include "speedcheck.h"
#include "progress.h"
#include "http.h"
#include "url.h"
#include "getinfo.h"
#include "http_digest.h"
#include "share.h"
#include "select.h"
#include "easyif.h" /* for Curl_convert_to_network prototype */

#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>

/* The last #include file should be: */
#include "memdebug.h"

#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */

/*
 * This function will call the read callback to fill our buffer with data
 * to upload.
 */
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
  struct SessionHandle *data = conn->data;
  size_t buffersize = (size_t)bytes;
    /* if chunked Transfer-Encoding */
    buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
    data->req.upload_fromhere += 10; /* 32bit hex + CRLF */
  /* this function returns a size_t, so we typecast to int to prevent warnings
     with picky compilers */
  nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
    failf(data, "operation aborted by callback");
  else if(nread == CURL_READFUNC_PAUSE) {
    struct SingleRequest *k = &data->req;
    /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
    k->keepon |= KEEP_WRITE_PAUSE; /* mark socket send as paused */
    *nreadp = 0;
Yang Tse's avatar
Yang Tse committed
    return CURLE_OK; /* nothing was read */
  else if((size_t)nread > buffersize) {
    /* the read function returned a too large value */
  if(!data->req.forbidchunk && data->req.upload_chunky) {
    /* if chunked Transfer-Encoding */
    char hexbuffer[11];
    int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
                          "%x\r\n", nread);
    /* move buffer pointer */
    nread += hexlen;

    /* copy the prefix to the buffer */
    memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
    memcpy(data->req.upload_fromhere + nread, "\r\n", 2);
      /* mark this as done once this chunk is transfered */
#ifdef CURL_DOES_CONVERSIONS
    res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(res != CURLE_OK) {
      return(res);
    }
  }
#endif /* CURL_DOES_CONVERSIONS */

/*
 * checkhttpprefix()
 *
 * Returns TRUE if a member of the http200aliases list, or the plain "HTTP/"
 * prefix, matches the start of the given string.
 */
static bool
checkhttpprefix(struct SessionHandle *data,
                const char *s)
{
  struct curl_slist *head = data->set.http200aliases;
  bool rc = FALSE;
#ifdef CURL_DOES_CONVERSIONS
  /* convert from the network encoding using a scratch area */
  char *scratch = calloc(1, strlen(s)+1);
  if(NULL == scratch) {
     failf (data, "Failed to calloc memory for conversion!");
     return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
  }
  strcpy(scratch, s);
  if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
    /* Curl_convert_from_network calls failf if unsuccessful */
     free(scratch);
     return FALSE; /* can't return CURLE_foobar so return FALSE */
  }
  s = scratch;
#endif /* CURL_DOES_CONVERSIONS */

  while(head) {
    if(checkprefix(head->data, s)) {
      rc = TRUE;
      break;
    }
    head = head->next;
  }

  if((rc != TRUE) && (checkprefix("HTTP/", s)))
    rc = TRUE;

#ifdef CURL_DOES_CONVERSIONS
  free(scratch);
#endif /* CURL_DOES_CONVERSIONS */
  return rc;
}
 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
 * POST/PUT with multi-pass authentication when a sending was denied and a
 * resend is necessary.
 */
CURLcode Curl_readrewind(struct connectdata *conn)
{
  struct SessionHandle *data = conn->data;

  conn->bits.rewindaftersend = FALSE; /* we rewind now */

  /* explicitly switch off sending data on this connection now since we are
     about to restart a new transfer and thus we want to avoid inadvertently
     sending more data on the existing connection until the next transfer
     starts */
  /* We have sent away data. If not using CURLOPT_POSTFIELDS or
     CURLOPT_HTTPPOST, call app to rewind
  */
  if(data->set.postfields ||
     (data->set.httpreq == HTTPREQ_POST_FORM))
    ; /* do nothing */
  else {
    if(data->set.seek_func) {
      int err;

      err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
      if(err) {
        failf(data, "seek callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else if(data->set.ioctl_func) {
      err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
                                   data->set.ioctl_client);
      infof(data, "the ioctl callback returned %d\n", (int)err);

      if(err) {
        /* FIXME: convert to a human readable error message */
        failf(data, "ioctl callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else {
      /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
         given FILE * stream and we can actually attempt to rewind that
         ourself with fseek() */
      if(data->set.fread_func == (curl_read_callback)fread) {
        if(-1 != fseek(data->set.in, 0, SEEK_SET))
          /* successful rewind */
          return CURLE_OK;
      }

      /* no callback set or failure aboe, makes us fail at once */
      failf(data, "necessary data rewind wasn't possible");
      return CURLE_SEND_FAIL_REWIND;
    }
  }
  return CURLE_OK;
}

static int data_pending(const struct connectdata *conn)
  /* in the case of libssh2, we can never be really sure that we have emptied
     its internal buffers so we MUST always try until we get EAGAIN back */
  return conn->protocol&(PROT_SCP|PROT_SFTP) ||
    Curl_ssl_data_pending(conn, FIRSTSOCKET);
#ifndef MIN
#define MIN(a,b) (a < b ? a : b)
#endif

/* Moves the connection's read position back by 'thismuch' bytes so
   already-consumed buffered data will be read again. */
static void read_rewind(struct connectdata *conn,
                        size_t thismuch)
{
  conn->read_pos -= thismuch;
  conn->bits.stream_was_rewound = TRUE;

#ifdef CURLDEBUG
  {
    char buf[512 + 1];
    size_t show;

    show = MIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
    if(conn->master_buffer) {
        memcpy(buf, conn->master_buffer + conn->read_pos, show);
        buf[show] = '\0';
    }
    else {
        buf[0] = '\0';
    }

    /* cast read_pos (size_t) to int to match the %d specifier */
    DEBUGF(infof(conn->data,
                 "Buffer after stream rewind (read_pos = %d): [%s]",
                 (int)conn->read_pos, buf));
  }
#endif
}

/*
 * Curl_readwrite() is the low-level function to be called when data is to
 * be read and written to/from the connection.
 */
CURLcode Curl_readwrite(struct connectdata *conn,
                        bool *done)
  struct SessionHandle *data = conn->data;
  ssize_t nread; /* number of bytes read */
  int didwhat=0;
  curl_socket_t fd_read;
  curl_socket_t fd_write;
  int select_res = conn->cselect_bits;

  conn->cselect_bits = 0;
  /* only use the proper socket if the *_HOLD bit is not set simultaneously as
     then we are in rate limiting state in that transfer direction */

  if((k->keepon & KEEP_READBITS) == KEEP_READ) {
    fd_read = conn->sockfd;
    if(conn->protocol & (PROT_SCP|PROT_SFTP))
    fd_read = CURL_SOCKET_BAD;
  if((k->keepon & KEEP_WRITEBITS) == KEEP_WRITE)
    fd_write = conn->writesockfd;
  else
    fd_write = CURL_SOCKET_BAD;

   if(!select_res) { /* Call for select()/poll() only, if read/write/error
                         status is not known. */
       select_res = Curl_socket_ready(fd_read, fd_write, 0);
   }
    failf(data, "select/poll returned error");
  /* We go ahead and do a read if we have a readable socket or if
     the stream was rewound (in which case we have data in a
     buffer) */
  if((k->keepon & KEEP_READ) &&
     ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
    /* read */
    bool is_empty_data = FALSE;

    /* This is where we loop until we have read everything there is to
       read or we get a EWOULDBLOCK */
    do {
      size_t buffersize = data->set.buffer_size?
        data->set.buffer_size : BUFSIZE;
      size_t bytestoread = buffersize;
      int readrc;

      if(k->size != -1 && !k->header) {
        /* make sure we don't read "too much" if we can help it since we
           might be pipelining and then someone else might want to read what
           follows! */
        curl_off_t totalleft = k->size - k->bytecount;
        if(totalleft < (curl_off_t)bytestoread)
          bytestoread = (size_t)totalleft;
      }
      if(bytestoread) {
        /* receive data from the network! */
        readrc = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
        /* subzero, this would've blocked */
        if(0 > readrc)
          break; /* get out of loop */
        /* get the CURLcode from the int */
        result = (CURLcode)readrc;
        if(result>0)
          return result;
      }
      else {
        /* read nothing but since we wanted nothing we consider this an OK
           situation to proceed from */
        nread = 0;
        result = CURLE_OK;
      }
      if((k->bytecount == 0) && (k->writebytecount == 0)) {
        Curl_pgrsTime(data, TIMER_STARTTRANSFER);
        if(k->exp100 > EXP100_SEND_DATA)
          /* set time stamp to compare with when waiting for the 100 */
          k->start100 = Curl_tvnow();
      }
      didwhat |= KEEP_READ;
      /* indicates data of zero size, i.e. empty file */
      is_empty_data = (bool)((nread == 0) && (k->bodywrites == 0));
      /* NULL terminate, allowing string ops to be used */
      if(0 < nread || is_empty_data) {
        k->buf[nread] = 0;
      }
      else if(0 >= nread) {
        /* if we receive 0 or less here, the server closed the connection
           and we bail out from this! */
        DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
        k->keepon &= ~KEEP_READ;
        break;
      }
      /* Default buffer to use when we write the buffer, it may be changed
         in the flow below before the actual storing is done. */
      k->str = k->buf;
      /* Since this is a two-state thing, we check if we are parsing
         headers at the moment or not. */
      if(k->header) {
        /* we are in parse-the-header-mode */
        bool stop_reading = FALSE;
        /* header line within buffer loop */
        do {
          size_t hbufp_index;
          size_t rest_length;
          size_t full_length;
          int writetype;
          /* str_start is start of line within buf */
          k->str_start = k->str;
          /* data is in network encoding so use 0x0a instead of '\n' */
          k->end_ptr = memchr(k->str_start, 0x0a, nread);
          if(!k->end_ptr) {
            /* Not a complete header line within buffer, append the data to
               the end of the headerbuff. */

            if(k->hbuflen + nread >= data->state.headersize) {
              /* We enlarge the header buffer as it is too small */
              size_t newsize=CURLMAX((k->hbuflen+nread)*3/2,
                                     data->state.headersize*2);
              hbufp_index = k->hbufp - data->state.headerbuff;
              newbuff = (char *)realloc(data->state.headerbuff, newsize);
              if(!newbuff) {
                failf (data, "Failed to alloc memory for big header!");
                return CURLE_OUT_OF_MEMORY;
              data->state.headerbuff = newbuff;
              k->hbufp = data->state.headerbuff + hbufp_index;
            }
            memcpy(k->hbufp, k->str, nread);
            k->hbufp += nread;
            k->hbuflen += nread;
            if(!k->headerline && (k->hbuflen>5)) {
              /* make a first check that this looks like a HTTP header */
              if(!checkhttpprefix(data, data->state.headerbuff)) {
                /* this is not the beginning of a HTTP first header line */
                k->header = FALSE;
          /* decrease the size of the remaining (supposed) header line */
          rest_length = (k->end_ptr - k->str)+1;
          nread -= (ssize_t)rest_length;

          k->str = k->end_ptr + 1; /* move past new line */

          full_length = k->str - k->str_start;

          /*
           * We're about to copy a chunk of data to the end of the
           * already received header. We make sure that the full string
           * fit in the allocated header buffer, or else we enlarge
           * it.
           */
          if(k->hbuflen + full_length >=
             data->state.headersize) {
            char *newbuff;
            size_t newsize=CURLMAX((k->hbuflen+full_length)*3/2,
                                   data->state.headersize*2);
            hbufp_index = k->hbufp - data->state.headerbuff;
            newbuff = (char *)realloc(data->state.headerbuff, newsize);
            if(!newbuff) {
              failf (data, "Failed to alloc memory for big header!");
              return CURLE_OUT_OF_MEMORY;
            }
            data->state.headersize= newsize;
            data->state.headerbuff = newbuff;
            k->hbufp = data->state.headerbuff + hbufp_index;
          }

          /* copy to end of line */
          memcpy(k->hbufp, k->str_start, full_length);
          k->hbufp += full_length;
          k->hbuflen += full_length;
          *k->hbufp = 0;
          k->end_ptr = k->hbufp;

          k->p = data->state.headerbuff;

          /****
           * We now have a FULL header line that p points to
           *****/

          if(!k->headerline) {
            /* the first read header */
            if((k->hbuflen>5) &&
               !checkhttpprefix(data, data->state.headerbuff)) {
              /* this is not the beginning of a HTTP first header line */
              k->header = FALSE;
              if(nread)
                /* since there's more, this is a partial bad header */
                k->badheader = HEADER_PARTHEADER;
              else {
                /* this was all we read so its all a bad header */
                k->badheader = HEADER_ALLBAD;
                nread = (ssize_t)rest_length;
              break;
            }
          }

          /* headers are in network encoding so
             use 0x0a and 0x0d instead of '\n' and '\r' */
          if((0x0a == *k->p) || (0x0d == *k->p)) {
            size_t headerlen;
            /* Zero-length header line means end of headers! */

#ifdef CURL_DOES_CONVERSIONS
            if(0x0d == *k->p) {
              *k->p = '\r'; /* replace with CR in host encoding */
              k->p++;       /* pass the CR byte */
            }
            if(0x0a == *k->p) {
              *k->p = '\n'; /* replace with LF in host encoding */
              k->p++;       /* pass the LF byte */
            }
            if('\r' == *k->p)
              k->p++; /* pass the \r byte */
            if('\n' == *k->p)
              k->p++; /* pass the \n byte */
            if(100 <= k->httpcode && 199 >= k->httpcode) {
              /*
               * We have made a HTTP PUT or POST and this is 1.1-lingo
               * that tells us that the server is OK with this and ready
               * to receive the data.
               * However, we'll get more headers now so we must get
               * back into the header-parsing state!
               */
              k->header = TRUE;
              k->headerline = 0; /* restart the header line counter */
              /* if we did wait for this do enable write now! */
              if(k->exp100) {
                k->exp100 = EXP100_SEND_DATA;
                k->keepon |= KEEP_WRITE;
            }
            else {
              k->header = FALSE; /* no more header to parse! */

              if((k->size == -1) && !k->chunk && !conn->bits.close &&
                 (k->httpversion >= 11) ) {
                /* On HTTP 1.1, when connection is not to get closed, but no
                   Content-Length nor Content-Encoding chunked have been
                   received, according to RFC2616 section 4.4 point 5, we
                   assume that the server will close the connection to
                   signal the end of the document. */
                infof(data, "no chunk, no close, no size. Assume close to "
                      "signal end\n");
                conn->bits.close = TRUE;
              }
            }
               * we got: "417 Expectation Failed" this means:
               * we have made a HTTP call and our Expect Header
               * seems to cause a problem => abort the write operations
               * (or prevent them from starting).
              k->exp100 = EXP100_FAILED;
              k->keepon &= ~KEEP_WRITE;
            }

            /*
             * When all the headers have been parsed, see if we should give
             * up and return an error.
             */
            if(Curl_http_should_fail(conn)) {
              failf (data, "The requested URL returned error: %d",
                     k->httpcode);
              return CURLE_HTTP_RETURNED_ERROR;
            }
            /* now, only output this if the header AND body are requested:
             */
            writetype = CLIENTWRITE_HEADER;
            if(data->set.include_header)
              writetype |= CLIENTWRITE_BODY;
            headerlen = k->p - data->state.headerbuff;
            result = Curl_client_write(conn, writetype,
                                       data->state.headerbuff,
                                       headerlen);
            if(result)
              return result;
            data->info.header_size += (long)headerlen;
            data->req.headerbytecount += (long)headerlen;
              (100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
            if(data->state.resume_from &&
               (data->set.httpreq==HTTPREQ_GET) &&
               (k->httpcode == 416)) {
              /* "Requested Range Not Satisfiable" */
              stop_reading = TRUE;
            }
            if(!stop_reading) {
              /* Curl_http_auth_act() checks what authentication methods
               * that are available and decides which one (if any) to
               * use. It will set 'newurl' if an auth metod was picked. */
              result = Curl_http_auth_act(conn);

              if(result)
                return result;

              if(conn->bits.rewindaftersend) {
                /* We rewind after a complete send, so thus we continue
                   sending now */
                infof(data, "Keep sending data to get tossed away!\n");
                k->keepon |= KEEP_WRITE;
            if(!k->header) {
              /*
               * really end-of-headers.
               *
               * If we requested a "no body", this is a good time to get
               * out and return home.
               */
              if(data->set.opt_no_body)
                stop_reading = TRUE;
              else {
                /* If we know the expected size of this document, we set the
                   maximum download size to the size of the expected
                   document or else, we won't know when to stop reading!

                   Note that we set the download maximum even if we read a
                   "Connection: close" header, to make sure that
                   "Content-Length: 0" still prevents us from attempting to
                   read the (missing) response-body.
                */
                /* According to RFC2616 section 4.4, we MUST ignore
                   Content-Length: headers if we are now receiving data
                   using chunked Transfer-Encoding.
                */
                if(k->chunk)
                  k->size=-1;
              }
              if(-1 != k->size) {
                /* We do this operation even if no_body is true, since this
                   data might be retrieved later with curl_easy_getinfo()
                   and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
                Curl_pgrsSetDownloadSize(data, k->size);
                k->maxdownload = k->size;
              }
              /* If max download size is *zero* (nothing) we already
                 have nothing and can safely return ok now! */
              if(0 == k->maxdownload)
                stop_reading = TRUE;
              if(stop_reading) {
                /* we make sure that this socket isn't read more now */
                k->keepon &= ~KEEP_READ;
              if(data->set.verbose)
                Curl_debug(data, CURLINFO_HEADER_IN,
                           k->str_start, headerlen, conn);
              break;          /* exit header line loop */
            /* We continue reading headers, so reset the line-based
               header parsing variables hbufp && hbuflen */
            k->hbufp = data->state.headerbuff;
            k->hbuflen = 0;
            continue;
          }

          /*
           * Checks for special headers coming up.
           */
          if(!k->headerline++) {
            /* This is the first header, it MUST be the error code line
               or else we consider this to be the body right away! */
            int httpversion_major;
            int nc;
#ifdef CURL_DOES_CONVERSIONS
#define HEADER1 scratch
#define SCRATCHSIZE 21
            CURLcode res;
            char scratch[SCRATCHSIZE+1]; /* "HTTP/major.minor 123" */
            /* We can't really convert this yet because we
               don't know if it's the 1st header line or the body.
               So we do a partial conversion into a scratch area,
               leaving the data at k->p as-is.
            */
            strncpy(&scratch[0], k->p, SCRATCHSIZE);
            scratch[SCRATCHSIZE] = 0; /* null terminate */
            res = Curl_convert_from_network(data,
                                            &scratch[0],
                                            SCRATCHSIZE);
            if(CURLE_OK != res) {
              /* Curl_convert_from_network calls failf if unsuccessful */
              return res;
            }
#else
#define HEADER1 k->p /* no conversion needed, just use k->p */
#endif /* CURL_DOES_CONVERSIONS */

            nc = sscanf(HEADER1,
                        " HTTP/%d.%d %3d",
                        &httpversion_major,
                        &k->httpversion,
                        &k->httpcode);
            if(nc==3) {
              k->httpversion += 10 * httpversion_major;
            }
            else {
              /* this is the real world, not a Nirvana
                 NCSA 1.5.x returns this crap when asked for HTTP/1.1
              */
              nc=sscanf(HEADER1, " HTTP %3d", &k->httpcode);
              k->httpversion = 10;
              /* If user has set option HTTP200ALIASES,
                 compare header line against list of aliases
              */
              if(!nc) {
                if(checkhttpprefix(data, k->p)) {
                  nc = 1;
                  k->httpcode = 200;
                  k->httpversion = 10;
            if(nc) {
              data->info.httpcode = k->httpcode;
              data->info.httpversion = k->httpversion;
              /*
               * This code executes as part of processing the header.  As a
               * result, it's not totally clear how to interpret the
               * response code yet as that depends on what other headers may
               * be present.  401 and 407 may be errors, but may be OK
               * depending on how authentication is working.  Other codes
               * are definitely errors, so give up here.
               */
              if(data->set.http_fail_on_error && (k->httpcode >= 400) &&
                 ((k->httpcode != 401) || !conn->bits.user_passwd) &&
                 ((k->httpcode != 407) || !conn->bits.proxy_user_passwd) ) {

                if(data->state.resume_from &&
                   (data->set.httpreq==HTTPREQ_GET) &&
                   (k->httpcode == 416)) {
                  /* "Requested Range Not Satisfiable", just proceed and
                     pretend this is no error */
                else {
                  /* serious error, go home! */
                  failf (data, "The requested URL returned error: %d",
                         k->httpcode);
                  return CURLE_HTTP_RETURNED_ERROR;
              if(k->httpversion == 10) {
                /* Default action for HTTP/1.0 must be to close, unless
                   we get one of those fancy headers that tell us the
                   server keeps it open for us! */
                infof(data, "HTTP 1.0, assume close after body\n");
                conn->bits.close = TRUE;
              else if(k->httpversion >= 11 &&
                      !conn->bits.close) {
                /* If HTTP version is >= 1.1 and connection is persistent
                   server supports pipelining. */
                DEBUGF(infof(data,
                             "HTTP 1.1 or later with persistent connection, "
                             "pipelining supported\n"));
                conn->server_supports_pipelining = TRUE;
              }

              switch(k->httpcode) {
              case 204:
                /* (quote from RFC2616, section 10.2.5): The server has
                 * fulfilled the request but does not need to return an
                 * entity-body ... The 204 response MUST NOT include a
                 * message-body, and thus is always terminated by the first
                 * empty line after the header fields. */
                /* FALLTHROUGH */
              case 416: /* Requested Range Not Satisfiable, it has the
                           Content-Length: set as the "real" document but no
                           actual response is sent. */
              case 304:
                /* (quote from RFC2616, section 10.3.5): The 304 response
                 * MUST NOT contain a message-body, and thus is always
                 * terminated by the first empty line after the header
                 * fields.  */
                k->size=0;
                k->maxdownload=0;
                k->ignorecl = TRUE; /* ignore Content-Length headers */
                break;
              default:
                /* nothing */
            else {
              k->header = FALSE;   /* this is not a header line */
              break;
            }
          }
#endif /* CURL_DISABLE_HTTP */
          /* convert from the network encoding */
          result = Curl_convert_from_network(data, k->p, strlen(k->p));
          if(CURLE_OK != result) {
            return(result);
          }
          /* Curl_convert_from_network calls failf if unsuccessful */
          /* Check for Content-Length: header lines to get size. Ignore
             the header completely if we get a 416 response as then we're
             resuming a document that we don't get, and this header contains
             info about the true size of the document we didn't get now. */
          if(!k->ignorecl && !data->set.ignorecl &&
             checkprefix("Content-Length:", k->p)) {
            curl_off_t contentlength = curlx_strtoofft(k->p+15, NULL, 10);
            if(data->set.max_filesize &&
               contentlength > data->set.max_filesize) {
              failf(data, "Maximum file size exceeded");
              return CURLE_FILESIZE_EXCEEDED;
            }
            if(contentlength >= 0) {
              k->size = contentlength;
              k->maxdownload = k->size;
              /* we set the progress download size already at this point
                 just to make it easier for apps/callbacks to extract this
                 info as soon as possible */
              Curl_pgrsSetDownloadSize(data, k->size);
            else {
              /* Negative Content-Length is really odd, and we know it
                 happens for example when older Apache servers send large
                 files */
              conn->bits.close = TRUE;
              infof(data, "Negative content-length: %" FORMAT_OFF_T
                    ", closing after transfer\n", contentlength);
            }
          }
          /* check for Content-Type: header lines to get the MIME-type */
          else if(checkprefix("Content-Type:", k->p)) {
            char *contenttype = Curl_copy_header_value(k->p);
            if (!contenttype)
              return CURLE_OUT_OF_MEMORY;
            if (!*contenttype)
              /* ignore empty data */
              free(contenttype);
            else {
              Curl_safefree(data->info.contenttype);
              data->info.contenttype = contenttype;
          }
          else if((k->httpversion == 10) &&
                  conn->bits.httpproxy &&
                  Curl_compareheader(k->p,
                                     "Proxy-Connection:", "keep-alive")) {
            /*
             * When a HTTP/1.0 reply comes when using a proxy, the
             * 'Proxy-Connection: keep-alive' line tells us the
             * connection will be kept alive for our pleasure.
             * Default action for 1.0 is to close.
             */
            conn->bits.close = FALSE; /* don't close when done */
            infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
          }
          else if((k->httpversion == 11) &&
                  conn->bits.httpproxy &&
                  Curl_compareheader(k->p,
                                     "Proxy-Connection:", "close")) {
            /*
             * We get a HTTP/1.1 response from a proxy and it says it'll
             * close down after this transfer.
             */
            conn->bits.close = TRUE; /* close when done */
            infof(data, "HTTP/1.1 proxy connection set close!\n");
          }
          else if((k->httpversion == 10) &&
                  Curl_compareheader(k->p, "Connection:", "keep-alive")) {
            /*
             * A HTTP/1.0 reply with the 'Connection: keep-alive' line
             * tells us the connection will be kept alive for our
             * pleasure.  Default action for 1.0 is to close.
             *
             * [RFC2068, section 19.7.1] */
            conn->bits.close = FALSE; /* don't close when done */
            infof(data, "HTTP/1.0 connection set to keep alive!\n");
          }
          else if(Curl_compareheader(k->p, "Connection:", "close")) {
            /*
             * [RFC 2616, section 8.1.2.1]
             * "Connection: close" is HTTP/1.1 language and means that
             * the connection will close when this request has been
             * served.
             */
            conn->bits.close = TRUE; /* close when done */
          }
          else if(Curl_compareheader(k->p,
                                     "Transfer-Encoding:", "chunked")) {
            /*
             * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
             * means that the server will send a series of "chunks". Each
             * chunk starts with line with info (including size of the
             * coming block) (terminated with CRLF), then a block of data
             * with the previously mentioned size. There can be any amount
             * of chunks, and a chunk-data set to zero signals the
             * end-of-chunks. */
            k->chunk = TRUE; /* chunks coming our way */