/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at http://curl.haxx.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * $Id$
 ***************************************************************************/
    /* -- WIN32 approved -- */
    #include <stdio.h>
    #include <string.h>
    #include <stdarg.h>
    #include <stdlib.h>
    #include <ctype.h>
    #include <errno.h>
    
    
    #include "strtoofft.h"
    
    #ifdef WIN32
    
    #include <time.h>
    #include <io.h>
    #else
    #ifdef HAVE_SYS_SOCKET_H
    #include <sys/socket.h>
    #endif
    
    #include <netinet/in.h>
    
    #include <sys/time.h>
    
    #endif
    
    #ifdef HAVE_UNISTD_H
    #include <unistd.h>
    #endif
    
    #ifdef HAVE_ARPA_INET_H
    #include <arpa/inet.h>
    #endif
    #ifdef HAVE_NET_IF_H
    #include <net/if.h>
    #endif
    
    #include <sys/ioctl.h>
    
    #include <signal.h>
    
    #ifdef HAVE_SYS_PARAM_H
    #include <sys/param.h>
    #endif
    
    #ifdef HAVE_SYS_SELECT_H
    #include <sys/select.h>
    #endif
    
    #ifndef HAVE_SOCKET
    #error "We can't compile without socket() support!"
    #endif
    
    
    
    #include "urldata.h"
    #include <curl/curl.h>
    #include "netrc.h"
    
    
    #include "content_encoding.h"
    
    #include "transfer.h"
    
    #include "sendf.h"
    #include "speedcheck.h"
    #include "progress.h"
    
    #include "http.h"
    
    #include "url.h"
    #include "getinfo.h"
    
    #include "http_digest.h"
    
    #include "share.h"
    
    #include "select.h"
    
    #include "easyif.h" /* for Curl_convert_to_network prototype */
    
    
    #define _MPRINTF_REPLACE /* use our functions only */
    #include <curl/mprintf.h>
    
    
    /* The last #include file should be: */
    #include "memdebug.h"
    
    
    #define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
    
    
    static CURLcode readwrite_headers(struct SessionHandle *data,
                                      struct connectdata *conn,
                                      struct SingleRequest *k,
                                      ssize_t *nread,
                                      bool *stop_reading);
    
    
    /*
     * This function will call the read callback to fill our buffer with data
     * to upload.
     */
    
    CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
    
      struct SessionHandle *data = conn->data;
    
      size_t buffersize = (size_t)bytes;
    
        /* if chunked Transfer-Encoding */
        buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
    
        data->req.upload_fromhere += 10; /* 32bit hex + CRLF */
    
      /* this function returns a size_t, so we typecast to int to prevent warnings
         with picky compilers */
    
      nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
    
        failf(data, "operation aborted by callback");
    
      else if(nread == CURL_READFUNC_PAUSE) {
        struct SingleRequest *k = &data->req;
    
        /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
        k->keepon |= KEEP_WRITE_PAUSE; /* mark socket send as paused */
        *nreadp = 0;
    
    Yang Tse's avatar
    Yang Tse committed
        return CURLE_OK; /* nothing was read */
    
      else if((size_t)nread > buffersize) {
    
        /* the read function returned a too large value */
    
      if(!data->req.forbidchunk && data->req.upload_chunky) {
    
        /* if chunked Transfer-Encoding */
        char hexbuffer[11];
        int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
                              "%x\r\n", nread);
        /* move buffer pointer */
    
        nread += hexlen;
    
        /* copy the prefix to the buffer */
    
        memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
    
        memcpy(data->req.upload_fromhere + nread, "\r\n", 2);
    
          /* mark this as done once this chunk is transfered */
    
    #ifdef CURL_DOES_CONVERSIONS
    
        res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
    
        /* Curl_convert_to_network calls failf if unsuccessful */
        if(res != CURLE_OK) {
          return(res);
        }
      }
    #endif /* CURL_DOES_CONVERSIONS */
    
    
    /*
     * checkhttpprefix()
     *
     * Returns TRUE if member of the list matches prefix of string
     */
    static bool
    checkhttpprefix(struct SessionHandle *data,
                    const char *s)
    {
      struct curl_slist *head = data->set.http200aliases;
    
      bool rc = FALSE;
    #ifdef CURL_DOES_CONVERSIONS
      /* convert from the network encoding using a scratch area */
      char *scratch = calloc(1, strlen(s)+1);
    
         failf (data, "Failed to calloc memory for conversion!");
         return FALSE; /* can't return CURLE_OUT_OF_MEMORY so return FALSE */
      }
      strcpy(scratch, s);
    
      if(CURLE_OK != Curl_convert_from_network(data, scratch, strlen(s)+1)) {
    
        /* Curl_convert_from_network calls failf if unsuccessful */
         free(scratch);
         return FALSE; /* can't return CURLE_foobar so return FALSE */
      }
      s = scratch;
    #endif /* CURL_DOES_CONVERSIONS */
    
      while(head) {
        if(checkprefix(head->data, s)) {
    
        head = head->next;
      }
    
    
      if((rc != TRUE) && (checkprefix("HTTP/", s))) {
    
    #ifdef CURL_DOES_CONVERSIONS
      free(scratch);
    #endif /* CURL_DOES_CONVERSIONS */
      return rc;
    
/*
 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
 * POST/PUT with multi-pass authentication when a sending was denied and a
 * resend is necessary.
 */
    CURLcode Curl_readrewind(struct connectdata *conn)
    {
      struct SessionHandle *data = conn->data;
    
      conn->bits.rewindaftersend = FALSE; /* we rewind now */
    
    
      /* explicitly switch off sending data on this connection now since we are
         about to restart a new transfer and thus we want to avoid inadvertently
         sending more data on the existing connection until the next transfer
         starts */
    
      /* We have sent away data. If not using CURLOPT_POSTFIELDS or
         CURLOPT_HTTPPOST, call app to rewind
      */
    
      if(data->set.postfields ||
    
         (data->set.httpreq == HTTPREQ_POST_FORM))
        ; /* do nothing */
      else {
    
        if(data->set.seek_func) {
          int err;
    
          err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
          if(err) {
    
            failf(data, "seek callback returned error %d", (int)err);
    
            return CURLE_SEND_FAIL_REWIND;
          }
        }
        else if(data->set.ioctl_func) {
    
          err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
                                       data->set.ioctl_client);
    
          infof(data, "the ioctl callback returned %d\n", (int)err);
    
    
          if(err) {
            /* FIXME: convert to a human readable error message */
    
            failf(data, "ioctl callback returned error %d", (int)err);
    
            return CURLE_SEND_FAIL_REWIND;
          }
        }
        else {
          /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
             given FILE * stream and we can actually attempt to rewind that
             ourself with fseek() */
    
          if(data->set.fread_func == (curl_read_callback)fread) {
    
            if(-1 != fseek(data->set.in, 0, SEEK_SET))
              /* successful rewind */
              return CURLE_OK;
          }
    
    
          /* no callback set or failure above, makes us fail at once */
    
          failf(data, "necessary data rewind wasn't possible");
    
          return CURLE_SEND_FAIL_REWIND;
        }
      }
      return CURLE_OK;
    }
    
    
    static int data_pending(const struct connectdata *conn)
    
      /* in the case of libssh2, we can never be really sure that we have emptied
         its internal buffers so we MUST always try until we get EAGAIN back */
      return conn->protocol&(PROT_SCP|PROT_SFTP) ||
        Curl_ssl_data_pending(conn, FIRSTSOCKET);
    
    #ifndef MIN
    #define MIN(a,b) (a < b ? a : b)
    #endif
    
    static void read_rewind(struct connectdata *conn,
                            size_t thismuch)
    {
      conn->read_pos -= thismuch;
      conn->bits.stream_was_rewound = TRUE;
    
    #ifdef CURLDEBUG
      {
        char buf[512 + 1];
        size_t show;
    
        show = MIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
    
            memcpy(buf, conn->master_buffer + conn->read_pos, show);
            buf[show] = '\0';
        }
        else {
            buf[0] = '\0';
        }
    
    
        DEBUGF(infof(conn->data,
                     "Buffer after stream rewind (read_pos = %d): [%s]",
                     conn->read_pos, buf));
      }
    #endif
    }
    
    
/*
 * Go ahead and do a read if we have a readable socket or if
 * the stream was rewound (in which case we have data in a
 * buffer)
 */
    
    static CURLcode readwrite_data(struct SessionHandle *data,
                                   struct connectdata *conn,
                                   struct SingleRequest *k,
                                   int *didwhat, bool *done)
    
      ssize_t nread; /* number of bytes read */
    
      /* This is where we loop until we have read everything there is to
         read or we get a EWOULDBLOCK */
      do {
        size_t buffersize = data->set.buffer_size?
          data->set.buffer_size : BUFSIZE;
        size_t bytestoread = buffersize;
        int readrc;
    
        if(k->size != -1 && !k->header) {
          /* make sure we don't read "too much" if we can help it since we
    	 might be pipelining and then someone else might want to read what
    	 follows! */
          curl_off_t totalleft = k->size - k->bytecount;
          if(totalleft < (curl_off_t)bytestoread)
    	bytestoread = (size_t)totalleft;
        }
    
        if(bytestoread) {
          /* receive data from the network! */
          readrc = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
    
          /* subzero, this would've blocked */
          if(0 > readrc)
    	break; /* get out of loop */
    
          /* get the CURLcode from the int */
          result = (CURLcode)readrc;
    
          if(result>0)
    	return result;
        }
        else {
          /* read nothing but since we wanted nothing we consider this an OK
    	 situation to proceed from */
          nread = 0;
        }
    
        if((k->bytecount == 0) && (k->writebytecount == 0)) {
          Curl_pgrsTime(data, TIMER_STARTTRANSFER);
          if(k->exp100 > EXP100_SEND_DATA)
    	/* set time stamp to compare with when waiting for the 100 */
    	k->start100 = Curl_tvnow();
        }
    
        *didwhat |= KEEP_READ;
        /* indicates data of zero size, i.e. empty file */
        is_empty_data = (bool)((nread == 0) && (k->bodywrites == 0));
    
        /* NUL terminate, allowing string ops to be used */
        if(0 < nread || is_empty_data) {
          k->buf[nread] = 0;
        }
        else if(0 >= nread) {
          /* if we receive 0 or less here, the server closed the connection
    	 and we bail out from this! */
          DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
          k->keepon &= ~KEEP_READ;
          break;
        }
    
        /* Default buffer to use when we write the buffer, it may be changed
           in the flow below before the actual storing is done. */
        k->str = k->buf;
    
        /* Since this is a two-state thing, we check if we are parsing
           headers at the moment or not. */
        if(k->header) {
          /* we are in parse-the-header-mode */
          bool stop_reading = FALSE;
          result = readwrite_headers(data, conn, k, &nread, &stop_reading);
          if(result)
    	return result;
          if(stop_reading)
    	/* We've stopped dealing with input, get out of the do-while loop */
    	break;
        }
    
        /* This is not an 'else if' since it may be a rest from the header
           parsing, where the beginning of the buffer is headers and the end
           is non-headers. */
        if(k->str && !k->header && (nread > 0 || is_empty_data)) {
    
    
    #ifndef CURL_DISABLE_HTTP
    
          if(0 == k->bodywrites && !is_empty_data) {
    	/* These checks are only made the first time we are about to
    	   write a piece of the body */
    	if(conn->protocol&PROT_HTTP) {
    	  /* HTTP-only checks */
    
    	  if(data->req.newurl) {
    	    if(conn->bits.close) {
    	      /* Abort after the headers if "follow Location" is set
    		 and we're set to close anyway. */
    	      k->keepon &= ~KEEP_READ;
    	      *done = TRUE;
    	      return CURLE_OK;
    	    }
    	    /* We have a new url to load, but since we want to be able
    	       to re-use this connection properly, we read the full
    	       response in "ignore more" */
    	    k->ignorebody = TRUE;
    	    infof(data, "Ignoring the response-body\n");
    	  }
    	  if(data->state.resume_from && !k->content_range &&
    	     (data->set.httpreq==HTTPREQ_GET) &&
    	     !k->ignorebody) {
    	    /* we wanted to resume a download, although the server doesn't
    	     * seem to support this and we did this with a GET (if it
    	     * wasn't a GET we did a POST or PUT resume) */
    	    failf(data, "HTTP server doesn't seem to support "
    		  "byte ranges. Cannot resume.");
    	    return CURLE_RANGE_ERROR;
    	  }
    
    	  if(data->set.timecondition && !data->state.range) {
    	    /* A time condition has been set AND no ranges have been
    	       requested. This seems to be what chapter 13.3.4 of
    	       RFC 2616 defines to be the correct action for a
    	       HTTP/1.1 client */
    	    if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
    	      switch(data->set.timecondition) {
    	      case CURL_TIMECOND_IFMODSINCE:
    	      default:
    		if(k->timeofdoc < data->set.timevalue) {
    		  infof(data,
    			"The requested document is not new enough\n");
    		  *done = TRUE;
    		  return CURLE_OK;
    		}
    		break;
    	      case CURL_TIMECOND_IFUNMODSINCE:
    		if(k->timeofdoc > data->set.timevalue) {
    		  infof(data,
    			"The requested document is not old enough\n");
    		  *done = TRUE;
    		  return CURLE_OK;
    		}
    		break;
    	      } /* switch */
    	    } /* two valid time strings */
    	  } /* we have a time condition */
    
    	} /* this is HTTP */
          } /* this is the first time we write a body part */
    
    #endif /* CURL_DISABLE_HTTP */
    
          k->bodywrites++;
    
          /* pass data to the debug function before it gets "dechunked" */
          if(data->set.verbose) {
    	if(k->badheader) {
    	  Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
    		     (size_t)k->hbuflen, conn);
    	  if(k->badheader == HEADER_PARTHEADER)
    	    Curl_debug(data, CURLINFO_DATA_IN,
    		       k->str, (size_t)nread, conn);
    	}
    	else
    	  Curl_debug(data, CURLINFO_DATA_IN,
    		     k->str, (size_t)nread, conn);
    
    
    #ifndef CURL_DISABLE_HTTP
          if(k->chunk) {
    	/*
    	 * Here comes a chunked transfer flying and we need to decode this
    	 * properly.  While the name says read, this function both reads
    	 * and writes away the data. The returned 'nread' holds the number
    	 * of actual data it wrote to the client.
    	 */
    
    	CHUNKcode res =
    	  Curl_httpchunk_read(conn, k->str, nread, &nread);
    
    	if(CHUNKE_OK < res) {
    	  if(CHUNKE_WRITE_ERROR == res) {
    	    failf(data, "Failed writing data");
    	    return CURLE_WRITE_ERROR;
    	  }
    	  failf(data, "Received problem %d in the chunky parser", res);
    	  return CURLE_RECV_ERROR;
    	}
    	else if(CHUNKE_STOP == res) {
    	  size_t dataleft;
    	  /* we're done reading chunks! */
    	  k->keepon &= ~KEEP_READ; /* read no more */
    
    	  /* There are now possibly N number of bytes at the end of the
    	     str buffer that weren't written to the client.
    
    	     We DO care about this data if we are pipelining.
    	     Push it back to be read on the next pass. */
    
    	  dataleft = conn->chunk.dataleft;
    	  if(dataleft != 0) {
    	    infof(conn->data, "Leftovers after chunking. "
    		  " Rewinding %d bytes\n",dataleft);
    	    read_rewind(conn, dataleft);
    	  }
    	}
    	/* If it returned OK, we just keep going */
          }
    #endif   /* CURL_DISABLE_HTTP */
    
          if((-1 != k->maxdownload) &&
    	 (k->bytecount + nread >= k->maxdownload)) {
    	/* The 'excess' amount below can't be more than BUFSIZE which
    	   always will fit in a size_t */
    	size_t excess = (size_t)(k->bytecount + nread - k->maxdownload);
    	if(excess > 0 && !k->ignorebody) {
    	  infof(data,
    		"Rewinding stream by : %d"
    
    		" bytes on url %s (size = %" FORMAT_OFF_T
    		", maxdownload = %" FORMAT_OFF_T
    		", bytecount = %" FORMAT_OFF_T ", nread = %d)\n",
    
    		excess, data->state.path,
    		k->size, k->maxdownload, k->bytecount, nread);
    	  read_rewind(conn, excess);
    	}
    
    	nread = (ssize_t) (k->maxdownload - k->bytecount);
    	if(nread < 0 ) /* this should be unusual */
    	  nread = 0;
    
    	k->keepon &= ~KEEP_READ; /* we're done reading */
    
          k->bytecount += nread;
    
          Curl_pgrsSetDownloadCounter(data, k->bytecount);
    
          if(!k->chunk && (nread || k->badheader || is_empty_data)) {
    	/* If this is chunky transfer, it was already written */
    
    	if(k->badheader && !k->ignorebody) {
    	  /* we parsed a piece of data wrongly assuming it was a header
    	     and now we output it as body instead */
    	  result = Curl_client_write(conn, CLIENTWRITE_BODY,
    				     data->state.headerbuff,
    				     k->hbuflen);
    	  if(result)
    	    return result;
    	}
    	if(k->badheader < HEADER_ALLBAD) {
    	  /* This switch handles various content encodings. If there's an
    	     error here, be sure to check over the almost identical code
    	     in http_chunks.c.
    	     Make sure that ALL_CONTENT_ENCODINGS contains all the
    	     encodings handled here. */
    #ifdef HAVE_LIBZ
    	  switch (conn->data->set.http_ce_skip ?
    		  IDENTITY : k->content_encoding) {
    	  case IDENTITY:
    #endif
    	    /* This is the default when the server sends no
    	       Content-Encoding header. See Curl_readwrite_init; the
    	       memset() call initializes k->content_encoding to zero. */
    	    if(!k->ignorebody)
    	      result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
    					 nread);
    #ifdef HAVE_LIBZ
    	    break;
    
    	  case DEFLATE:
    	    /* Assume CLIENTWRITE_BODY; headers are not encoded. */
    	    if(!k->ignorebody)
    	      result = Curl_unencode_deflate_write(conn, k, nread);
    	    break;
    
    	  case GZIP:
    	    /* Assume CLIENTWRITE_BODY; headers are not encoded. */
    	    if(!k->ignorebody)
    	      result = Curl_unencode_gzip_write(conn, k, nread);
    	    break;
    
    	  case COMPRESS:
    	  default:
    	    failf (data, "Unrecognized content encoding type. "
    		   "libcurl understands `identity', `deflate' and `gzip' "
    		   "content encodings.");
    	    result = CURLE_BAD_CONTENT_ENCODING;
    	    break;
    	  }
    #endif
    	}
    	k->badheader = HEADER_NORMAL; /* taken care of now */
    
    	if(result)
    	  return result;
    
        } /* if(! header and data to read ) */
    
        if(is_empty_data) {
          /* if we received nothing, the server closed the connection and we
    	 are done */
          k->keepon &= ~KEEP_READ;
        }
    
      } while(data_pending(conn));
    
    
      if(((k->keepon & (KEEP_READ|KEEP_WRITE)) == KEEP_WRITE) &&
         conn->bits.close ) {
    
    Daniel Stenberg's avatar
    Daniel Stenberg committed
        /* When we've read the entire thing and the close bit is set, the server
           may now close the connection. If there's now any kind of sending going
           on from our side, we need to stop that immediately. */
    
        infof(data, "we are done reading and this is set to close, stop send\n");
        k->keepon &= ~KEEP_WRITE; /* no writing anymore either */
      }
    
    
      return CURLE_OK;
    }
    
    /*
     * Read any header lines from the server and pass them to the client app.
     */
    static CURLcode readwrite_headers(struct SessionHandle *data,
                                      struct connectdata *conn,
                                      struct SingleRequest *k,
                                      ssize_t *nread,
                                      bool *stop_reading)
    {
      CURLcode result;
    
      /* header line within buffer loop */
      do {
        size_t hbufp_index;
        size_t rest_length;
        size_t full_length;
        int writetype;
    
        /* str_start is start of line within buf */
        k->str_start = k->str;
    
        /* data is in network encoding so use 0x0a instead of '\n' */
        k->end_ptr = memchr(k->str_start, 0x0a, *nread);
    
        if(!k->end_ptr) {
          /* Not a complete header line within buffer, append the data to
    	 the end of the headerbuff. */
    
          if(k->hbuflen + *nread >= data->state.headersize) {
    	/* We enlarge the header buffer as it is too small */
    	char *newbuff;
    	size_t newsize=CURLMAX((k->hbuflen+*nread)*3/2,
    			       data->state.headersize*2);
    	hbufp_index = k->hbufp - data->state.headerbuff;
    
    	newbuff = realloc(data->state.headerbuff, newsize);
    
    	if(!newbuff) {
    	  failf (data, "Failed to alloc memory for big header!");
    	  return CURLE_OUT_OF_MEMORY;
    	}
    	data->state.headersize=newsize;
    	data->state.headerbuff = newbuff;
    	k->hbufp = data->state.headerbuff + hbufp_index;
    
          memcpy(k->hbufp, k->str, *nread);
          k->hbufp += *nread;
          k->hbuflen += *nread;
          if(!k->headerline && (k->hbuflen>5)) {
    	/* make a first check that this looks like a HTTP header */
    	if(!checkhttpprefix(data, data->state.headerbuff)) {
    	  /* this is not the beginning of a HTTP first header line */
    	  k->header = FALSE;
    	  k->badheader = HEADER_ALLBAD;
    	  break;
    	}
    
          break; /* read more and try again */
        }
    
        /* decrease the size of the remaining (supposed) header line */
        rest_length = (k->end_ptr - k->str)+1;
        *nread -= (ssize_t)rest_length;
    
        k->str = k->end_ptr + 1; /* move past new line */
    
        full_length = k->str - k->str_start;
    
        /*
         * We're about to copy a chunk of data to the end of the
         * already received header. We make sure that the full string
         * fit in the allocated header buffer, or else we enlarge
         * it.
         */
        if(k->hbuflen + full_length >=
           data->state.headersize) {
          char *newbuff;
          size_t newsize=CURLMAX((k->hbuflen+full_length)*3/2,
    			     data->state.headersize*2);
          hbufp_index = k->hbufp - data->state.headerbuff;
    
          newbuff = realloc(data->state.headerbuff, newsize);
    
          if(!newbuff) {
    	failf (data, "Failed to alloc memory for big header!");
    	return CURLE_OUT_OF_MEMORY;
          }
          data->state.headersize= newsize;
          data->state.headerbuff = newbuff;
          k->hbufp = data->state.headerbuff + hbufp_index;
        }
    
        /* copy to end of line */
        memcpy(k->hbufp, k->str_start, full_length);
        k->hbufp += full_length;
        k->hbuflen += full_length;
        *k->hbufp = 0;
        k->end_ptr = k->hbufp;
    
        k->p = data->state.headerbuff;
    
        /****
         * We now have a FULL header line that p points to
         *****/
    
        if(!k->headerline) {
          /* the first read header */
          if((k->hbuflen>5) &&
    	 !checkhttpprefix(data, data->state.headerbuff)) {
    	/* this is not the beginning of a HTTP first header line */
    	k->header = FALSE;
    	if(*nread)
    	  /* since there's more, this is a partial bad header */
    	  k->badheader = HEADER_PARTHEADER;
    	else {
    	  /* this was all we read so it's all a bad header */
    	  k->badheader = HEADER_ALLBAD;
    	  *nread = (ssize_t)rest_length;
    	}
    	break;
          }
        }
    
        /* headers are in network encoding so
           use 0x0a and 0x0d instead of '\n' and '\r' */
        if((0x0a == *k->p) || (0x0d == *k->p)) {
          size_t headerlen;
          /* Zero-length header line means end of headers! */
    
          if(0x0d == *k->p) {
    	*k->p = '\r'; /* replace with CR in host encoding */
    	k->p++;       /* pass the CR byte */
          }
          if(0x0a == *k->p) {
    	*k->p = '\n'; /* replace with LF in host encoding */
    	k->p++;       /* pass the LF byte */
          }
    
          if('\r' == *k->p)
    	k->p++; /* pass the \r byte */
          if('\n' == *k->p)
    	k->p++; /* pass the \n byte */
    
          if(100 <= k->httpcode && 199 >= k->httpcode) {
    	/*
    	 * We have made a HTTP PUT or POST and this is 1.1-lingo
    	 * that tells us that the server is OK with this and ready
    	 * to receive the data.
    	 * However, we'll get more headers now so we must get
    	 * back into the header-parsing state!
    	 */
    	k->header = TRUE;
    	k->headerline = 0; /* restart the header line counter */
    
    	/* if we did wait for this do enable write now! */
    	if(k->exp100) {
    	  k->exp100 = EXP100_SEND_DATA;
    	  k->keepon |= KEEP_WRITE;
    	}
          }
          else {
    	k->header = FALSE; /* no more header to parse! */
    
    	if((k->size == -1) && !k->chunk && !conn->bits.close &&
    
    	  /* On HTTP 1.1, when connection is not to get closed, but no
    	     Content-Length nor Content-Encoding chunked have been
    	     received, according to RFC2616 section 4.4 point 5, we
    	     assume that the server will close the connection to
    	     signal the end of the document. */
    	  infof(data, "no chunk, no close, no size. Assume close to "
    		"signal end\n");
    	  conn->bits.close = TRUE;
    	}
          }
    
          if(417 == k->httpcode) {
    	/*
    	 * we got: "417 Expectation Failed" this means:
    	 * we have made a HTTP call and our Expect Header
    	 * seems to cause a problem => abort the write operations
    	 * (or prevent them from starting).
    	 */
    	k->exp100 = EXP100_FAILED;
    	k->keepon &= ~KEEP_WRITE;
          }
    
          /*
           * When all the headers have been parsed, see if we should give
           * up and return an error.
           */
          if(Curl_http_should_fail(conn)) {
    	failf (data, "The requested URL returned error: %d",
    	       k->httpcode);
    	return CURLE_HTTP_RETURNED_ERROR;
          }
    
          /* now, only output this if the header AND body are requested:
           */
          writetype = CLIENTWRITE_HEADER;
          if(data->set.include_header)
    	writetype |= CLIENTWRITE_BODY;
    
          headerlen = k->p - data->state.headerbuff;
    
          result = Curl_client_write(conn, writetype,
    				 data->state.headerbuff,
    				 headerlen);
          if(result)
    	return result;
    
          data->info.header_size += (long)headerlen;
          data->req.headerbytecount += (long)headerlen;
    
    #ifndef CURL_DISABLE_HTTP
    
          data->req.deductheadercount =
    	(100 <= k->httpcode && 199 >= k->httpcode)?data->req.headerbytecount:0;
    
          if(data->state.resume_from &&
    	 (data->set.httpreq==HTTPREQ_GET) &&
    	 (k->httpcode == 416)) {
    	/* "Requested Range Not Satisfiable" */
    	*stop_reading = TRUE;
          }
    
          if(!*stop_reading) {
    	/* Curl_http_auth_act() checks what authentication methods
    	 * that are available and decides which one (if any) to
    	 * use. It will set 'newurl' if an auth method was picked. */
    	result = Curl_http_auth_act(conn);
    
    	if(result)
    	  return result;
    
    	if(conn->bits.rewindaftersend) {
    	  /* We rewind after a complete send, so thus we continue
    	     sending now */
    	  infof(data, "Keep sending data to get tossed away!\n");
    	  k->keepon |= KEEP_WRITE;
    	}
          }
    
          if(!k->header) {
    	/*
    	 * really end-of-headers.
    	 *
    	 * If we requested a "no body", this is a good time to get
    	 * out and return home.
    	 */
    	if(data->set.opt_no_body)
    	  *stop_reading = TRUE;
    	else {
    	  /* If we know the expected size of this document, we set the
    	     maximum download size to the size of the expected
    	     document or else, we won't know when to stop reading!
    
    	     Note that we set the download maximum even if we read a
    	     "Connection: close" header, to make sure that
    	     "Content-Length: 0" still prevents us from attempting to
    	     read the (missing) response-body.
    	  */
    	  /* According to RFC2616 section 4.4, we MUST ignore
    	     Content-Length: headers if we are now receiving data
    	     using chunked Transfer-Encoding.
    	  */
    	  if(k->chunk)
    	    k->size=-1;
    
    	}
    	if(-1 != k->size) {
    	  /* We do this operation even if no_body is true, since this
    	     data might be retrieved later with curl_easy_getinfo()
    	     and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
    
    	  Curl_pgrsSetDownloadSize(data, k->size);
    	  k->maxdownload = k->size;
    	}
    	/* If max download size is *zero* (nothing) we already
    	   have nothing and can safely return ok now! */
    	if(0 == k->maxdownload)
    	  *stop_reading = TRUE;
    
    	if(*stop_reading) {
    	  /* we make sure that this socket isn't read more now */
    	  k->keepon &= ~KEEP_READ;
    	}
    
    	if(data->set.verbose)
    	  Curl_debug(data, CURLINFO_HEADER_IN,
    		     k->str_start, headerlen, conn);
    	break;          /* exit header line loop */
          }
    
          /* We continue reading headers, so reset the line-based
    	 header parsing variables hbufp && hbuflen */
          k->hbufp = data->state.headerbuff;
          k->hbuflen = 0;
          continue;
        }
    
        /*
         * Checks for special headers coming up.
         */
    
        if(!k->headerline++) {
          /* This is the first header, it MUST be the error code line
    	 or else we consider this to be the body right away! */
          int httpversion_major;
          int nc;
    
    #ifdef CURL_DOES_CONVERSIONS
    #define HEADER1 scratch
    #define SCRATCHSIZE 21
    
          CURLcode res;
          char scratch[SCRATCHSIZE+1]; /* "HTTP/major.minor 123" */
          /* We can't really convert this yet because we
    	 don't know if it's the 1st header line or the body.
    	 So we do a partial conversion into a scratch area,
    	 leaving the data at k->p as-is.
          */
          strncpy(&scratch[0], k->p, SCRATCHSIZE);
          scratch[SCRATCHSIZE] = 0; /* null terminate */
          res = Curl_convert_from_network(data,
    				      &scratch[0],
    				      SCRATCHSIZE);
          if(CURLE_OK != res) {