/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2009, Daniel Stenberg, <daniel@haxx.se>, et al.
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at http://curl.haxx.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * $Id$
 ***************************************************************************/
/* -- WIN32 approved -- */
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <stdlib.h>
#include <ctype.h>
#include <errno.h>

#include "strtoofft.h"
#include "strequal.h"
#ifdef WIN32
#include <time.h>
#include <io.h>
#else
#ifdef HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <netinet/in.h>
#include <sys/time.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_ARPA_INET_H
#include <arpa/inet.h>
#endif
#ifdef HAVE_NET_IF_H
#include <net/if.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
#include <sys/ioctl.h>
#endif

#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif

#ifdef HAVE_SYS_SELECT_H
#include <sys/select.h>
#endif

#ifndef HAVE_SOCKET
#error "We can't compile without socket() support!"
#endif


#include "urldata.h"
#include <curl/curl.h>
#include "netrc.h"

#include "content_encoding.h"
#include "transfer.h"
#include "sendf.h"
#include "speedcheck.h"
#include "progress.h"
#include "http.h"
#include "url.h"
#include "getinfo.h"
#include "http_digest.h"
#include "share.h"
#include "curl_memory.h"
#include "select.h"
#include "easyif.h" /* for Curl_convert_to_network prototype */

#define _MPRINTF_REPLACE /* use our functions only */
#include <curl/mprintf.h>

/* The last #include file should be: */
#include "memdebug.h"

#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */

/*
 * This function will call the read callback to fill our buffer with data
 * to upload.
 */
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
{
  struct SessionHandle *data = conn->data;
  size_t buffersize = (size_t)bytes;
  int nread;
#ifdef CURL_DOES_CONVERSIONS
  bool sending_http_headers = FALSE;

  if((conn->protocol&PROT_HTTP) &&
     (data->state.proto.http->sending == HTTPSEND_REQUEST)) {
    /* We're sending the HTTP request headers, not the data.
       Remember that so we don't re-translate them into garbage. */
    sending_http_headers = TRUE;
  }
#endif

  if(data->req.upload_chunky) {
    /* if chunked Transfer-Encoding */
    buffersize -= (8 + 2 + 2);   /* 32bit hex + CRLF + CRLF */
    data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
  }
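  /*
   * For reference, the application read callback invoked just below
   * (conn->fread_func, installed with CURLOPT_READFUNCTION/CURLOPT_READDATA)
   * might look roughly like the sketch below. It is illustrative only and not
   * part of libcurl; the FILE * user pointer is an assumption made for the
   * example:
   *
   *   static size_t my_read_cb(void *ptr, size_t size, size_t nmemb,
   *                            void *userp)
   *   {
   *     FILE *in = (FILE *)userp;
   *     size_t got = fread(ptr, size, nmemb, in);
   *     if(ferror(in))
   *       return CURL_READFUNC_ABORT;  (aborts the whole transfer)
   *     return got;                    (number of bytes copied into ptr)
   *   }
   *
   * Returning CURL_READFUNC_PAUSE instead makes the code below flag the send
   * direction as paused (KEEP_SEND_PAUSE).
   */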
  /* this function returns a size_t, so we typecast to int to prevent warnings
     with picky compilers */
  nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
                                buffersize, conn->fread_in);

  if(nread == CURL_READFUNC_ABORT) {
    failf(data, "operation aborted by callback");
    *nreadp = 0;
    return CURLE_ABORTED_BY_CALLBACK;
  }
  else if(nread == CURL_READFUNC_PAUSE) {
    struct SingleRequest *k = &data->req;
    /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
    k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
    if(data->req.upload_chunky) {
      /* Back out the preallocation done above */
      data->req.upload_fromhere -= (8 + 2);
    }
    *nreadp = 0;

    return CURLE_OK; /* nothing was read */
  }
  else if((size_t)nread > buffersize) {
    /* the read function returned a too large value */
    failf(data, "read function returned funny value");
  if(!data->req.forbidchunk && data->req.upload_chunky) {
     *    build chunk:
     *
     *        <HEX SIZE> CRLF
     *        <DATA> CRLF
     */
    /* On non-ASCII platforms the <DATA> may or may not be
       translated based on set.prefer_ascii while the protocol
       portion must always be translated to the network encoding.
       To further complicate matters, line end conversion might be
       done later on, so we need to prevent CRLFs from becoming
       CRCRLFs if that's the case.  To do this we use bare LFs
       here, knowing they'll become CRLFs later on.
     */
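    /* As a concrete illustration: a 5-byte piece of data "hello" returned by
       the read callback is sent as the chunk

           5\r\n
           hello\r\n

       and a later zero-length read produces the terminating chunk

           0\r\n
           \r\n

       (with bare LFs used first when line-end conversion happens later, as
       noted above). The hex length prefix is what the snprintf() below
       places in hexbuffer. */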

    char hexbuffer[11];
    const char *endofline_native;
    const char *endofline_network;
    int hexlen;
#ifdef CURL_DO_LINEEND_CONV
    if((data->set.crlf) || (data->set.prefer_ascii)) {
#else
    if(data->set.crlf) {
#endif /* CURL_DO_LINEEND_CONV */
      /* \n will become \r\n later on */
      endofline_native  = "\n";
      endofline_network = "\x0a";
    } else {
      endofline_native  = "\r\n";
      endofline_network = "\x0d\x0a";
    }
    hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
                      "%x%s", nread, endofline_native);

    /* move buffer pointer */
    data->req.upload_fromhere -= hexlen;
    nread += hexlen;

    /* copy the prefix to the buffer, leaving out the NUL */
    memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
    /* always append ASCII CRLF to the data */
    memcpy(data->req.upload_fromhere + nread,
           endofline_network,
           strlen(endofline_network));

#ifdef CURL_DOES_CONVERSIONS
    {
      CURLcode res;
      int length;

      if(data->set.prefer_ascii) {
        /* translate the protocol and data */
        length = nread;
      }
      else {
        /* just translate the protocol portion */
        length = strlen(hexbuffer);
      }

      res = Curl_convert_to_network(data, data->req.upload_fromhere, length);
      /* Curl_convert_to_network calls failf if unsuccessful */
      if(res != CURLE_OK) {
        return(res);
      }
    }
#endif /* CURL_DOES_CONVERSIONS */

    if((nread - hexlen) == 0)
      /* mark this as done once this chunk is transferred */
      data->req.upload_done = TRUE;

    nread+=(int)strlen(endofline_native); /* for the added end of line */
  }
#ifdef CURL_DOES_CONVERSIONS
  else if((data->set.prefer_ascii) && (!sending_http_headers)) {
    CURLcode res;
    res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
    /* Curl_convert_to_network calls failf if unsuccessful */
    if(res != CURLE_OK)
      return(res);
  }
#endif /* CURL_DOES_CONVERSIONS */

  *nreadp = nread;

  return CURLE_OK;
}

/*
 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
 * POST/PUT with multi-pass authentication when a sending was denied and a
 * resend is necessary.
 */
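/*
 * For reference, the CURLOPT_SEEKFUNCTION callback that this function may
 * call could be implemented by an application roughly as in the sketch
 * below. It is illustrative only and not part of libcurl; the int file
 * descriptor user pointer is an assumption made for the example:
 *
 *   static int my_seek_cb(void *userp, curl_off_t offset, int origin)
 *   {
 *     int fd = *(int *)userp;
 *     if(lseek(fd, (off_t)offset, origin) == (off_t)-1)
 *       return 1;  (non-zero means the rewind failed)
 *     return 0;
 *   }
 *
 * It is installed with CURLOPT_SEEKFUNCTION/CURLOPT_SEEKDATA. A non-zero
 * return below makes the transfer fail with CURLE_SEND_FAIL_REWIND.
 */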
CURLcode Curl_readrewind(struct connectdata *conn)
{
  struct SessionHandle *data = conn->data;

  conn->bits.rewindaftersend = FALSE; /* we rewind now */

  /* explicitly switch off sending data on this connection now since we are
     about to restart a new transfer and thus we want to avoid inadvertently
     sending more data on the existing connection until the next transfer
     starts */
  data->req.keepon &= ~KEEP_SEND;

  /* We have sent away data. If not using CURLOPT_POSTFIELDS or
     CURLOPT_HTTPPOST, call app to rewind
  */
  if(data->set.postfields ||
     (data->set.httpreq == HTTPREQ_POST_FORM))
    ; /* do nothing */
  else {
    if(data->set.seek_func) {
      int err;

      err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
      if(err) {
        failf(data, "seek callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else if(data->set.ioctl_func) {
      curlioerr err;

      err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
                                   data->set.ioctl_client);
      infof(data, "the ioctl callback returned %d\n", (int)err);

      if(err) {
        /* FIXME: convert to a human readable error message */
        failf(data, "ioctl callback returned error %d", (int)err);
        return CURLE_SEND_FAIL_REWIND;
      }
    }
    else {
      /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
         given FILE * stream and we can actually attempt to rewind that
         ourself with fseek() */
      if(data->set.fread_func == (curl_read_callback)fread) {
        if(-1 != fseek(data->set.in, 0, SEEK_SET))
          /* successful rewind */
          return CURLE_OK;
      }

      /* no callback set or failure above, makes us fail at once */
      failf(data, "necessary data rewind wasn't possible");
      return CURLE_SEND_FAIL_REWIND;
    }
  }
  return CURLE_OK;
}

static int data_pending(const struct connectdata *conn)
{
  /* in the case of libssh2, we can never be really sure that we have emptied
     its internal buffers so we MUST always try until we get EAGAIN back */
  return conn->protocol&(PROT_SCP|PROT_SFTP) ||
    Curl_ssl_data_pending(conn, FIRSTSOCKET);
}

static void read_rewind(struct connectdata *conn,
                        size_t thismuch)
{
  DEBUGASSERT(conn->read_pos >= thismuch);

  conn->read_pos -= thismuch;
  conn->bits.stream_was_rewound = TRUE;

#ifdef DEBUGBUILD
  {
    char buf[512 + 1];
    size_t show;

    show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
    if(conn->master_buffer) {
        memcpy(buf, conn->master_buffer + conn->read_pos, show);
        buf[show] = '\0';
        buf[show] = '\0';
    }
    else {
        buf[0] = '\0';
    }

    DEBUGF(infof(conn->data,
                 "Buffer after stream rewind (read_pos = %d): [%s]",
                 conn->read_pos, buf));
  }
#endif
}

/*
 * Go ahead and do a read if we have a readable socket or if
 * the stream was rewound (in which case we have data in a
 * buffer)
 */
static CURLcode readwrite_data(struct SessionHandle *data,
                               struct connectdata *conn,
                               struct SingleRequest *k,
                               int *didwhat, bool *done)
{
  CURLcode result = CURLE_OK;
  ssize_t nread; /* number of bytes read */
  bool is_empty_data = FALSE;

  *done = FALSE;

  /* This is where we loop until we have read everything there is to
     read or we get an EWOULDBLOCK */
  do {
    size_t buffersize = data->set.buffer_size?
      data->set.buffer_size : BUFSIZE;
    size_t bytestoread = buffersize;
    int readrc;

    if(k->size != -1 && !k->header) {
      /* make sure we don't read "too much" if we can help it since we
         might be pipelining and then someone else might want to read what
         follows! */
      curl_off_t totalleft = k->size - k->bytecount;
      if(totalleft < (curl_off_t)bytestoread)
        bytestoread = (size_t)totalleft;
    }

    if(bytestoread) {
      /* receive data from the network! */
      readrc = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
      /* subzero, this would've blocked */
      if(0 > readrc)
        break; /* get out of loop */

      /* get the CURLcode from the int */
      result = (CURLcode)readrc;

      if(result>0)
        return result;
    }
    else {
      /* read nothing but since we wanted nothing we consider this an OK
         situation to proceed from */
      nread = 0;
    }

    if((k->bytecount == 0) && (k->writebytecount == 0)) {
      Curl_pgrsTime(data, TIMER_STARTTRANSFER);
      if(k->exp100 > EXP100_SEND_DATA)
        /* set time stamp to compare with when waiting for the 100 */
        k->start100 = Curl_tvnow();
    }

    *didwhat |= KEEP_RECV;

    /* indicates data of zero size, i.e. empty file */
    is_empty_data = (bool)((nread == 0) && (k->bodywrites == 0));
    /* NUL terminate, allowing string ops to be used */
    if(0 < nread || is_empty_data) {
      k->buf[nread] = 0;
    }
    else if(0 >= nread) {
      /* if we receive 0 or less here, the server closed the connection
         and we bail out from this! */
      DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
      k->keepon &= ~KEEP_RECV;
      break;
    }

    /* Default buffer to use when we write the buffer, it may be changed
       in the flow below before the actual storing is done. */
    k->str = k->buf;
#ifndef CURL_DISABLE_HTTP
    /* Since this is a two-state thing, we check if we are parsing
       headers at the moment or not. */
    if(k->header) {
      /* we are in parse-the-header-mode */
      bool stop_reading = FALSE;
      result = Curl_http_readwrite_headers(data, conn, k, &nread,
                                           &stop_reading);
      if(result)
        return result;

      if(stop_reading)
        /* We've stopped dealing with input, get out of the do-while loop */
        break;
    }
#endif /* CURL_DISABLE_HTTP */

    /* This is not an 'else if' since it may be a rest from the header
       parsing, where the beginning of the buffer is headers and the end
       is non-headers. */
    if(k->str && !k->header && (nread > 0 || is_empty_data)) {

#ifndef CURL_DISABLE_HTTP
      if(0 == k->bodywrites && !is_empty_data) {
        /* These checks are only made the first time we are about to
           write a piece of the body */
        if(conn->protocol&PROT_HTTP) {
          /* HTTP-only checks */

          if(data->req.newurl) {
            if(conn->bits.close) {
              /* Abort after the headers if "follow Location" is set
                 and we're set to close anyway. */
              *done = TRUE;
              return CURLE_OK;
            }
            /* We have a new url to load, but since we want to be able
               to re-use this connection properly, we read the full
               response in "ignore more" */
            k->ignorebody = TRUE;
            infof(data, "Ignoring the response-body\n");
          }
          if(data->state.resume_from && !k->content_range &&
             (data->set.httpreq==HTTPREQ_GET) &&
             !k->ignorebody) {
            /* we wanted to resume a download, although the server doesn't
             * seem to support this and we did this with a GET (if it
             * wasn't a GET we did a POST or PUT resume) */
            failf(data, "HTTP server doesn't seem to support "
                  "byte ranges. Cannot resume.");
            return CURLE_RANGE_ERROR;
          }

          if(data->set.timecondition && !data->state.range) {
            /* A time condition has been set AND no ranges have been
               requested. This seems to be what chapter 13.3.4 of
               RFC 2616 defines to be the correct action for a
               HTTP/1.1 client */
            if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
              switch(data->set.timecondition) {
              case CURL_TIMECOND_IFMODSINCE:
              default:
                if(k->timeofdoc < data->set.timevalue) {
                  infof(data,
                        "The requested document is not new enough\n");
                  *done = TRUE;
                  return CURLE_OK;
                }
                break;
              case CURL_TIMECOND_IFUNMODSINCE:
                if(k->timeofdoc > data->set.timevalue) {
                  infof(data,
                        "The requested document is not old enough\n");
                  *done = TRUE;
                  return CURLE_OK;
                }
                break;
              } /* switch */
            } /* two valid time strings */
          } /* we have a time condition */

        } /* this is HTTP */
      } /* this is the first time we write a body part */
#endif /* CURL_DISABLE_HTTP */
      k->bodywrites++;

      /* pass data to the debug function before it gets "dechunked" */
      if(data->set.verbose) {
        if(k->badheader) {
          Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
                     (size_t)k->hbuflen, conn);
          if(k->badheader == HEADER_PARTHEADER)
            Curl_debug(data, CURLINFO_DATA_IN,
                       k->str, (size_t)nread, conn);
        }
        else
          Curl_debug(data, CURLINFO_DATA_IN,
                     k->str, (size_t)nread, conn);
      }

#ifndef CURL_DISABLE_HTTP
      if(k->chunk) {
        /*
         * Here comes a chunked transfer flying and we need to decode this
         * properly.  While the name says read, this function both reads
         * and writes away the data. The returned 'nread' holds the number
         * of actual data it wrote to the client.
         */

        CHUNKcode res =
          Curl_httpchunk_read(conn, k->str, nread, &nread);

        if(CHUNKE_OK < res) {
          if(CHUNKE_WRITE_ERROR == res) {
            failf(data, "Failed writing data");
            return CURLE_WRITE_ERROR;
          }
          failf(data, "Received problem %d in the chunky parser", res);
          return CURLE_RECV_ERROR;
        }
        else if(CHUNKE_STOP == res) {
          size_t dataleft;
          /* we're done reading chunks! */
          k->keepon &= ~KEEP_RECV; /* read no more */

          /* There are now possibly N number of bytes at the end of the
             str buffer that weren't written to the client.

             We DO care about this data if we are pipelining.
             Push it back to be read on the next pass. */

          dataleft = conn->chunk.dataleft;
          if(dataleft != 0) {
            infof(conn->data, "Leftovers after chunking. "
                  " Rewinding %d bytes\n",dataleft);
            read_rewind(conn, dataleft);
          }
        }
        /* If it returned OK, we just keep going */
      }
#endif   /* CURL_DISABLE_HTTP */

      if((-1 != k->maxdownload) &&
         (k->bytecount + nread >= k->maxdownload)) {

        if(conn->data->multi && Curl_multi_canPipeline(conn->data->multi)) {
          /* The 'excess' amount below can't be more than BUFSIZE which
             always will fit in a size_t */
          size_t excess = (size_t)(k->bytecount + nread - k->maxdownload);
          if(excess > 0 && !k->ignorebody) {
            infof(data,
                  "Rewinding stream by : %d"
                  " bytes on url %s (size = %" FORMAT_OFF_T
                  ", maxdownload = %" FORMAT_OFF_T
                  ", bytecount = %" FORMAT_OFF_T ", nread = %d)\n",
                  excess, data->state.path,
                  k->size, k->maxdownload, k->bytecount, nread);
            read_rewind(conn, excess);
          }
        }

        nread = (ssize_t) (k->maxdownload - k->bytecount);
        if(nread < 0 ) /* this should be unusual */
          nread = 0;

        k->keepon &= ~KEEP_RECV; /* we're done reading */
      }

      k->bytecount += nread;

      Curl_pgrsSetDownloadCounter(data, k->bytecount);

      if(!k->chunk && (nread || k->badheader || is_empty_data)) {
        /* If this is chunky transfer, it was already written */

        if(k->badheader && !k->ignorebody) {
          /* we parsed a piece of data wrongly assuming it was a header
             and now we output it as body instead */
          result = Curl_client_write(conn, CLIENTWRITE_BODY,
                                     data->state.headerbuff,
                                     k->hbuflen);
          if(result)
            return result;
        }
        if(k->badheader < HEADER_ALLBAD) {
          /* This switch handles various content encodings. If there's an
             error here, be sure to check over the almost identical code
             in http_chunks.c.
             Make sure that ALL_CONTENT_ENCODINGS contains all the
             encodings handled here. */
          switch (conn->data->set.http_ce_skip ?
                  IDENTITY : k->content_encoding) {
          case IDENTITY:
            /* This is the default when the server sends no
               Content-Encoding header. See Curl_readwrite_init; the
               memset() call initializes k->content_encoding to zero. */
            if(!k->ignorebody) {

#ifndef CURL_DISABLE_POP3
              if(conn->protocol&PROT_POP3)
                result = Curl_pop3_write(conn, k->str, nread);
              else
#endif /* CURL_DISABLE_POP3 */

              result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
                                         nread);
            }
            break;

          case DEFLATE:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_deflate_write(conn, k, nread);
            break;

          case GZIP:
            /* Assume CLIENTWRITE_BODY; headers are not encoded. */
            if(!k->ignorebody)
              result = Curl_unencode_gzip_write(conn, k, nread);
            break;

          case COMPRESS:
          default:
            failf (data, "Unrecognized content encoding type. "
                   "libcurl understands `identity', `deflate' and `gzip' "
                   "content encodings.");
            result = CURLE_BAD_CONTENT_ENCODING;
            break;
          }
        }
        k->badheader = HEADER_NORMAL; /* taken care of now */

        if(result)
          return result;
      }

    } /* if(! header and data to read ) */

    if(is_empty_data) {
      /* if we received nothing, the server closed the connection and we
         are done */
      k->keepon &= ~KEEP_RECV;
    }

  } while(data_pending(conn));

  if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
     conn->bits.close) {
    /* When we've read the entire thing and the close bit is set, the server
       may now close the connection. If there's now any kind of sending going
       on from our side, we need to stop that immediately. */
    infof(data, "we are done reading and this is set to close, stop send\n");
    k->keepon &= ~KEEP_SEND; /* no writing anymore either */
  }

  return CURLE_OK;
}

/*
 * Send data to upload to the server, when the socket is writable.
 */
static CURLcode readwrite_upload(struct SessionHandle *data,
                                 struct connectdata *conn,
                                 struct SingleRequest *k,
                                 int *didwhat)
{
  ssize_t i, si;
  ssize_t bytes_written;
  CURLcode result;
  ssize_t nread; /* number of bytes read */
  bool sending_http_headers = FALSE;

  if((k->bytecount == 0) && (k->writebytecount == 0))
    Curl_pgrsTime(data, TIMER_STARTTRANSFER);

  *didwhat |= KEEP_SEND;

  /*
   * We loop here to do the READ and SEND loop until we run out of
   * data to send or until we get EWOULDBLOCK back
   */
  do {
    /* only read more data if there's no upload data already
       present in the upload buffer */
    if(0 == data->req.upload_present) {
      /* init the "upload from here" pointer */
      data->req.upload_fromhere = k->uploadbuf;

      if(!k->upload_done) {
        /* HTTP pollution, this should be written nicer to become more
           protocol agnostic. */
        int fillcount;

        if((k->exp100 == EXP100_SENDING_REQUEST) &&
           (data->state.proto.http->sending == HTTPSEND_BODY)) {
          /* If this call is to send body data, we must take some action:
             We have sent off the full HTTP 1.1 request, and we shall now
             go into the Expect: 100 state and await such a header */
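          /* Illustrative wire exchange for this state (not code):
               > PUT /upload HTTP/1.1
               > Expect: 100-continue
               (wait, at most CURL_TIMEOUT_EXPECT_100 ms, see Curl_readwrite)
               < HTTP/1.1 100 Continue
               > ...request body...
           */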
          k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
          k->keepon &= ~KEEP_SEND;         /* disable writing */
          k->start100 = Curl_tvnow();       /* timeout count starts now */
          *didwhat &= ~KEEP_SEND;  /* we didn't write anything actually */
          break;
        }

        if(conn->protocol&PROT_HTTP) {
          if(data->state.proto.http->sending == HTTPSEND_REQUEST)
            /* We're sending the HTTP request headers, not the data.
               Remember that so we don't change the line endings. */
            sending_http_headers = TRUE;
          else
            sending_http_headers = FALSE;
        }

        result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
        if(result)
          return result;

        nread = (ssize_t)fillcount;
      }
      else
        nread = 0; /* we're done uploading/reading */

      if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
        /* this is a paused transfer */
        break;
      }
      else if(nread<=0) {
        /* done */
        k->keepon &= ~KEEP_SEND; /* we're done writing */

        if(conn->bits.rewindaftersend) {
          result = Curl_readrewind(conn);
          if(result)
            return result;
        }
        break;
      }

      /* store number of bytes available for upload */
      data->req.upload_present = nread;
      /* convert LF to CRLF if so asked */
#ifdef CURL_DO_LINEEND_CONV
      /* always convert if we're FTPing in ASCII mode */
      if((!sending_http_headers) &&
         ((data->set.crlf) || (data->set.prefer_ascii))) {
#else
      if((!sending_http_headers) &&
         (data->set.crlf)) {
#endif
        if(data->state.scratch == NULL)
          data->state.scratch = malloc(2*BUFSIZE);
        if(data->state.scratch == NULL) {
          failf (data, "Failed to alloc scratch buffer!");
          return CURLE_OUT_OF_MEMORY;
        }
        /*
         * ASCII/EBCDIC Note: This is presumably a text (not binary)
         * transfer so the data should already be in ASCII.
         * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
         * must be used instead of the escape sequences \r & \n.
         */
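        /* Example (illustrative): the two input bytes 'a', 0x0a come out of
           this loop as the three bytes 'a', 0x0d, 0x0a in the scratch
           buffer, and when the conversion was triggered by FTP ASCII mode
           (rather than CURLOPT_CRLF) infilesize is bumped by one for the
           extra byte. */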
        for(i = 0, si = 0; i < nread; i++, si++) {
          if(data->req.upload_fromhere[i] == 0x0a) {
            data->state.scratch[si++] = 0x0d;
            data->state.scratch[si] = 0x0a;
            if(!data->set.crlf) {
              /* we're here only because FTP is in ASCII mode...
                 bump infilesize for the LF we just added */
              data->set.infilesize++;
            }
          }
          else
            data->state.scratch[si] = data->req.upload_fromhere[i];
        }
        if(si != nread) {
          /* only perform the special operation if we really did replace
             anything */
          nread = si;
          /* upload from the new (replaced) buffer instead */
          data->req.upload_fromhere = data->state.scratch;
          /* set the new amount too */
          data->req.upload_present = nread;
        }
      }
    } /* if 0 == data->req.upload_present */
    else {
      /* We have a partial buffer left from a previous "round". Use
    /* write to socket (send away data) */
    result = Curl_write(conn,
                        conn->writesockfd,     /* socket to send to */
                        data->req.upload_fromhere, /* buffer pointer */
                        data->req.upload_present,  /* buffer size */
                        &bytes_written);       /* actually send away */

    if(result)
      return result;

    if(data->set.verbose)
      /* show the data before we change the pointer upload_fromhere */
      Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
                 (size_t)bytes_written, conn);

    if(data->req.upload_present != bytes_written) {
      /* we only wrote a part of the buffer (if anything), deal with it! */
      /* store the amount of bytes left in the buffer to write */
      data->req.upload_present -= bytes_written;
      /* advance the pointer where to find the buffer when the next send
         is called */
      data->req.upload_fromhere += bytes_written;
    }
    else {
      /* we've uploaded that buffer now */
      data->req.upload_fromhere = k->uploadbuf;
      data->req.upload_present = 0; /* no more bytes left */

      if(k->upload_done) {
        /* switch off writing, we're done! */
        k->keepon &= ~KEEP_SEND; /* we're done writing */
      }
    }

    k->writebytecount += bytes_written;
    Curl_pgrsSetUploadCounter(data, k->writebytecount);
  } while(0); /* just to break out from! */

  return CURLE_OK;
}

/*
 * Curl_readwrite() is the low-level function to be called when data is to
 * be read and written to/from the connection.
 */
CURLcode Curl_readwrite(struct connectdata *conn,
                        bool *done)
{
  struct SessionHandle *data = conn->data;
  struct SingleRequest *k = &data->req;
  CURLcode result;
  int didwhat=0;
  curl_socket_t fd_read;
  curl_socket_t fd_write;
  int select_res = conn->cselect_bits;
  /* only use the proper socket if the *_HOLD bit is not set simultaneously as
     then we are in rate limiting state in that transfer direction */
  if((k->keepon & KEEP_RECVBITS) == KEEP_RECV) {
    fd_read = conn->sockfd;
  } else
    fd_read = CURL_SOCKET_BAD;
  if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
    fd_write = conn->writesockfd;
  else
    fd_write = CURL_SOCKET_BAD;
  if(!select_res) { /* Call for select()/poll() only, if read/write/error
                       status is not known. */
    select_res = Curl_socket_ready(fd_read, fd_write, 0);
  }
  if(select_res == CURL_CSELECT_ERR) {
    failf(data, "select/poll returned error");
    return CURLE_SEND_ERROR;
  }
  /* We go ahead and do a read if we have a readable socket or if
     the stream was rewound (in which case we have data in a
     buffer) */
  if((k->keepon & KEEP_RECV) &&
     ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
    result = readwrite_data(data, conn, k, &didwhat, done);
    if(result || *done)
      return result;
  }
  /* If we still have writing to do, we check if we have a writable socket. */
  if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
    result = readwrite_upload(data, conn, k, &didwhat);
    if(result)
      return result;
  }
  if(didwhat) {
    /* Update read/write counters */
    if(k->bytecountp)
      *k->bytecountp = k->bytecount; /* read count */
    if(k->writebytecountp)
      *k->writebytecountp = k->writebytecount; /* write count */
  }
  else {
    /* no read no write, this is a timeout? */
    if(k->exp100 == EXP100_AWAITING_CONTINUE) {
      /* This should allow some time for the header to arrive, but only a
         very short time as otherwise it'll be too much wasted time too
         often. */

      /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
         Therefore, when a client sends this header field to an origin server
         (possibly via a proxy) from which it has never seen a 100 (Continue)
         status, the client SHOULD NOT wait for an indefinite period before
         sending the request body.

      */
      long ms = Curl_tvdiff(k->now, k->start100);
      if(ms > CURL_TIMEOUT_EXPECT_100) {
        /* we've waited long enough, continue anyway */
        infof(data, "Done waiting for 100-continue\n");
  if(Curl_pgrsUpdate(conn))
    result = CURLE_ABORTED_BY_CALLBACK;
  else
    result = Curl_speedcheck(data, k->now);

  if(result)
    return result;

  if(data->set.timeout &&
     (Curl_tvdiff(k->now, k->start) >= data->set.timeout)) {
    if(k->size != -1) {
      failf(data, "Operation timed out after %ld milliseconds with %"
            FORMAT_OFF_T " out of %" FORMAT_OFF_T " bytes received",
            data->set.timeout, k->bytecount, k->size);
    }
    else {
      failf(data, "Operation timed out after %ld milliseconds with %"
            FORMAT_OFF_T " bytes received",
            data->set.timeout, k->bytecount);
    }
    return CURLE_OPERATION_TIMEDOUT;
  }

  if(!k->keepon) {
    /*
     * The transfer has been performed. Just make some general checks before