* coming block) (terminated with CRLF), then a block of data
* with the previously mentioned size. There can be any amount
* of chunks, and a chunk-size set to zero signals the
* end-of-chunks. */
k->chunk = TRUE; /* chunks coming our way */
/* init our chunky engine */
Curl_httpchunk_init(conn);
}
else if(checkprefix("Trailer:", k->p) ||
checkprefix("Trailers:", k->p)) {
/*
* This test helps Curl_httpchunk_read() to determine to look
* for well formed trailers after the zero chunksize record. In
* this case a CRLF is required after the zero chunksize record
* when no trailers are sent, or after the last trailer record.
*
* It seems both Trailer: and Trailers: occur in the wild.
*/
k->trailerhdrpresent = TRUE;
}
else if(checkprefix("Content-Encoding:", k->p) &&
data->set.str[STRING_ENCODING]) {
/*
* Process Content-Encoding. Look for the values: identity,
* gzip, deflate, compress, x-gzip and x-compress. x-gzip and
* x-compress are the same as gzip and compress. (Sec 3.5 RFC
* 2616). zlib cannot handle compress. However, errors are
* handled further down when the response body is processed
*/
char *start;
/* Find the first non-space letter */
for(start=k->p+17;
*start && ISSPACE(*start);
start++)
; /* empty loop */
/* Record the content-encoding for later use */
if(checkprefix("identity", start))
k->content_encoding = IDENTITY;
else if(checkprefix("deflate", start))
k->content_encoding = DEFLATE;
else if(checkprefix("gzip", start)
|| checkprefix("x-gzip", start))
k->content_encoding = GZIP;
else if(checkprefix("compress", start)
|| checkprefix("x-compress", start))
k->content_encoding = COMPRESS;
}
else if(checkprefix("Content-Range:", k->p)) {
/* Content-Range: bytes [num]-
Content-Range: bytes: [num]-
Content-Range: [num]-
The second format was added since Sun's webserver
JavaWebServer/1.1.1 obviously sends the header this way!
The third was added since some servers use that!
*/
char *ptr = k->p + 14;
/* Move forward until first digit */
while(*ptr && !ISDIGIT(*ptr))
ptr++;
k->offset = curlx_strtoofft(ptr, NULL, 10);
if(data->state.resume_from == k->offset)
/* we asked for a resume and we got it */
k->content_range = TRUE;
}
#if !defined(CURL_DISABLE_COOKIES)
else if(data->cookies &&
checkprefix("Set-Cookie:", k->p)) {
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE,
CURL_LOCK_ACCESS_SINGLE);
Curl_cookie_add(data,
data->cookies, TRUE, k->p+11,
/* If there is a custom-set Host: name, use it
here, or else use real peer host name. */
conn->allocptr.cookiehost?
conn->allocptr.cookiehost:conn->host.name,
data->state.path);
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
}
#endif
else if(checkprefix("Last-Modified:", k->p) &&
(data->set.timecondition || data->set.get_filetime) ) {
time_t secs=time(NULL);
k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
&secs);
if(data->set.get_filetime)
data->info.filetime = (long)k->timeofdoc;
}
else if((checkprefix("WWW-Authenticate:", k->p) &&
(401 == k->httpcode)) ||
(checkprefix("Proxy-authenticate:", k->p) &&
(407 == k->httpcode))) {
result = Curl_http_input_auth(conn, k->httpcode, k->p);
if(result)
return result;
}
else if((k->httpcode >= 300 && k->httpcode < 400) &&
checkprefix("Location:", k->p)) {
if(data->set.http_follow_location) {
/* this is the URL that the server advises us to get instead */
char *ptr;
char *start=k->p;
char backup;
start += 9; /* pass "Location:" */
/* Skip spaces and tabs. We do this to support multiple
white spaces after the "Location:" keyword. */
while(*start && ISSPACE(*start ))
start++;
/* Scan through the string from the end to find the last
non-space. k->end_ptr points to the actual terminating zero
letter, move pointer one letter back and start from
there. This logic strips off trailing whitespace, but keeps
any embedded whitespace. */
ptr = k->end_ptr-1;
while((ptr>=start) && ISSPACE(*ptr))
ptr--;
ptr++;
backup = *ptr; /* store the ending letter */
if(ptr != start) {
*ptr = '\0'; /* zero terminate */
data->req.newurl = strdup(start); /* clone string */
*ptr = backup; /* restore ending letter */
if(!data->req.newurl)
return CURLE_OUT_OF_MEMORY;
}
}
}
#endif /* CURL_DISABLE_HTTP */
/*
* End of header-checks. Write them to the client.
*/
writetype = CLIENTWRITE_HEADER;
if(data->set.include_header)
writetype |= CLIENTWRITE_BODY;
if(data->set.verbose)
Curl_debug(data, CURLINFO_HEADER_IN,
k->p, (size_t)k->hbuflen, conn);
result = Curl_client_write(conn, writetype, k->p, k->hbuflen);
if(result)
return result;
data->info.header_size += (long)k->hbuflen;
data->req.headerbytecount += (long)k->hbuflen;
/* reset hbufp pointer && hbuflen */
k->hbufp = data->state.headerbuff;
k->hbuflen = 0;
}
while(!stop_reading && *k->str); /* header line within buffer */
if(stop_reading)
/* We've stopped dealing with input, get out of the do-while loop */
break;
/* We might have reached the end of the header part here, but
there might be a non-header part left in the end of the read
buffer. */
/* This is not an 'else if' since it may be a rest from the header
parsing, where the beginning of the buffer is headers and the end
is non-headers. */
if(k->str && !k->header && (nread > 0 || is_empty_data)) {
if(0 == k->bodywrites && !is_empty_data) {
/* These checks are only made the first time we are about to
write a piece of the body */
if(conn->protocol&PROT_HTTP) {
/* HTTP-only checks */
if(data->req.newurl) {
if(conn->bits.close) {
/* Abort after the headers if "follow Location" is set
and we're set to close anyway. */
k->keepon &= ~KEEP_READ;
*done = TRUE;
return CURLE_OK;
}
/* We have a new url to load, but since we want to be able
to re-use this connection properly, we read the full
response in "ignore more" */
k->ignorebody = TRUE;
infof(data, "Ignoring the response-body\n");
}
if(data->state.resume_from && !k->content_range &&
(data->set.httpreq==HTTPREQ_GET) &&
!k->ignorebody) {
/* we wanted to resume a download, although the server doesn't
* seem to support this and we did this with a GET (if it
* wasn't a GET we did a POST or PUT resume) */
failf(data, "HTTP server doesn't seem to support "
"byte ranges. Cannot resume.");
return CURLE_RANGE_ERROR;
}
if(data->set.timecondition && !data->state.range) {
/* A time condition has been set AND no ranges have been
requested. This seems to be what chapter 13.3.4 of
RFC 2616 defines to be the correct action for a
HTTP/1.1 client */
if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
switch(data->set.timecondition) {
case CURL_TIMECOND_IFMODSINCE:
default:
if(k->timeofdoc < data->set.timevalue) {
infof(data,
"The requested document is not new enough\n");
*done = TRUE;
return CURLE_OK;
}
break;
case CURL_TIMECOND_IFUNMODSINCE:
if(k->timeofdoc > data->set.timevalue) {
infof(data,
"The requested document is not old enough\n");
*done = TRUE;
return CURLE_OK;
}
break;
} /* switch */
} /* two valid time strings */
} /* we have a time condition */
} /* this is HTTP */
} /* this is the first time we write a body part */
k->bodywrites++;
/* pass data to the debug function before it gets "dechunked" */
if(data->set.verbose) {
if(k->badheader) {
Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
           (size_t)k->hbuflen, conn);
if(k->badheader == HEADER_PARTHEADER)
Curl_debug(data, CURLINFO_DATA_IN,
k->str, (size_t)nread, conn);
}
else
Curl_debug(data, CURLINFO_DATA_IN,
k->str, (size_t)nread, conn);
}
#ifndef CURL_DISABLE_HTTP
if(k->chunk) {
/*
* Here comes a chunked transfer flying and we need to decode this
* properly. While the name says read, this function both reads
* and writes away the data. The returned 'nread' holds the number
* of actual data it wrote to the client.
*/
CHUNKcode res =
Curl_httpchunk_read(conn, k->str, nread, &nread);
if(CHUNKE_OK < res) {
if(CHUNKE_WRITE_ERROR == res) {
failf(data, "Failed writing data");
return CURLE_WRITE_ERROR;
}
failf(data, "Received problem %d in the chunky parser", res);
return CURLE_RECV_ERROR;
}
else if(CHUNKE_STOP == res) {
size_t dataleft;
/* we're done reading chunks! */
k->keepon &= ~KEEP_READ; /* read no more */
/* There are now possibly N number of bytes at the end of the
str buffer that weren't written to the client.
We DO care about this data if we are pipelining.
Push it back to be read on the next pass. */
dataleft = conn->chunk.dataleft;
if(dataleft != 0) {
infof(conn->data, "Leftovers after chunking. "
" Rewinding %d bytes\n",dataleft);
read_rewind(conn, dataleft);
}
}
/* If it returned OK, we just keep going */
}
#endif /* CURL_DISABLE_HTTP */
if((-1 != k->maxdownload) &&
(k->bytecount + nread >= k->maxdownload)) {
/* The 'excess' amount below can't be more than BUFSIZE which
always will fit in a size_t */
size_t excess = (size_t)(k->bytecount + nread - k->maxdownload);
if(excess > 0 && !k->ignorebody) {
infof(data,
"Rewinding stream by : %d"
" bytes on url %s (size = %" FORMAT_OFF_T
", maxdownload = %" FORMAT_OFF_T
", bytecount = %" FORMAT_OFF_T ", nread = %d)\n",
excess, data->state.path,
k->size, k->maxdownload, k->bytecount, nread);
read_rewind(conn, excess);
}
nread = (ssize_t) (k->maxdownload - k->bytecount);
if(nread < 0 ) /* this should be unusual */
nread = 0;
k->keepon &= ~KEEP_READ; /* we're done reading */
}
k->bytecount += nread;
Curl_pgrsSetDownloadCounter(data, k->bytecount);
if(!k->chunk && (nread || k->badheader || is_empty_data)) {
/* If this is chunky transfer, it was already written */
if(k->badheader && !k->ignorebody) {
/* we parsed a piece of data wrongly assuming it was a header
and now we output it as body instead */
result = Curl_client_write(conn, CLIENTWRITE_BODY,
data->state.headerbuff,
k->hbuflen);
if(result)
return result;
}
if(k->badheader < HEADER_ALLBAD) {
/* This switch handles various content encodings. If there's an
error here, be sure to check over the almost identical code
in http_chunks.c.
Make sure that ALL_CONTENT_ENCODINGS contains all the
encodings handled here. */
switch (conn->data->set.http_ce_skip ?
IDENTITY : k->content_encoding) {
case IDENTITY:
/* This is the default when the server sends no
Content-Encoding header. See Curl_readwrite_init; the
memset() call initializes k->content_encoding to zero. */
result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str, nread);
break;
case DEFLATE:
/* Assume CLIENTWRITE_BODY; headers are not encoded. */
if(!k->ignorebody)
result = Curl_unencode_deflate_write(conn, k, nread);
break;
case GZIP:
/* Assume CLIENTWRITE_BODY; headers are not encoded. */
if(!k->ignorebody)
result = Curl_unencode_gzip_write(conn, k, nread);
break;
case COMPRESS:
default:
failf (data, "Unrecognized content encoding type. "
"libcurl understands `identity', `deflate' and `gzip' "
"content encodings.");
result = CURLE_BAD_CONTENT_ENCODING;
break;
}
}
k->badheader = HEADER_NORMAL; /* taken care of now */
if(result)
return result;
}
} /* if(! header and data to read ) */
if(is_empty_data) {
/* if we received nothing, the server closed the connection and we
are done */
k->keepon &= ~KEEP_READ;
}
} while(data_pending(conn));
} /* if( read from socket ) */
/* If we still have writing to do, we check if we have a writable
   socket. */
if((k->keepon & KEEP_WRITE) && (select_res & CURL_CSELECT_OUT)) {
ssize_t i, si;
ssize_t bytes_written;
bool writedone=FALSE;
if((k->bytecount == 0) && (k->writebytecount == 0))
Curl_pgrsTime(data, TIMER_STARTTRANSFER);
didwhat |= KEEP_WRITE;
/*
* We loop here to do the READ and SEND loop until we run out of
* data to send or until we get EWOULDBLOCK back
*/
do {
/* only read more data if there's no upload data already
present in the upload buffer */
if(0 == data->req.upload_present) {
/* init the "upload from here" pointer */
data->req.upload_fromhere = k->uploadbuf;
if(!k->upload_done) {
/* HTTP pollution, this should be written nicer to become more
protocol agnostic. */
if(k->wait100_after_headers &&
(data->state.proto.http->sending == HTTPSEND_BODY)) {
/* If this call is to send body data, we must take some action:
We have sent off the full HTTP 1.1 request, and we shall now
go into the Expect: 100 state and await such a header */
k->wait100_after_headers = FALSE; /* headers sent */
k->write_after_100_header = TRUE; /* wait for the header */
k->keepon &= ~KEEP_WRITE; /* disable writing */
k->start100 = Curl_tvnow(); /* timeout count starts now */
didwhat &= ~KEEP_WRITE; /* we didn't write anything actually */
break;
}
result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
if(result)
return result;
nread = (ssize_t)fillcount;
}
else
nread = 0; /* we're done uploading/reading */
if(!nread && (k->keepon & KEEP_READ_PAUSE)) {
/* this is a paused transfer */
break;
}
else if(nread<=0) {
/* done */
k->keepon &= ~KEEP_WRITE; /* we're done writing */
writedone = TRUE;
if(conn->bits.rewindaftersend) {
result = Curl_readrewind(conn);
if(result)
return result;
}
break;
}
/* store number of bytes available for upload */
data->req.upload_present = nread;
/* convert LF to CRLF if so asked */
#ifdef CURL_DO_LINEEND_CONV
/* always convert if we're FTPing in ASCII mode */
if((data->set.crlf) || (data->set.prefer_ascii)) {
#else
if(data->set.crlf) {
#endif /* CURL_DO_LINEEND_CONV */
if(data->state.scratch == NULL)
data->state.scratch = malloc(2*BUFSIZE);
if(data->state.scratch == NULL) {
failf (data, "Failed to alloc scratch buffer!");
return CURLE_OUT_OF_MEMORY;
}
/*
* ASCII/EBCDIC Note: This is presumably a text (not binary)
* transfer so the data should already be in ASCII.
* That means the hex values for ASCII CR (0x0d) & LF (0x0a)
* must be used instead of the escape sequences \r & \n.
*/
for(i = 0, si = 0; i < nread; i++, si++) {
if(data->req.upload_fromhere[i] == 0x0a) {
data->state.scratch[si++] = 0x0d;
data->state.scratch[si] = 0x0a;
if(!data->set.crlf) {
/* we're here only because FTP is in ASCII mode...
bump infilesize for the LF we just added */
data->set.infilesize++;
}
}
else
data->state.scratch[si] = data->req.upload_fromhere[i];
}
if(si != nread) {
/* only perform the special operation if we really did replace
anything */
nread = si;
/* upload from the new (replaced) buffer instead */
data->req.upload_fromhere = data->state.scratch;
/* set the new amount too */
data->req.upload_present = nread;
}
}
} /* if 0 == data->req.upload_present */
else {
/* We have a partial buffer left from a previous "round". Use
that instead of reading more data */
}
/* write to socket (send away data) */
result = Curl_write(conn,
conn->writesockfd, /* socket to send to */
data->req.upload_fromhere, /* buffer pointer */
data->req.upload_present, /* buffer size */
&bytes_written); /* actually send away */
if(result)
return result;
if(data->set.verbose)
/* show the data before we change the pointer upload_fromhere */
Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
(size_t)bytes_written, conn);
if(data->req.upload_present != bytes_written) {
/* we only wrote a part of the buffer (if anything), deal with it! */
/* store the amount of bytes left in the buffer to write */
data->req.upload_present -= bytes_written;
/* advance the pointer where to find the buffer when the next send
is to happen */
data->req.upload_fromhere += bytes_written;
writedone = TRUE; /* we are done, stop the loop */
}
else {
/* we've uploaded that buffer now */
data->req.upload_fromhere = k->uploadbuf;
data->req.upload_present = 0; /* no more bytes left */
if(k->upload_done) {
/* switch off writing, we're done! */
k->keepon &= ~KEEP_WRITE; /* we're done writing */
writedone = TRUE;
}
}
k->writebytecount += bytes_written;
Curl_pgrsSetUploadCounter(data, k->writebytecount);
} while(!writedone); /* loop until we're done writing! */
} /* if(something to write) */
k->now = Curl_tvnow();
if(didwhat) {
/* Update read/write counters */
if(k->bytecountp)
*k->bytecountp = k->bytecount; /* read count */
if(k->writebytecountp)
*k->writebytecountp = k->writebytecount; /* write count */
}
else {
/* no read no write, this is a timeout? */
if(k->write_after_100_header) {
/* This should allow some time for the header to arrive, but only a
very short time as otherwise it'll be too much wasted time too
often. */
/* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
Therefore, when a client sends this header field to an origin server
(possibly via a proxy) from which it has never seen a 100 (Continue)
status, the client SHOULD NOT wait for an indefinite period before
sending the request body.
*/
long ms = Curl_tvdiff(k->now, k->start100);
if(ms > CURL_TIMEOUT_EXPECT_100) {
/* we've waited long enough, continue anyway */
k->write_after_100_header = FALSE;
k->keepon |= KEEP_WRITE;
}
}
}
if(Curl_pgrsUpdate(conn))
result = CURLE_ABORTED_BY_CALLBACK;
else
result = Curl_speedcheck(data, k->now);
if(result)
return result;
if(data->set.timeout &&
(Curl_tvdiff(k->now, k->start) >= data->set.timeout)) {
if(k->size != -1) {
failf(data, "Operation timed out after %ld milliseconds with %"
FORMAT_OFF_T " out of %" FORMAT_OFF_T " bytes received",
data->set.timeout, k->bytecount, k->size);
} else {
failf(data, "Operation timed out after %ld milliseconds with %"
FORMAT_OFF_T " bytes received",
data->set.timeout, k->bytecount);
}
return CURLE_OPERATION_TIMEDOUT;
}
if(!k->keepon) {
/*
* The transfer has been performed. Just make some general checks before
* returning.
*/
if(!(data->set.opt_no_body) && (k->size != -1) &&
(k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
/* Most FTP servers don't adjust their file SIZE response for CRLFs,
so we'll check to see if the discrepancy can be explained
by the number of CRLFs we've changed to LFs.
*/
(k->bytecount != (k->size + data->state.crlf_conversions)) &&
#endif /* CURL_DO_LINEEND_CONV */
!data->req.newurl) {
failf(data, "transfer closed with %" FORMAT_OFF_T
" bytes remaining to read",
k->size - k->bytecount);
return CURLE_PARTIAL_FILE;
}
else if(!(data->set.opt_no_body) &&
k->chunk &&
(conn->chunk.state != CHUNK_STOP)) {
/*
* In chunked mode, return an error if the connection is closed prior to
* the empty (terminating) chunk being read.
*
* The condition above used to check for
* conn->proto.http->chunk.datasize != 0 which is true after reading
* *any* chunk, not just the empty chunk.
*
*/
failf(data, "transfer closed with outstanding read data remaining");
return CURLE_PARTIAL_FILE;
}
if(Curl_pgrsUpdate(conn))
return CURLE_ABORTED_BY_CALLBACK;
}
/* Now update the "done" boolean we return */
*done = (bool)(0 == (k->keepon&(KEEP_READ|KEEP_WRITE|KEEP_READ_PAUSE|KEEP_WRITE_PAUSE)));
return CURLE_OK;
}
/*
* Curl_single_getsock() gets called by the multi interface code when the app
* has requested to get the sockets for the current connection. This function
* will then be called once for every connection that the multi interface
* keeps track of. This function will only be called for connections that are
* in the proper state to have this information available.
*/
int Curl_single_getsock(const struct connectdata *conn,
curl_socket_t *sock, /* points to numsocks number
of sockets */
int numsocks)
{
const struct SessionHandle *data = conn->data;
int bitmap = GETSOCK_BLANK;
unsigned sockindex = 0;
if(numsocks < 2)
/* simple check but we might need two slots */
return GETSOCK_BLANK;
/* don't include HOLD and PAUSE connections */
if((data->req.keepon & KEEP_READBITS) == KEEP_READ) {
DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);
bitmap |= GETSOCK_READSOCK(sockindex);
sock[sockindex] = conn->sockfd;
}
/* don't include HOLD and PAUSE connections */
if((data->req.keepon & KEEP_WRITEBITS) == KEEP_WRITE) {
if((conn->sockfd != conn->writesockfd) ||
!(data->req.keepon & KEEP_READ)) {
/* only if they are not the same socket or we didn't have a readable
one, we increase index */
if(data->req.keepon & KEEP_READ)
sockindex++; /* increase index if we need two entries */
DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);
sock[sockindex] = conn->writesockfd;
}
bitmap |= GETSOCK_WRITESOCK(sockindex);
}
return bitmap;
}
/*
* Transfer()
*
* This function is what performs the actual transfer. It is capable of
* doing both ways simultaneously.
* The transfer must already have been setup by a call to Curl_setup_transfer().
*
* Note that headers are created in a preallocated buffer of a default size.
* That buffer can be enlarged on demand, but it is never shrunken again.
*
* Parts of this function was once written by the friendly Mark Butler
* <butlerm@xmission.com>.
*/
static CURLcode
Transfer(struct connectdata *conn)
{
CURLcode result;
struct SessionHandle *data = conn->data;
struct SingleRequest *k = &data->req;
bool done=FALSE;
if((conn->sockfd == CURL_SOCKET_BAD) &&
(conn->writesockfd == CURL_SOCKET_BAD))
/* nothing to read, nothing to write, we're already OK! */
return CURLE_OK;
/* we want header and/or body, if neither then don't do this! */
if(!k->getheader && data->set.opt_no_body)
return CURLE_OK;
while(!done) {
curl_socket_t fd_read;
curl_socket_t fd_write;
/* limit-rate logic: if speed exceeds threshold, then do not include fd in
select set. The current speed is recalculated in each Curl_readwrite()
call */
if((k->keepon & KEEP_WRITE) &&
(!data->set.max_send_speed ||
(data->progress.ulspeed < data->set.max_send_speed) )) {
fd_write = conn->writesockfd;
k->keepon &= ~KEEP_WRITE_HOLD;
}
else {
fd_write = CURL_SOCKET_BAD;
if(k->keepon & KEEP_WRITE)
k->keepon |= KEEP_WRITE_HOLD; /* hold it */
}
if((k->keepon & KEEP_READ) &&
(!data->set.max_recv_speed ||
(data->progress.dlspeed < data->set.max_recv_speed)) ) {
fd_read = conn->sockfd;
k->keepon &= ~KEEP_READ_HOLD;
}
else {
fd_read = CURL_SOCKET_BAD;
if(k->keepon & KEEP_READ)
k->keepon |= KEEP_READ_HOLD; /* hold it */
}
/* pause logic. Don't check descriptors for paused connections */
if(k->keepon & KEEP_READ_PAUSE)
fd_read = CURL_SOCKET_BAD;
if(k->keepon & KEEP_WRITE_PAUSE)
fd_write = CURL_SOCKET_BAD;
/* The *_HOLD and *_PAUSE logic is necessary since even though there might
be no traffic during the select interval, we still call
Curl_readwrite() for the timeout case and if we limit transfer speed we
must make sure that this function doesn't transfer anything while in
HOLD status. */
switch (Curl_socket_ready(fd_read, fd_write, 1000)) {
case -1: /* select() error, stop reading */
#ifdef EINTR
/* The EINTR is not serious, and it seems you might get this more
often when using the lib in a multi-threaded environment! */
if(SOCKERRNO == EINTR)
;
else
#endif
done = TRUE; /* no more read or write */
continue;
case 0: /* timeout */
default: /* readable descriptors */
result = Curl_readwrite(conn, &done);
break;
}
if(result)
return result;
/* "done" signals to us if the transfer(s) are ready */
return CURLE_OK;
}
/*
* Curl_pretransfer() is called immediately before a transfer starts.
*/
CURLcode Curl_pretransfer(struct SessionHandle *data)
{
CURLcode res;
if(!data->change.url) {
/* we can't do anything without URL */
failf(data, "No URL set!");
return CURLE_URL_MALFORMAT;
}
/* Init the SSL session ID cache here. We do it here since we want to do it
after the *_setopt() calls (that could change the size of the cache) but
before any transfer takes place. */
res = Curl_ssl_initsessions(data, data->set.ssl.numsessions);
if(res)
return res;
data->set.followlocation=0; /* reset the location-follow counter */
data->state.this_is_a_follow = FALSE; /* reset this */
data->state.errorbuf = FALSE; /* no error has occurred */
data->state.authproblem = FALSE;
data->state.authhost.want = data->set.httpauth;
data->state.authproxy.want = data->set.proxyauth;
/* If there is a list of cookie files to read, do it now! */
if(data->change.cookielist) {
Curl_cookie_loadfiles(data);
}
/* Allow data->set.use_port to set which port to use. This needs to be
* disabled for example when we follow Location: headers to URLs using
* different ports! */
data->state.allow_port = TRUE;
#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
/*************************************************************
* Tell signal handler to ignore SIGPIPE
*************************************************************/
if(!data->set.no_signal)
data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
#endif
Curl_initinfo(data); /* reset session-specific information "variables" */
Curl_pgrsStartNow(data);
return CURLE_OK;
}
/*
* Curl_posttransfer() is called immediately after a transfer ends
*/
CURLcode Curl_posttransfer(struct SessionHandle *data)
{
#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
/* restore the signal handler for SIGPIPE before we get back */
if(!data->set.no_signal)
signal(SIGPIPE, data->state.prev_signal);
#endif
if(!(data->progress.flags & PGRS_HIDE) &&
!data->progress.callback)
/* only output if we don't use a progress callback and we're not hidden */
fprintf(data->set.err, "\n");
return CURLE_OK;
}
/*
* strlen_url() returns the length of the given URL if the spaces within the
* URL were properly URL encoded.
*/
static size_t strlen_url(const char *url)
{
const char *ptr;
size_t newlen=0;
bool left=TRUE; /* left side of the ? */
for(ptr=url; *ptr; ptr++) {
switch(*ptr) {
case '?':
left=FALSE;
/* fall through */
default:
newlen++;
break;
case ' ':
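/* a space on the left side of the '?' is counted as "%20" (three
   bytes), while a space in the query part is counted as a single
   '+'; this mirrors the encoding done in strcpy_url() below */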
if(left)
newlen+=3;
else
newlen++;
break;
}
}
return newlen;
}
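/* Note: strlen_url() and strcpy_url() must be kept in sync, since the
   length computed by the former is what callers are expected to allocate
   for the buffer that the latter fills in. */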
/* strcpy_url() copies a url to an output buffer and URL-encodes the spaces in
* the source URL accordingly.
*/
static void strcpy_url(char *output, const char *url)
{
/* we must add this with whitespace-replacing */
bool left=TRUE;
const char *iptr;
char *optr = output;
for(iptr = url; /* read from here */
*iptr; /* until zero byte */
iptr++) {
switch(*iptr) {
case '?':
left=FALSE;
/* fall through */
default:
*optr++=*iptr;
break;
case ' ':
if(left) {
*optr++='%'; /* add a '%' */
*optr++='2'; /* add a '2' */
*optr++='0'; /* add a '0' */
}
else
*optr++='+'; /* add a '+' here */
break;
}
}
*optr=0; /* zero terminate output buffer */
}
/*
* Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
* as given by the remote server and set up the new URL to request.
*/
CURLcode Curl_follow(struct SessionHandle *data,
char *newurl, /* this 'newurl' is the Location: string,
and it must be malloc()ed before passed
here */
bool retry) /* set TRUE if this is a request retry as
opposed to a real redirect following */
{
/* Location: redirect */
char prot[16]; /* URL protocol string storage */
char letter; /* used for a silly sscanf */
char *newest;
if(!retry) {
if((data->set.maxredirs != -1) &&
(data->set.followlocation >= data->set.maxredirs)) {
failf(data,"Maximum (%d) redirects followed", data->set.maxredirs);
return CURLE_TOO_MANY_REDIRECTS;
}
/* mark the next request as a followed location: */
data->state.this_is_a_follow = TRUE;
data->set.followlocation++; /* count location-followers */
}
if(data->set.http_auto_referer) {
/* We are asked to automatically set the previous URL as the
referer when we get the next URL. We pick the ->url field,
which may or may not be 100% correct */
if(data->change.referer_alloc)
/* If we already have an allocated referer, free this first */
free(data->change.referer);
data->change.referer = strdup(data->change.url);
data->change.referer_alloc = TRUE; /* yes, free this later */
}
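/* Check whether the new URL is absolute: the sscanf matches a protocol
   scheme followed by "://". If it does not match, the string is treated
   as a relative URL that has to be combined with the previously used
   URL. */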
if(2 != sscanf(newurl, "%15[^?&/:]://%c", prot, &letter)) {