cache within the multi handle.
Changelog
-
+Daniel (6 September 2006)
+- Ravi Pratap and I have implemented HTTP Pipelining support. Enable it for a
+ multi handle using CURLMOPT_PIPELINING and all HTTP connections done on that
+ handle will then be attempted to be pipelined instead of performed in
+ parallel as they otherwise would be.
+
+ As a side-effect from this work, connections are now shared between all easy
+ handles within a multi handle, so if you use N easy handles for transfers,
+ each of them can pick up and re-use a connection that was previously used by
+ any of the handles, be it the same or one of the others.
+
+ This separation of the tight relationship between connections and easy
+ handles is most noticeable when you close easy handles that have been used
+ in a multi handle and check the amount of used memory or watch the debug
+ output, as there are times when libcurl will keep the easy handle around for
+ a while longer to be able to close it properly, for example to send QUIT to
+ close down an FTP connection.
+
+ This is a major change.
+
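A minimal usage sketch of the new option (the function name and URLs below are
just placeholders, and error checking is omitted): enable pipelining on a multi
handle, add two easy handles for the same server, and the second request can
then get piped on the first one's connection.

    #include <curl/curl.h>

    static void two_piped_requests(void)
    {
      CURLM *multi = curl_multi_init();
      CURL *first = curl_easy_init();
      CURL *second = curl_easy_init();

      /* ask the multi handle to attempt HTTP pipelining */
      curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);

      curl_easy_setopt(first, CURLOPT_URL, "http://example.com/one");
      curl_easy_setopt(second, CURLOPT_URL, "http://example.com/two");

      curl_multi_add_handle(multi, first);
      curl_multi_add_handle(multi, second);

      /* drive both transfers with curl_multi_perform() as usual, then clean
         up with curl_multi_remove_handle(), curl_easy_cleanup() and
         curl_multi_cleanup() */
    }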
Daniel (4 September 2006)
- Dmitry Rechkin (http://curl.haxx.se/bug/view.cgi?id=1551412) provided a
  patch that, while not fixing things very nicely, does make the SOCKS5
-Curl and libcurl 7.15.6
+Curl and libcurl 7.16.0
Public curl release number: 96
Releases counted from the very beginning: 123
This release includes the following changes:
+ o CURLMOPT_PIPELINING added for enabling pipelined transfers
o Added support for other MS-DOS compilers (besides djgpp)
o CURLOPT_SOCKOPTFUNCTION and CURLOPT_SOCKOPTDATA were added
o (FTP) libcurl avoids sending TYPE if the desired type was already set
Domenico Andreoli, Armel Asselin, Gisle Vanem, Yang Tse, Andrew Biggs,
Peter Sylvester, David McCreedy, Dmitriy Sergeyev, Dmitry Rechkin,
- Jari Sundell
+ Jari Sundell, Ravi Pratap
Thanks! (and sorry if I forgot to mention someone)
Pass a pointer to whatever you want passed to the curl_socket_callback's fourth
argument, the userp pointer. This is not used by libcurl but only passed-thru
as-is. Set the callback pointer with \fICURLMOPT_SOCKETFUNCTION\fP.
+.IP CURLMOPT_PIPELINING
+Pass a long set to 1 to enable or 0 to disable. Enabling pipelining on a multi
+handle will make it attempt to perform HTTP Pipelining as far as possible for
+transfers using this handle. This means that if you add a second request that
+can use an already existing connection, the second request will be \&"piped"
+on the same connection rather than being executed in parallel. (Added in
+7.16.0)
.SH RETURNS
The standard CURLMcode for multi interface error codes. Note that it returns a
CURLM_UNKNOWN_OPTION if you try setting an option that this version of libcurl
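For example, an application that might run against an older libcurl can check
that return code when enabling pipelining. A rough sketch, assuming an already
created CURLM *multi:

    CURLMcode rc = curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);
    if(rc == CURLM_UNKNOWN_OPTION) {
      /* this libcurl predates 7.16.0 and does not know CURLMOPT_PIPELINING;
         transfers will simply keep running in parallel */
    }
    else if(rc != CURLM_OK) {
      /* some other problem setting the option */
    }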
/* This is the version number of the libcurl package from which this header
file origins: */
-#define LIBCURL_VERSION "7.15.6-CVS"
+#define LIBCURL_VERSION "7.16.0-CVS"
/* The numeric version number is also available "in parts" by using these
defines: */
#define LIBCURL_VERSION_MAJOR 7
-#define LIBCURL_VERSION_MINOR 15
-#define LIBCURL_VERSION_PATCH 6
+#define LIBCURL_VERSION_MINOR 16
+#define LIBCURL_VERSION_PATCH 0
/* This is the numeric version of the libcurl version number, meant for easier
   parsing and comparisons by programs. The LIBCURL_VERSION_NUM define will
and it is always a greater number in a more recent release. It makes
comparisons with greater than and less than work.
*/
-#define LIBCURL_VERSION_NUM 0x070f06
+#define LIBCURL_VERSION_NUM 0x071000
#endif /* __CURL_CURLVER_H */
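As the comment above notes, the hex layout makes version checks plain
greater-than/less-than comparisons. A sketch of a compile-time guard for the
new pipelining option (the function name is just a placeholder):

    #include <curl/curl.h>

    static void enable_pipelining_if_available(CURLM *multi)
    {
    #if LIBCURL_VERSION_NUM >= 0x071000
      /* built against 7.16.0 or later, so CURLMOPT_PIPELINING is defined */
      curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);
    #else
      (void)multi; /* older headers: the option is not available */
    #endif
    }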
/* This is the argument passed to the socket callback */
CINIT(SOCKETDATA, OBJECTPOINT, 2),
+ /* set to 1 to enable pipelining for this multi handle */
+ CINIT(PIPELINING, LONG, 3),
+
CURLMOPT_LASTENTRY /* the last unused */
} CURLMoption;
struct SessionHandle *data=conn->data;
curl_socket_t sockfd = conn->sock[FIRSTSOCKET];
- char *path = conn->path;
- curl_off_t *bytecount = &conn->bytecount;
+ char *path = data->reqdata.path;
+ curl_off_t *bytecount = &data->reqdata.keep.bytecount;
*done = TRUE; /* unconditionally */
if(result)
failf(data, "Failed sending DICT request");
else
- result = Curl_Transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
- -1, NULL); /* no upload */
+ result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
+ -1, NULL); /* no upload */
if(result)
return result;
}
if(result)
failf(data, "Failed sending DICT request");
else
- result = Curl_Transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
- -1, NULL); /* no upload */
+ result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
+ -1, NULL); /* no upload */
if(result)
return result;
if(result)
failf(data, "Failed sending DICT request");
else
- result = Curl_Transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
- -1, NULL);
+ result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, FALSE, bytecount,
+ -1, NULL);
if(result)
return result;
}
}
+ if(!data->state.connc) {
+ /* oops, no connection cache, make one up */
+ data->state.connc = Curl_mk_connc(CONNCACHE_PRIVATE);
+ if(!data->state.connc)
+ return CURLE_OUT_OF_MEMORY;
+ }
+
return Curl_perform(data);
}
#endif
data->multi = multi;
}
+void Curl_easy_initHandleData(struct SessionHandle *data)
+{
+ memset(&data->reqdata, 0, sizeof(struct HandleData));
+
+ data->reqdata.maxdownload = -1;
+}
+
/*
* curl_easy_getinfo() is an external interface that allows an app to retrieve
* information from a performed transfer and similar.
/* copy all userdefined values */
outcurl->set = data->set;
- outcurl->state.numconnects = data->state.numconnects;
- outcurl->state.connects = (struct connectdata **)
- malloc(sizeof(struct connectdata *) * outcurl->state.numconnects);
- if(!outcurl->state.connects) {
- break;
- }
+ if(data->state.used_interface == Curl_if_multi)
+ outcurl->state.connc = data->state.connc;
+ else
+ outcurl->state.connc = Curl_mk_connc(CONNCACHE_PRIVATE);
- memset(outcurl->state.connects, 0,
- sizeof(struct connectdata *)*outcurl->state.numconnects);
+ if(!outcurl->state.connc)
+ break;
outcurl->state.lastconnect = -1;
#endif /* CURL_DISABLE_HTTP */
/* duplicate all values in 'change' */
+
if(data->change.url) {
outcurl->change.url = strdup(data->change.url);
if(!outcurl->change.url)
break;
#endif
+ Curl_easy_initHandleData(outcurl);
+
fail = FALSE; /* we reach this point and thus we are OK */
} while(0);
if(fail) {
if(outcurl) {
- if(outcurl->state.connects)
- free(outcurl->state.connects);
+ if(outcurl->state.connc->type == CONNCACHE_PRIVATE)
+ Curl_rm_connc(outcurl->state.connc);
if(outcurl->state.headerbuff)
free(outcurl->state.headerbuff);
if(outcurl->change.proxy)
/* zero out Progress data: */
memset(&data->progress, 0, sizeof(struct Progress));
+ /* init Handle data */
+ Curl_easy_initHandleData(data);
+
/* The remainder of these calls have been taken from Curl_open() */
data->set.out = stdout; /* default output to stdout */
*/
void Curl_easy_addmulti(struct SessionHandle *data, void *multi);
+void Curl_easy_initHandleData(struct SessionHandle *data);
+
CURLcode Curl_convert_to_network(struct SessionHandle *data,
char *buffer, size_t length);
CURLcode Curl_convert_from_network(struct SessionHandle *data,
*/
CURLcode Curl_file_connect(struct connectdata *conn)
{
- char *real_path = curl_easy_unescape(conn->data, conn->path, 0, NULL);
+ char *real_path = curl_easy_unescape(conn->data, conn->data->reqdata.path, 0, NULL);
struct FILEPROTO *file;
int fd;
#if defined(WIN32) || defined(MSDOS) || defined(__EMX__)
return CURLE_OUT_OF_MEMORY;
}
- conn->proto.file = file;
+ if (conn->data->reqdata.proto.file) {
+ free(conn->data->reqdata.proto.file);
+ }
+
+ conn->data->reqdata.proto.file = file;
#if defined(WIN32) || defined(MSDOS) || defined(__EMX__)
/* If the first character is a slash, and there's
file->fd = fd;
if(!conn->data->set.upload && (fd == -1)) {
- failf(conn->data, "Couldn't open file %s", conn->path);
+ failf(conn->data, "Couldn't open file %s", conn->data->reqdata.path);
Curl_file_done(conn, CURLE_FILE_COULDNT_READ_FILE);
return CURLE_FILE_COULDNT_READ_FILE;
}
CURLcode Curl_file_done(struct connectdata *conn,
CURLcode status)
{
- struct FILEPROTO *file = conn->proto.file;
+ struct FILEPROTO *file = conn->data->reqdata.proto.file;
(void)status; /* not used */
Curl_safefree(file->freepath);
static CURLcode file_upload(struct connectdata *conn)
{
- struct FILEPROTO *file = conn->proto.file;
+ struct FILEPROTO *file = conn->data->reqdata.proto.file;
char *dir = strchr(file->path, DIRSEP);
FILE *fp;
CURLcode res=CURLE_OK;
*/
conn->fread = data->set.fread;
conn->fread_in = data->set.in;
- conn->upload_fromhere = buf;
+ conn->data->reqdata.upload_fromhere = buf;
if(!dir)
return CURLE_FILE_COULDNT_READ_FILE; /* fix: better error code */
return file_upload(conn);
/* get the fd from the connection phase */
- fd = conn->proto.file->fd;
+ fd = conn->data->reqdata.proto.file->fd;
/* VMS: This only works reliable for STREAMLF files */
if( -1 != fstat(fd, &statbuf)) {
return result;
}
- if (conn->resume_from <= expected_size)
- expected_size -= conn->resume_from;
+ if (data->reqdata.resume_from <= expected_size)
+ expected_size -= data->reqdata.resume_from;
else {
failf(data, "failed to resume file:// transfer");
return CURLE_BAD_DOWNLOAD_RESUME;
if(fstated)
Curl_pgrsSetDownloadSize(data, expected_size);
- if(conn->resume_from)
- lseek(fd, conn->resume_from, SEEK_SET);
+ if(data->reqdata.resume_from)
+ lseek(fd, data->reqdata.resume_from, SEEK_SET);
Curl_pgrsTime(data, TIMER_STARTTRANSFER);
static CURLcode ftp_state_quote(struct connectdata *conn,
bool init, ftpstate instate);
static CURLcode ftp_nb_type(struct connectdata *conn,
- bool ascii, ftpstate state);
+ bool ascii, ftpstate state);
static int ftp_need_type(struct connectdata *conn,
- bool ascii);
+ bool ascii);
/* easy-to-use macro: */
#define FTPSENDF(x,y,z) if ((result = Curl_ftpsendf(x,y,z)) != CURLE_OK) \
#define NBFTPSENDF(x,y,z) if ((result = Curl_nbftpsendf(x,y,z)) != CURLE_OK) \
return result
-static void freedirs(struct FTP *ftp)
+static void freedirs(struct connectdata *conn)
{
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
+
int i;
- if(ftp->dirs) {
- for (i=0; i < ftp->dirdepth; i++){
- if(ftp->dirs[i]) {
- free(ftp->dirs[i]);
- ftp->dirs[i]=NULL;
+ if(ftpc->dirs) {
+ for (i=0; i < ftpc->dirdepth; i++){
+ if(ftpc->dirs[i]) {
+ free(ftpc->dirs[i]);
+ ftpc->dirs[i]=NULL;
}
}
- free(ftp->dirs);
- ftp->dirs = NULL;
+ free(ftpc->dirs);
+ ftpc->dirs = NULL;
}
if(ftp->file) {
free(ftp->file);
/* initialize stuff to prepare for reading a fresh new response */
static void ftp_respinit(struct connectdata *conn)
{
- struct FTP *ftp = conn->proto.ftp;
- ftp->nread_resp = 0;
- ftp->linestart_resp = conn->data->state.buffer;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
+ ftpc->nread_resp = 0;
+ ftpc->linestart_resp = conn->data->state.buffer;
}
/* macro to check for the last line in an FTP server response */
ssize_t gotbytes;
char *ptr;
struct SessionHandle *data = conn->data;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
char *buf = data->state.buffer;
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
int code = 0;
if (ftpcode)
*ftpcode = 0; /* 0 for errors or not done */
- ptr=buf + ftp->nread_resp;
+ ptr=buf + ftpc->nread_resp;
- perline= (int)(ptr-ftp->linestart_resp); /* number of bytes in the current
+ perline= (int)(ptr-ftpc->linestart_resp); /* number of bytes in the current
line, so far */
keepon=TRUE;
- while((ftp->nread_resp<BUFSIZE) && (keepon && !result)) {
+ while((ftpc->nread_resp<BUFSIZE) && (keepon && !result)) {
- if(ftp->cache) {
+ if(ftpc->cache) {
/* we had data in the "cache", copy that instead of doing an actual
* read
*
* int to begin with, even though its datatype may be larger
* than an int.
*/
- memcpy(ptr, ftp->cache, (int)ftp->cache_size);
- gotbytes = (int)ftp->cache_size;
- free(ftp->cache); /* free the cache */
- ftp->cache = NULL; /* clear the pointer */
- ftp->cache_size = 0; /* zero the size just in case */
+ memcpy(ptr, ftpc->cache, (int)ftpc->cache_size);
+ gotbytes = (int)ftpc->cache_size;
+ free(ftpc->cache); /* free the cache */
+ ftpc->cache = NULL; /* clear the pointer */
+ ftpc->cache_size = 0; /* zero the size just in case */
}
else {
- int res = Curl_read(conn, sockfd, ptr, BUFSIZE-ftp->nread_resp,
+ int res = Curl_read(conn, sockfd, ptr, BUFSIZE-ftpc->nread_resp,
&gotbytes);
if(res < 0)
/* EWOULDBLOCK */
* line */
int i;
- conn->headerbytecount += gotbytes;
+ k->headerbytecount += gotbytes;
- ftp->nread_resp += gotbytes;
+ ftpc->nread_resp += gotbytes;
for(i = 0; i < gotbytes; ptr++, i++) {
perline++;
if(*ptr=='\n') {
/* output debug output if that is requested */
if(data->set.verbose)
Curl_debug(data, CURLINFO_HEADER_IN,
- ftp->linestart_resp, perline, conn);
+ ftpc->linestart_resp, perline, conn);
/*
* We pass all response-lines to the callback function registered
* headers.
*/
result = Curl_client_write(conn, CLIENTWRITE_HEADER,
- ftp->linestart_resp, perline);
+ ftpc->linestart_resp, perline);
if(result)
return result;
- if(perline>3 && lastline(ftp->linestart_resp)) {
+ if(perline>3 && lastline(ftpc->linestart_resp)) {
/* This is the end of the last line, copy the last line to the
start of the buffer and zero terminate, for old times sake (and
krb4)! */
char *meow;
int n;
- for(meow=ftp->linestart_resp, n=0; meow<ptr; meow++, n++)
+ for(meow=ftpc->linestart_resp, n=0; meow<ptr; meow++, n++)
buf[n] = *meow;
*meow=0; /* zero terminate */
keepon=FALSE;
- ftp->linestart_resp = ptr+1; /* advance pointer */
+ ftpc->linestart_resp = ptr+1; /* advance pointer */
i++; /* skip this before getting out */
- *size = ftp->nread_resp; /* size of the response */
- ftp->nread_resp = 0; /* restart */
+ *size = ftpc->nread_resp; /* size of the response */
+ ftpc->nread_resp = 0; /* restart */
break;
}
perline=0; /* line starts over here */
- ftp->linestart_resp = ptr+1;
+ ftpc->linestart_resp = ptr+1;
}
}
if(!keepon && (i != gotbytes)) {
full chunk of data we have read from the server. We therefore need
to store the rest of the data to be checked on the next invoke as
it may actually contain another end of response already! */
- ftp->cache_size = gotbytes - i;
- ftp->cache = (char *)malloc((int)ftp->cache_size);
- if(ftp->cache)
- memcpy(ftp->cache, ftp->linestart_resp, (int)ftp->cache_size);
+ ftpc->cache_size = gotbytes - i;
+ ftpc->cache = (char *)malloc((int)ftpc->cache_size);
+ if(ftpc->cache)
+ memcpy(ftpc->cache, ftpc->linestart_resp, (int)ftpc->cache_size);
else
return CURLE_OUT_OF_MEMORY; /**BANG**/
}
long timeout; /* timeout in seconds */
int interval_ms;
struct SessionHandle *data = conn->data;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
char *line_start;
int code=0; /* default ftp "error code" to return */
char *buf = data->state.buffer;
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
struct timeval now = Curl_tvnow();
if (ftpcode)
else
/* Even without a requested timeout, we only wait response_time
seconds for the full response to arrive before we bail out */
- timeout = ftp->response_time -
+ timeout = ftpc->response_time -
Curl_tvdiff(Curl_tvnow(), now)/1000; /* spent time */
if(timeout <=0 ) {
return CURLE_OPERATION_TIMEDOUT; /* already too little time */
}
- if(!ftp->cache) {
+ if(!ftpc->cache) {
interval_ms = 1 * 1000; /* use 1 second timeout intervals */
switch (Curl_select(sockfd, CURL_SOCKET_BAD, interval_ms)) {
* to read, but when we use Curl_read() it may do so. Do confirm
* that this is still ok and then remove this comment!
*/
- if(ftp->cache) {
+ if(ftpc->cache) {
/* we had data in the "cache", copy that instead of doing an actual
* read
*
* int to begin with, even though its datatype may be larger
* than an int.
*/
- memcpy(ptr, ftp->cache, (int)ftp->cache_size);
- gotbytes = (int)ftp->cache_size;
- free(ftp->cache); /* free the cache */
- ftp->cache = NULL; /* clear the pointer */
- ftp->cache_size = 0; /* zero the size just in case */
+ memcpy(ptr, ftpc->cache, (int)ftpc->cache_size);
+ gotbytes = (int)ftpc->cache_size;
+ free(ftpc->cache); /* free the cache */
+ ftpc->cache = NULL; /* clear the pointer */
+ ftpc->cache_size = 0; /* zero the size just in case */
}
else {
int res = Curl_read(conn, sockfd, ptr, BUFSIZE-*nreadp, &gotbytes);
* line */
int i;
- conn->headerbytecount += gotbytes;
+ k->headerbytecount += gotbytes;
*nreadp += gotbytes;
for(i = 0; i < gotbytes; ptr++, i++) {
invoke as it may actually contain another end of response
already! Cleverly figured out by Eric Lavigne in December
2001. */
- ftp->cache_size = gotbytes - i;
- ftp->cache = (char *)malloc((int)ftp->cache_size);
- if(ftp->cache)
- memcpy(ftp->cache, line_start, (int)ftp->cache_size);
+ ftpc->cache_size = gotbytes - i;
+ ftpc->cache = (char *)malloc((int)ftpc->cache_size);
+ if(ftpc->cache)
+ memcpy(ftpc->cache, line_start, (int)ftpc->cache_size);
else
return CURLE_OUT_OF_MEMORY; /**BANG**/
}
"QUIT"
};
#endif
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
#ifdef CURLDEBUG
- if(ftp->state != state)
+ if(ftpc->state != state)
infof(conn->data, "FTP %p state change from %s to %s\n",
- ftp, names[ftp->state], names[state]);
+ ftpc, names[ftpc->state], names[state]);
#endif
- ftp->state = state;
+ ftpc->state = state;
}
static CURLcode ftp_state_user(struct connectdata *conn)
{
CURLcode result;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
/* send USER */
NBFTPSENDF(conn, "USER %s", ftp->user?ftp->user:"");
curl_socket_t *socks,
int numsocks)
{
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
if(!numsocks)
return GETSOCK_BLANK;
socks[0] = conn->sock[FIRSTSOCKET];
- if(ftp->sendleft) {
+ if(ftpc->sendleft) {
/* write mode */
return GETSOCK_WRITESOCK(0);
}
static CURLcode ftp_state_cwd(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
- if(ftp->cwddone)
+ if(ftpc->cwddone)
/* already done and fine */
result = ftp_state_post_cwd(conn);
else {
- ftp->count2 = 0;
- if (conn->bits.reuse && ftp->entrypath) {
+ ftpc->count2 = 0;
+ if (conn->bits.reuse && ftpc->entrypath) {
/* This is a re-used connection. Since we change directory to where the
transfer is taking place, we must first get back to the original dir
where we ended up after login: */
- ftp->count1 = 0; /* we count this as the first path, then we add one
+ ftpc->count1 = 0; /* we count this as the first path, then we add one
for all upcoming ones in the ftp->dirs[] array */
- NBFTPSENDF(conn, "CWD %s", ftp->entrypath);
+ NBFTPSENDF(conn, "CWD %s", ftpc->entrypath);
state(conn, FTP_CWD);
}
else {
- if(ftp->dirdepth) {
- ftp->count1 = 1;
+ if(ftpc->dirdepth) {
+ ftpc->count1 = 1;
/* issue the first CWD, the rest is sent when the CWD responses are
received... */
- NBFTPSENDF(conn, "CWD %s", ftp->dirs[ftp->count1 -1]);
+ NBFTPSENDF(conn, "CWD %s", ftpc->dirs[ftpc->count1 -1]);
state(conn, FTP_CWD);
}
else {
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
struct SessionHandle *data=conn->data;
curl_socket_t portsock= CURL_SOCKET_BAD;
char myhost[256] = "";
}
/* store which command was sent */
- ftp->count1 = fcmd;
+ ftpc->count1 = fcmd;
/* we set the secondary socket variable to this for now, it is only so that
the cleanup function will close it in case we fail before the true
static CURLcode ftp_state_use_pasv(struct connectdata *conn)
{
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
CURLcode result = CURLE_OK;
/*
   Here's the executive summary on what to do:
if(result)
return result;
- ftp->count1 = modeoff;
+ ftpc->count1 = modeoff;
state(conn, FTP_PASV);
infof(conn->data, "Connect data stream passively\n");
static CURLcode ftp_state_post_rest(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
struct SessionHandle *data = conn->data;
if(ftp->no_transfer || conn->bits.no_body) {
static CURLcode ftp_state_post_size(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
if(ftp->no_transfer) {
/* if a "head"-like request is being made */
static CURLcode ftp_state_post_type(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
if(ftp->no_transfer) {
/* if a "head"-like request is being made */
static CURLcode ftp_state_post_mdtm(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
struct SessionHandle *data = conn->data;
/* If we have selected NOBODY and HEADER, it means that we only want file
static CURLcode ftp_state_post_cwd(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
struct SessionHandle *data = conn->data;
/* Requested time of file or time-depended transfer? */
bool sizechecked)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
struct SessionHandle *data = conn->data;
curl_off_t passed=0;
- if((conn->resume_from && !sizechecked) ||
- ((conn->resume_from > 0) && sizechecked)) {
+ if((data->reqdata.resume_from && !sizechecked) ||
+ ((data->reqdata.resume_from > 0) && sizechecked)) {
/* we're about to continue the uploading of a file */
/* 1. get already existing file's size. We use the SIZE command for this
which may not exist in the server! The SIZE command is not in
/* 4. lower the infilesize counter */
/* => transfer as usual */
- if(conn->resume_from < 0 ) {
+ if(data->reqdata.resume_from < 0 ) {
/* Got no given size to start from, figure it out */
NBFTPSENDF(conn, "SIZE %s", ftp->file);
state(conn, FTP_STOR_SIZE);
/* TODO: allow the ioctlfunction to provide a fast forward function that
can be used here and use this method only as a fallback! */
do {
- curl_off_t readthisamountnow = (conn->resume_from - passed);
+ curl_off_t readthisamountnow = (data->reqdata.resume_from - passed);
curl_off_t actuallyread;
if(readthisamountnow > BUFSIZE)
" bytes from the input", passed);
return CURLE_FTP_COULDNT_USE_REST;
}
- } while(passed != conn->resume_from);
+ } while(passed != data->reqdata.resume_from);
/* now, decrease the size of the read */
if(data->set.infilesize>0) {
- data->set.infilesize -= conn->resume_from;
+ data->set.infilesize -= data->reqdata.resume_from;
if(data->set.infilesize <= 0) {
infof(data, "File already completely uploaded\n");
/* no data to transfer */
- result=Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
+ result=Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
/* Set no_transfer so that we won't get any error in
* Curl_ftp_done() because we didn't transfer anything! */
ftpstate instate)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
struct SessionHandle *data = conn->data;
+ struct FTP *ftp = data->reqdata.proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
bool quote=FALSE;
struct curl_slist *item;
}
if(init)
- ftp->count1 = 0;
+ ftpc->count1 = 0;
else
- ftp->count1++;
+ ftpc->count1++;
if(item) {
int i = 0;
/* Skip count1 items in the linked list */
- while((i< ftp->count1) && item) {
+ while((i< ftpc->count1) && item) {
item = item->next;
i++;
}
static CURLcode ftp_state_pasv_resp(struct connectdata *conn,
int ftpcode)
{
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
CURLcode result;
struct SessionHandle *data=conn->data;
Curl_addrinfo *conninfo;
char newhost[NEWHOST_BUFSIZE];
char *str=&data->state.buffer[4]; /* start on the first letter */
- if((ftp->count1 == 0) &&
+ if((ftpc->count1 == 0) &&
(ftpcode == 229)) {
/* positive EPSV response */
char *ptr = strchr(str, '(');
return CURLE_FTP_WEIRD_PASV_REPLY;
}
}
- else if((ftp->count1 == 1) &&
+ else if((ftpc->count1 == 1) &&
(ftpcode == 227)) {
/* positive PASV response */
int ip[4];
"%d.%d.%d.%d", ip[0], ip[1], ip[2], ip[3]);
newport = (port[0]<<8) + port[1];
}
- else if(ftp->count1 == 0) {
+ else if(ftpc->count1 == 0) {
/* EPSV failed, move on to PASV */
/* disable it for next transfer */
infof(data, "disabling EPSV usage\n");
NBFTPSENDF(conn, "PASV", NULL);
- ftp->count1++;
+ ftpc->count1++;
/* remain in the FTP_PASV state */
return result;
}
Curl_resolv_unlock(data, addr); /* we're done using this address */
- if (result && ftp->count1 == 0 && ftpcode == 229) {
+ if (result && ftpc->count1 == 0 && ftpcode == 229) {
infof(data, "got positive EPSV response, but can't connect. "
"Disabling EPSV\n");
/* disable it for next transfer */
conn->bits.ftp_use_epsv = FALSE;
data->state.errorbuf = FALSE; /* allow error message to get rewritten */
NBFTPSENDF(conn, "PASV", NULL);
- ftp->count1++;
+ ftpc->count1++;
/* remain in the FTP_PASV state */
return result;
}
* FTP pointer
*/
struct HTTP http_proxy;
- struct FTP *ftp_save = conn->proto.ftp;
+ struct FTP *ftp_save = data->reqdata.proto.ftp;
memset(&http_proxy, 0, sizeof(http_proxy));
- conn->proto.http = &http_proxy;
+ data->reqdata.proto.http = &http_proxy;
result = Curl_proxyCONNECT(conn, SECONDARYSOCKET, newhost, newport);
- conn->proto.ftp = ftp_save;
+ data->reqdata.proto.ftp = ftp_save;
if(CURLE_OK != result)
return result;
static CURLcode ftp_state_port_resp(struct connectdata *conn,
int ftpcode)
{
- struct FTP *ftp = conn->proto.ftp;
struct SessionHandle *data = conn->data;
- ftpport fcmd = (ftpport)ftp->count1;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
+ ftpport fcmd = (ftpport)ftpc->count1;
CURLcode result = CURLE_OK;
if(ftpcode != 200) {
int ftpcode)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
struct SessionHandle *data=conn->data;
+ struct FTP *ftp = data->reqdata.proto.ftp;
switch(ftpcode) {
case 213:
{
CURLcode result = CURLE_OK;
struct SessionHandle *data=conn->data;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = data->reqdata.proto.ftp;
if (data->set.max_filesize && (filesize > data->set.max_filesize)) {
failf(data, "Maximum file size exceeded");
}
ftp->downloadsize = filesize;
- if(conn->resume_from) {
+ if(data->reqdata.resume_from) {
/* We always (attempt to) get the size of downloads, so it is done before
this even when not doing resumes. */
if(filesize == -1) {
else {
/* We got a file size report, so we check that there actually is a
part of the file left to get, or else we go home. */
- if(conn->resume_from< 0) {
+ if(data->reqdata.resume_from< 0) {
/* We're supposed to download the last abs(from) bytes */
- if(filesize < -conn->resume_from) {
+ if(filesize < -data->reqdata.resume_from) {
failf(data, "Offset (%" FORMAT_OFF_T
") was beyond file size (%" FORMAT_OFF_T ")",
- conn->resume_from, filesize);
+ data->reqdata.resume_from, filesize);
return CURLE_BAD_DOWNLOAD_RESUME;
}
/* convert to size to download */
- ftp->downloadsize = -conn->resume_from;
+ ftp->downloadsize = -data->reqdata.resume_from;
/* download from where? */
- conn->resume_from = filesize - ftp->downloadsize;
+ data->reqdata.resume_from = filesize - ftp->downloadsize;
}
else {
- if(filesize < conn->resume_from) {
+ if(filesize < data->reqdata.resume_from) {
failf(data, "Offset (%" FORMAT_OFF_T
") was beyond file size (%" FORMAT_OFF_T ")",
- conn->resume_from, filesize);
+ data->reqdata.resume_from, filesize);
return CURLE_BAD_DOWNLOAD_RESUME;
}
/* Now store the number of bytes we are expected to download */
- ftp->downloadsize = filesize-conn->resume_from;
+ ftp->downloadsize = filesize-data->reqdata.resume_from;
}
}
if(ftp->downloadsize == 0) {
/* no data to transfer */
- result=Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
+ result = Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
infof(data, "File already completely downloaded\n");
/* Set no_transfer so that we won't get any error in Curl_ftp_done()
/* Set resume file transfer offset */
infof(data, "Instructs server to resume from offset %" FORMAT_OFF_T
- "\n", conn->resume_from);
+ "\n", data->reqdata.resume_from);
- NBFTPSENDF(conn, "REST %" FORMAT_OFF_T, conn->resume_from);
+ NBFTPSENDF(conn, "REST %" FORMAT_OFF_T, data->reqdata.resume_from);
state(conn, FTP_RETR_REST);
else if(instate == FTP_RETR_SIZE)
result = ftp_state_post_retr_size(conn, filesize);
else if(instate == FTP_STOR_SIZE) {
- conn->resume_from = filesize;
+ data->reqdata.resume_from = filesize;
result = ftp_state_ul_setup(conn, TRUE);
}
ftpstate instate)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
switch(instate) {
case FTP_REST:
{
CURLcode result = CURLE_OK;
struct SessionHandle *data = conn->data;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = data->reqdata.proto.ftp;
if(ftpcode>=400) {
failf(data, "Failed FTP upload: %0d", ftpcode);
Curl_pgrsSetUploadSize(data, data->set.infilesize);
- result = Curl_Transfer(conn, -1, -1, FALSE, NULL, /* no download */
- SECONDARYSOCKET, ftp->bytecountp);
+ result = Curl_setup_transfer(conn, -1, -1, FALSE, NULL, /* no download */
+ SECONDARYSOCKET, ftp->bytecountp);
state(conn, FTP_STOP);
return result;
{
CURLcode result = CURLE_OK;
struct SessionHandle *data = conn->data;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = data->reqdata.proto.ftp;
char *buf = data->state.buffer;
if((ftpcode == 150) || (ftpcode == 125)) {
return result;
}
- if(size > conn->maxdownload && conn->maxdownload > 0)
- size = conn->size = conn->maxdownload;
+ if(size > data->reqdata.maxdownload && data->reqdata.maxdownload > 0)
+ size = data->reqdata.size = data->reqdata.maxdownload;
+
+ infof(data, "Maxdownload = %" FORMAT_OFF_T "\n", data->reqdata.maxdownload);
if(instate != FTP_LIST)
infof(data, "Getting file with size: %" FORMAT_OFF_T "\n", size);
/* FTP download: */
- result=Curl_Transfer(conn, SECONDARYSOCKET, size, FALSE,
- ftp->bytecountp,
- -1, NULL); /* no upload here */
+ result=Curl_setup_transfer(conn, SECONDARYSOCKET, size, FALSE,
+ ftp->bytecountp,
+ -1, NULL); /* no upload here */
if(result)
return result;
{
CURLcode result = CURLE_OK;
struct SessionHandle *data = conn->data;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = data->reqdata.proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
(void)instate; /* no use for this yet */
- if((ftpcode == 331) && (ftp->state == FTP_USER)) {
+ if((ftpcode == 331) && (ftpc->state == FTP_USER)) {
/* 331 Password required for ...
(the server requires to send the user's password too) */
NBFTPSENDF(conn, "PASS %s", ftp->passwd?ftp->passwd:"");
curl_socket_t sock = conn->sock[FIRSTSOCKET];
struct SessionHandle *data=conn->data;
int ftpcode;
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
static const char * const ftpauth[] = {
"SSL", "TLS"
};
size_t nread = 0;
- if(ftp->sendleft) {
+ if(ftpc->sendleft) {
/* we have a piece of a command still left to send */
ssize_t written;
- result = Curl_write(conn, sock, ftp->sendthis + ftp->sendsize -
- ftp->sendleft, ftp->sendleft, &written);
+ result = Curl_write(conn, sock, ftpc->sendthis + ftpc->sendsize -
+ ftpc->sendleft, ftpc->sendleft, &written);
if(result)
return result;
- if(written != (ssize_t)ftp->sendleft) {
+ if(written != (ssize_t)ftpc->sendleft) {
/* only a fraction was sent */
- ftp->sendleft -= written;
+ ftpc->sendleft -= written;
}
else {
- free(ftp->sendthis);
- ftp->sendthis=NULL;
- ftp->sendleft = ftp->sendsize = 0;
- ftp->response = Curl_tvnow();
+ free(ftpc->sendthis);
+ ftpc->sendthis=NULL;
+ ftpc->sendleft = ftpc->sendsize = 0;
+ ftpc->response = Curl_tvnow();
}
return CURLE_OK;
}
if(ftpcode) {
/* we have now received a full FTP server response */
- switch(ftp->state) {
+ switch(ftpc->state) {
case FTP_WAIT220:
if(ftpcode != 220) {
failf(data, "This doesn't seem like a nice ftp-server response");
/* We don't have a SSL/TLS connection yet, but FTPS is
requested. Try a FTPS connection now */
- ftp->count3=0;
+ ftpc->count3=0;
switch(data->set.ftpsslauth) {
case CURLFTPAUTH_DEFAULT:
case CURLFTPAUTH_SSL:
- ftp->count2 = 1; /* add one to get next */
- ftp->count1 = 0;
+ ftpc->count2 = 1; /* add one to get next */
+ ftpc->count1 = 0;
break;
case CURLFTPAUTH_TLS:
- ftp->count2 = -1; /* subtract one to get next */
- ftp->count1 = 1;
+ ftpc->count2 = -1; /* subtract one to get next */
+ ftpc->count1 = 1;
break;
default:
failf(data, "unsupported parameter to CURLOPT_FTPSSLAUTH: %d\n",
data->set.ftpsslauth);
return CURLE_FAILED_INIT; /* we don't know what to do */
}
- NBFTPSENDF(conn, "AUTH %s", ftpauth[ftp->count1]);
+ NBFTPSENDF(conn, "AUTH %s", ftpauth[ftpc->count1]);
state(conn, FTP_AUTH);
}
else {
result = ftp_state_user(conn);
}
}
- else if(ftp->count3 < 1) {
- ftp->count3++;
- ftp->count1 += ftp->count2; /* get next attempt */
- result = Curl_nbftpsendf(conn, "AUTH %s", ftpauth[ftp->count1]);
+ else if(ftpc->count3 < 1) {
+ ftpc->count3++;
+ ftpc->count1 += ftpc->count2; /* get next attempt */
+ result = Curl_nbftpsendf(conn, "AUTH %s", ftpauth[ftpc->count1]);
/* remain in this same state */
}
else {
case FTP_USER:
case FTP_PASS:
- result = ftp_state_user_resp(conn, ftpcode, ftp->state);
+ result = ftp_state_user_resp(conn, ftpcode, ftpc->state);
break;
case FTP_ACCT:
store++;
ptr++;
}
- ftp->entrypath =dir; /* remember this */
- infof(data, "Entry path is '%s'\n", ftp->entrypath);
+ ftpc->entrypath =dir; /* remember this */
+ infof(data, "Entry path is '%s'\n", ftpc->entrypath);
/* also save it where getinfo can access it: */
- data->state.most_recent_ftp_entrypath = ftp->entrypath;
+ data->state.most_recent_ftp_entrypath = ftpc->entrypath;
}
else {
/* couldn't get the path */
failf(conn->data, "QUOT command failed with %03d", ftpcode);
return CURLE_FTP_QUOTE_ERROR;
}
- result = ftp_state_quote(conn, FALSE, ftp->state);
+ result = ftp_state_quote(conn, FALSE, ftpc->state);
if(result)
return result;
if(ftpcode/100 != 2) {
/* failure to CWD there */
if(conn->data->set.ftp_create_missing_dirs &&
- ftp->count1 && !ftp->count2) {
+ ftpc->count1 && !ftpc->count2) {
/* try making it */
- ftp->count2++; /* counter to prevent CWD-MKD loops */
- NBFTPSENDF(conn, "MKD %s", ftp->dirs[ftp->count1 - 1]);
+ ftpc->count2++; /* counter to prevent CWD-MKD loops */
+ NBFTPSENDF(conn, "MKD %s", ftpc->dirs[ftpc->count1 - 1]);
state(conn, FTP_MKD);
}
else {
/* return failure */
failf(data, "Server denied you to change to the given directory");
- ftp->cwdfail = TRUE; /* don't remember this path as we failed
- to enter it */
+ ftpc->cwdfail = TRUE; /* don't remember this path as we failed
+ to enter it */
return CURLE_FTP_ACCESS_DENIED;
}
}
else {
/* success */
- ftp->count2=0;
- if(++ftp->count1 <= ftp->dirdepth) {
+ ftpc->count2=0;
+ if(++ftpc->count1 <= ftpc->dirdepth) {
/* send next CWD */
- NBFTPSENDF(conn, "CWD %s", ftp->dirs[ftp->count1 - 1]);
+ NBFTPSENDF(conn, "CWD %s", ftpc->dirs[ftpc->count1 - 1]);
}
else {
result = ftp_state_post_cwd(conn);
}
state(conn, FTP_CWD);
/* send CWD */
- NBFTPSENDF(conn, "CWD %s", ftp->dirs[ftp->count1 - 1]);
+ NBFTPSENDF(conn, "CWD %s", ftpc->dirs[ftpc->count1 - 1]);
break;
case FTP_MDTM:
case FTP_LIST_TYPE:
case FTP_RETR_TYPE:
case FTP_STOR_TYPE:
- result = ftp_state_type_resp(conn, ftpcode, ftp->state);
+ result = ftp_state_type_resp(conn, ftpcode, ftpc->state);
break;
case FTP_SIZE:
case FTP_RETR_SIZE:
case FTP_STOR_SIZE:
- result = ftp_state_size_resp(conn, ftpcode, ftp->state);
+ result = ftp_state_size_resp(conn, ftpcode, ftpc->state);
break;
case FTP_REST:
case FTP_RETR_REST:
- result = ftp_state_rest_resp(conn, ftpcode, ftp->state);
+ result = ftp_state_rest_resp(conn, ftpcode, ftpc->state);
break;
case FTP_PASV:
case FTP_LIST:
case FTP_RETR:
- result = ftp_state_get_resp(conn, ftpcode, ftp->state);
+ result = ftp_state_get_resp(conn, ftpcode, ftpc->state);
break;
case FTP_STOR:
static long ftp_state_timeout(struct connectdata *conn)
{
struct SessionHandle *data=conn->data;
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
long timeout_ms=360000; /* in milliseconds */
if(data->set.ftp_response_timeout )
to govern the response for any given ftp response, not for the time
from connect to the given ftp response. */
timeout_ms = data->set.ftp_response_timeout*1000 - /* timeout time */
- Curl_tvdiff(Curl_tvnow(), ftp->response); /* spent time */
+ Curl_tvdiff(Curl_tvnow(), ftpc->response); /* spent time */
else if(data->set.timeout)
/* if timeout is requested, find out how much remaining time we have */
timeout_ms = data->set.timeout*1000 - /* timeout time */
else
/* Without a requested timeout, we only wait 'response_time' seconds for
the full response to arrive before we bail out */
- timeout_ms = ftp->response_time*1000 -
- Curl_tvdiff(Curl_tvnow(), ftp->response); /* spent time */
+ timeout_ms = ftpc->response_time*1000 -
+ Curl_tvdiff(Curl_tvnow(), ftpc->response); /* spent time */
return timeout_ms;
}
curl_socket_t sock = conn->sock[FIRSTSOCKET];
int rc;
struct SessionHandle *data=conn->data;
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
CURLcode result = CURLE_OK;
long timeout_ms = ftp_state_timeout(conn);
return CURLE_OPERATION_TIMEDOUT;
}
- rc = Curl_select(ftp->sendleft?CURL_SOCKET_BAD:sock, /* reading */
- ftp->sendleft?sock:CURL_SOCKET_BAD, /* writing */
+ rc = Curl_select(ftpc->sendleft?CURL_SOCKET_BAD:sock, /* reading */
+ ftpc->sendleft?sock:CURL_SOCKET_BAD, /* writing */
0);
if(rc == -1) {
}
else if(rc != 0) {
result = ftp_statemach_act(conn);
- *done = (ftp->state == FTP_STOP);
+ *done = (ftpc->state == FTP_STOP);
}
/* if rc == 0, then select() timed out */
curl_socket_t sock = conn->sock[FIRSTSOCKET];
int rc;
struct SessionHandle *data=conn->data;
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
CURLcode result = CURLE_OK;
- while(ftp->state != FTP_STOP) {
+ while(ftpc->state != FTP_STOP) {
long timeout_ms = ftp_state_timeout(conn);
if(timeout_ms <=0 ) {
return CURLE_OPERATION_TIMEDOUT; /* already too little time */
}
- rc = Curl_select(ftp->sendleft?CURL_SOCKET_BAD:sock, /* reading */
- ftp->sendleft?sock:CURL_SOCKET_BAD, /* writing */
+ rc = Curl_select(ftpc->sendleft?CURL_SOCKET_BAD:sock, /* reading */
+ ftpc->sendleft?sock:CURL_SOCKET_BAD, /* writing */
(int)timeout_ms);
if(rc == -1) {
return result;
}
+/*
+ * Allocate and initialize the struct FTP for the current SessionHandle, if
+ * need be.
+ */
+static CURLcode ftp_init(struct connectdata *conn)
+{
+ struct SessionHandle *data = conn->data;
+ struct FTP *ftp;
+ if(data->reqdata.proto.ftp)
+ return CURLE_OK;
+
+ ftp = (struct FTP *)calloc(sizeof(struct FTP), 1);
+ if(!ftp)
+ return CURLE_OUT_OF_MEMORY;
+
+ data->reqdata.proto.ftp = ftp;
+
+ /* get some initial data into the ftp struct */
+ ftp->bytecountp = &data->reqdata.keep.bytecount;
+
+ /* no need to duplicate them, this connectdata struct won't change */
+ ftp->user = conn->user;
+ ftp->passwd = conn->passwd;
+ if (isBadFtpString(ftp->user) || isBadFtpString(ftp->passwd))
+ return CURLE_URL_MALFORMAT;
+
+ return CURLE_OK;
+}
+
/*
* Curl_ftp_connect() should do everything that is to be considered a part of
* the connection phase.
CURLcode Curl_ftp_connect(struct connectdata *conn,
bool *done) /* see description above */
{
- struct FTP *ftp;
CURLcode result;
#ifndef CURL_DISABLE_HTTP
/* for FTP over HTTP proxy */
struct HTTP http_proxy;
struct FTP *ftp_save;
#endif /* CURL_DISABLE_HTTP */
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
+ struct SessionHandle *data=conn->data;
*done = FALSE; /* default to not done yet */
- ftp = (struct FTP *)calloc(sizeof(struct FTP), 1);
- if(!ftp)
- return CURLE_OUT_OF_MEMORY;
+ if (data->reqdata.proto.ftp) {
+ Curl_ftp_disconnect(conn);
+ free(data->reqdata.proto.ftp);
+ data->reqdata.proto.ftp = NULL;
+ }
- conn->proto.ftp = ftp;
+ result = ftp_init(conn);
+ if(result)
+ return result;
 /* We always support persistent connections on ftp */
conn->bits.close = FALSE;
- /* get some initial data into the ftp struct */
- ftp->bytecountp = &conn->bytecount;
-
- /* no need to duplicate them, this connectdata struct won't change */
- ftp->user = conn->user;
- ftp->passwd = conn->passwd;
- if (isBadFtpString(ftp->user) || isBadFtpString(ftp->passwd))
- return CURLE_URL_MALFORMAT;
-
- ftp->response_time = 3600; /* set default response time-out */
+ ftpc->response_time = 3600; /* set default response time-out */
#ifndef CURL_DISABLE_HTTP
if (conn->bits.tunnel_proxy && conn->bits.httpproxy) {
* Curl_proxyCONNECT we have to set back the member to the original struct
* FTP pointer
*/
- ftp_save = conn->proto.ftp;
+ ftp_save = data->reqdata.proto.ftp;
memset(&http_proxy, 0, sizeof(http_proxy));
- conn->proto.http = &http_proxy;
+ data->reqdata.proto.http = &http_proxy;
result = Curl_proxyCONNECT(conn, FIRSTSOCKET,
conn->host.name, conn->remote_port);
- conn->proto.ftp = ftp_save;
+ data->reqdata.proto.ftp = ftp_save;
if(CURLE_OK != result)
return result;
response */
ftp_respinit(conn); /* init the response reader stuff */
state(conn, FTP_WAIT220);
- ftp->response = Curl_tvnow(); /* start response time-out now! */
+ ftpc->response = Curl_tvnow(); /* start response time-out now! */
- if(conn->data->state.used_interface == Curl_if_multi)
+ if(data->state.used_interface == Curl_if_multi)
result = Curl_ftp_multi_statemach(conn, done);
else {
result = ftp_easy_statemach(conn);
CURLcode Curl_ftp_done(struct connectdata *conn, CURLcode status)
{
struct SessionHandle *data = conn->data;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = data->reqdata.proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
ssize_t nread;
int ftpcode;
CURLcode result=CURLE_OK;
- bool was_ctl_valid = ftp->ctl_valid;
+ bool was_ctl_valid = ftpc->ctl_valid;
size_t flen;
size_t dlen;
char *path;
+ char *path_to_use = data->reqdata.path;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
+
+ if (conn->sec_path)
+ path_to_use = conn->sec_path;
/* now store a copy of the directory we are in */
- if(ftp->prevpath)
- free(ftp->prevpath);
+ if(ftpc->prevpath)
+ free(ftpc->prevpath);
/* get the "raw" path */
- path = curl_easy_unescape(conn->data, conn->path, 0, NULL);
+ path = curl_easy_unescape(data, path_to_use, 0, NULL);
if(!path)
return CURLE_OUT_OF_MEMORY;
flen = ftp->file?strlen(ftp->file):0; /* file is "raw" already */
dlen = strlen(path)-flen;
- if(dlen && !ftp->cwdfail) {
- ftp->prevpath = path;
+ if(dlen && !ftpc->cwdfail) {
+ ftpc->prevpath = path;
if(flen)
/* if 'path' is not the whole string */
- ftp->prevpath[dlen]=0; /* terminate */
- infof(data, "Remembering we are in dir %s\n", ftp->prevpath);
+ ftpc->prevpath[dlen]=0; /* terminate */
+ infof(data, "Remembering we are in dir %s\n", ftpc->prevpath);
}
else {
- ftp->prevpath = NULL; /* no path */
+ ftpc->prevpath = NULL; /* no path */
free(path);
}
/* free the dir tree and file parts */
- freedirs(ftp);
+ freedirs(conn);
switch(status) {
case CURLE_BAD_DOWNLOAD_RESUME:
/* the connection stays alive fine even though this happened */
/* fall-through */
case CURLE_OK: /* doesn't affect the control connection's status */
- ftp->ctl_valid = was_ctl_valid;
+ ftpc->ctl_valid = was_ctl_valid;
break;
default: /* by default, an error means the control connection is
wedged and should not be used anymore */
- ftp->ctl_valid = FALSE;
+ ftpc->ctl_valid = FALSE;
break;
}
* data has been transfered. This happens when doing through NATs etc that
* abandon old silent connections.
*/
- long old_time = ftp->response_time;
+ long old_time = ftpc->response_time;
- ftp->response_time = 60; /* give it only a minute for now */
+ ftpc->response_time = 60; /* give it only a minute for now */
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
- ftp->response_time = old_time; /* set this back to previous value */
+ ftpc->response_time = old_time; /* set this back to previous value */
if(!nread && (CURLE_OPERATION_TIMEDOUT == result)) {
failf(data, "control connection looks dead");
- ftp->ctl_valid = FALSE; /* mark control connection as bad */
+ ftpc->ctl_valid = FALSE; /* mark control connection as bad */
return result;
}
if(result)
return result;
- if(!ftp->dont_check) {
+ if(!ftpc->dont_check) {
/* 226 Transfer complete, 250 Requested file action okay, completed. */
if((ftpcode != 226) && (ftpcode != 250)) {
failf(data, "server did not report OK, got %d", ftpcode);
}
}
else {
- if((-1 != conn->size) && (conn->size != *ftp->bytecountp) &&
+ if((-1 != k->size) && (k->size != *ftp->bytecountp) &&
#ifdef CURL_DO_LINEEND_CONV
/* Most FTP servers don't adjust their file SIZE response for CRLFs, so
* we'll check to see if the discrepancy can be explained by the number
* of CRLFs we've changed to LFs.
*/
- ((conn->size + data->state.crlf_conversions) != *ftp->bytecountp) &&
+ ((k->size + data->state.crlf_conversions) != *ftp->bytecountp) &&
#endif /* CURL_DO_LINEEND_CONV */
- (conn->maxdownload != *ftp->bytecountp)) {
+ (k->maxdownload != *ftp->bytecountp)) {
failf(data, "Received only partial file: %" FORMAT_OFF_T " bytes",
*ftp->bytecountp);
result = CURLE_PARTIAL_FILE;
}
- else if(!ftp->dont_check &&
+ else if(!ftpc->dont_check &&
!*ftp->bytecountp &&
- (conn->size>0)) {
+ (k->size>0)) {
failf(data, "No data was received!");
result = CURLE_FTP_COULDNT_RETR_FILE;
}
/* clear these for next connection */
ftp->no_transfer = FALSE;
- ftp->dont_check = FALSE;
+ ftpc->dont_check = FALSE;
if (!result && conn->sec_conn) { /* 3rd party transfer */
/* "done" with the secondary connection */
ascii?"ASCII":"binary");
return ascii? CURLE_FTP_COULDNT_SET_ASCII:CURLE_FTP_COULDNT_SET_BINARY;
}
+
/* keep track of our current transfer type */
- conn->proto.ftp->transfertype = ascii?'A':'I';
+ conn->proto.ftpc.transfertype = ascii?'A':'I';
return CURLE_OK;
}
static int ftp_need_type(struct connectdata *conn,
bool ascii_wanted)
{
- return conn->proto.ftp->transfertype != (ascii_wanted?'A':'I');
+ return conn->proto.ftpc.transfertype != (ascii_wanted?'A':'I');
}
/***********************************************************************
static CURLcode ftp_nb_type(struct connectdata *conn,
bool ascii, ftpstate newstate)
{
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
CURLcode result;
int want = ascii?'A':'I';
- if (ftp->transfertype == want) {
+ if (ftpc->transfertype == want) {
state(conn, newstate);
return ftp_state_type_resp(conn, 200, newstate);
}
state(conn, newstate);
/* keep track of our current transfer type */
- ftp->transfertype = want;
+ ftpc->transfertype = want;
return CURLE_OK;
}
curl_off_t totalsize=-1;
char *ptr;
char *ptr2;
- struct FTP *ftp = conn->proto.ftp;
+ struct SessionHandle *data = conn->data;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
- if(conn->bits.use_range && conn->range) {
- from=curlx_strtoofft(conn->range, &ptr, 0);
+ if(data->reqdata.use_range && data->reqdata.range) {
+ from=curlx_strtoofft(data->reqdata.range, &ptr, 0);
while(ptr && *ptr && (isspace((int)*ptr) || (*ptr=='-')))
ptr++;
to=curlx_strtoofft(ptr, &ptr2, 0);
}
if((-1 == to) && (from>=0)) {
/* X - */
- conn->resume_from = from;
+ data->reqdata.resume_from = from;
DEBUGF(infof(conn->data, "FTP RANGE %" FORMAT_OFF_T " to end of file\n",
from));
}
else if(from < 0) {
/* -Y */
totalsize = -from;
- conn->maxdownload = -from;
- conn->resume_from = from;
+ data->reqdata.maxdownload = -from;
+ data->reqdata.resume_from = from;
DEBUGF(infof(conn->data, "FTP RANGE the last %" FORMAT_OFF_T " bytes\n",
totalsize));
}
else {
/* X-Y */
totalsize = to-from;
- conn->maxdownload = totalsize+1; /* include the last mentioned byte */
- conn->resume_from = from;
+ data->reqdata.maxdownload = totalsize+1; /* include last byte */
+ data->reqdata.resume_from = from;
DEBUGF(infof(conn->data, "FTP RANGE from %" FORMAT_OFF_T
" getting %" FORMAT_OFF_T " bytes\n",
- from, conn->maxdownload));
+ from, data->reqdata.maxdownload));
}
DEBUGF(infof(conn->data, "range-download from %" FORMAT_OFF_T
" to %" FORMAT_OFF_T ", totally %" FORMAT_OFF_T " bytes\n",
- from, to, conn->maxdownload));
- ftp->dont_check = TRUE; /* dont check for successful transfer */
+ from, to, data->reqdata.maxdownload));
+ ftpc->dont_check = TRUE; /* don't check for successful transfer */
}
return CURLE_OK;
}
CURLcode result = CURLE_OK;
/* the ftp struct is inited in Curl_ftp_connect() */
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = data->reqdata.proto.ftp;
DEBUGF(infof(data, "DO-MORE phase starts\n"));
if(ftp->no_transfer)
/* no data to transfer. FIX: it feels like a kludge to have this here
too! */
- result=Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
+ result=Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
/* end of transfer */
DEBUGF(infof(data, "DO-MORE phase ends\n"));
*done = FALSE; /* default to false */
+ /*
+   Since connections can be re-used between SessionHandles, this might be an
+   already existing connection on a fresh SessionHandle struct, so we must
+   make sure we have a good 'struct FTP' to play with. For new connections,
+   the struct FTP is allocated and set up in the Curl_ftp_connect() function.
+ */
+ retcode = ftp_init(conn);
+ if(retcode)
+ return retcode;
+
retcode = ftp_parse_url_path(conn);
if (retcode)
return retcode;
size_t write_len;
char *sptr=s;
CURLcode res = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
struct SessionHandle *data = conn->data;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
va_list ap;
va_start(ap, fmt);
/* the whole chunk was not sent, store the rest of the data */
write_len -= bytes_written;
sptr += bytes_written;
- ftp->sendthis = malloc(write_len);
- if(ftp->sendthis) {
- memcpy(ftp->sendthis, sptr, write_len);
- ftp->sendsize=ftp->sendleft=write_len;
+ ftpc->sendthis = malloc(write_len);
+ if(ftpc->sendthis) {
+ memcpy(ftpc->sendthis, sptr, write_len);
+ ftpc->sendsize = ftpc->sendleft = write_len;
}
else {
failf(data, "out of memory");
}
}
else
- ftp->response = Curl_tvnow();
+ ftpc->response = Curl_tvnow();
return res;
}
{
CURLcode result = CURLE_OK;
- if(conn->proto.ftp->ctl_valid) {
+ if(conn->proto.ftpc.ctl_valid) {
NBFTPSENDF(conn, "QUIT", NULL);
state(conn, FTP_QUIT);
*/
CURLcode Curl_ftp_disconnect(struct connectdata *conn)
{
- struct FTP *ftp= conn->proto.ftp;
+ struct ftp_conn *ftpc= &conn->proto.ftpc;
/* We cannot send quit unconditionally. If this connection is stale or
bad in any way, sending quit and waiting around here will make the
*/
/* The FTP session may or may not have been allocated/setup at this point! */
- if(ftp) {
+ if(conn->data->reqdata.proto.ftp) {
(void)ftp_quit(conn); /* ignore errors on the QUIT */
- if(ftp->entrypath) {
+ if(ftpc->entrypath) {
struct SessionHandle *data = conn->data;
data->state.most_recent_ftp_entrypath = NULL;
- free(ftp->entrypath);
- ftp->entrypath = NULL;
+ free(ftpc->entrypath);
+ ftpc->entrypath = NULL;
}
- if(ftp->cache) {
- free(ftp->cache);
- ftp->cache = NULL;
+ if(ftpc->cache) {
+ free(ftpc->cache);
+ ftpc->cache = NULL;
}
- freedirs(ftp);
- if(ftp->prevpath) {
- free(ftp->prevpath);
- ftp->prevpath = NULL;
+ freedirs(conn);
+ if(ftpc->prevpath) {
+ free(ftpc->prevpath);
+ ftpc->prevpath = NULL;
}
}
return CURLE_OK;
struct connectdata *pasv_conn;
struct connectdata *port_conn;
+ char *path = data->reqdata.path;
+
if (data->set.ftpport == NULL) {
pasv_conn = conn;
port_conn = sec_conn;
port_conn = conn;
}
+ if (sec_conn->sec_path)
+ path = sec_conn->sec_path;
+
result = ftp_cwd_and_create_path(conn);
if (result)
return result;
/* transfers file between remote hosts */
/* FIX: this should send a series of CWD commands and then RETR only the
- ftp->file file. The conn->path "full path" is not unescaped. Test case
- 230 tests this. */
- FTPSENDF(sec_conn, "RETR %s", sec_conn->path);
+ ftp->file file. The conn->data->reqdata.path "full path" is not
+ unescaped. Test case 230 tests this. */
+ FTPSENDF(sec_conn, "RETR %s", path);
if(!data->set.ftpport) {
return CURLE_FTP_COULDNT_RETR_FILE;
}
- result = Curl_ftpsendf(conn, "%s %s", stor_cmd, conn->proto.ftp->file);
+ result = Curl_ftpsendf(conn, "%s %s", stor_cmd,
+ data->reqdata.proto.ftp->file);
if(CURLE_OK == result)
result = Curl_GetFTPResponse(&nread, conn, &ftpcode);
if (result)
}
else {
- result = Curl_ftpsendf(conn, "%s %s", stor_cmd, conn->proto.ftp->file);
+ result = Curl_ftpsendf(conn, "%s %s", stor_cmd,
+ data->reqdata.proto.ftp->file);
if(CURLE_OK == result)
result = Curl_GetFTPResponse(&nread, sec_conn, &ftpcode);
if (result)
{
CURLcode retcode = CURLE_OK;
struct SessionHandle *data = conn->data;
- struct FTP *ftp;
+ /* the ftp struct is already inited in ftp_connect() */
+ struct FTP *ftp = data->reqdata.proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
size_t dlen;
-
char *slash_pos; /* position of the first '/' char in curpos */
- char *cur_pos = conn->path; /* current position in path. point at the begin
- of next path component */
+ char *path_to_use = data->reqdata.path;
+ char *cur_pos;
- /* the ftp struct is already inited in ftp_connect() */
- ftp = conn->proto.ftp;
- ftp->ctl_valid = FALSE;
- ftp->cwdfail = FALSE;
+ if (conn->sec_path) {
+ path_to_use = conn->sec_path;
+ }
+
+  cur_pos = path_to_use; /* current position in path. points to the beginning
+                            of the next path component */
+
+ ftpc->ctl_valid = FALSE;
+ ftpc->cwdfail = FALSE;
switch(data->set.ftp_filemethod) {
case FTPFILE_NOCWD:
/* fastest, but less standard-compliant */
- ftp->file = conn->path; /* this is a full file path */
+ ftp->file = data->reqdata.path; /* this is a full file path */
break;
case FTPFILE_SINGLECWD:
/* get the last slash */
slash_pos=strrchr(cur_pos, '/');
if(slash_pos || !cur_pos || !*cur_pos) {
- ftp->dirdepth = 1; /* we consider it to be a single dir */
- ftp->dirs = (char **)calloc(1, sizeof(ftp->dirs[0]));
- if(!ftp->dirs)
+ ftpc->dirdepth = 1; /* we consider it to be a single dir */
+ ftpc->dirs = (char **)calloc(1, sizeof(ftpc->dirs[0]));
+ if(!ftpc->dirs)
return CURLE_OUT_OF_MEMORY;
- ftp->dirs[0] = curl_easy_unescape(conn->data, slash_pos ? cur_pos : "/",
- slash_pos?(int)(slash_pos-cur_pos):1,
- NULL);
- if(!ftp->dirs[0]) {
- free(ftp->dirs);
+ ftpc->dirs[0] = curl_easy_unescape(conn->data, slash_pos ? cur_pos : "/",
+ slash_pos?(int)(slash_pos-cur_pos):1,
+ NULL);
+ if(!ftpc->dirs[0]) {
+ free(ftpc->dirs);
return CURLE_OUT_OF_MEMORY;
}
ftp->file = slash_pos ? slash_pos+1 : cur_pos; /* rest is file name */
default: /* allow pretty much anything */
case FTPFILE_MULTICWD:
- ftp->dirdepth = 0;
- ftp->diralloc = 5; /* default dir depth to allocate */
- ftp->dirs = (char **)calloc(ftp->diralloc, sizeof(ftp->dirs[0]));
- if(!ftp->dirs)
+ ftpc->dirdepth = 0;
+ ftpc->diralloc = 5; /* default dir depth to allocate */
+ ftpc->dirs = (char **)calloc(ftpc->diralloc, sizeof(ftpc->dirs[0]));
+ if(!ftpc->dirs)
return CURLE_OUT_OF_MEMORY;
/* parse the URL path into separate path components */
while ((slash_pos = strchr(cur_pos, '/')) != NULL) {
/* 1 or 0 to indicate absolute directory */
- bool absolute_dir = (cur_pos - conn->path > 0) && (ftp->dirdepth == 0);
+ bool absolute_dir = (cur_pos - data->reqdata.path > 0) &&
+ (ftpc->dirdepth == 0);
/* seek out the next path component */
if (slash_pos-cur_pos) {
- /* we skip empty path components, like "x//y" since the FTP command CWD
- requires a parameter and a non-existant parameter a) doesn't work on
- many servers and b) has no effect on the others. */
+ /* we skip empty path components, like "x//y" since the FTP command
+         CWD requires a parameter and a non-existent parameter a) doesn't
+ work on many servers and b) has no effect on the others. */
int len = (int)(slash_pos - cur_pos + absolute_dir);
- ftp->dirs[ftp->dirdepth] = curl_easy_unescape(conn->data,
- cur_pos - absolute_dir,
- len, NULL);
- if (!ftp->dirs[ftp->dirdepth]) { /* run out of memory ... */
+ ftpc->dirs[ftpc->dirdepth] = curl_easy_unescape(conn->data,
+ cur_pos - absolute_dir,
+ len, NULL);
+ if (!ftpc->dirs[ftpc->dirdepth]) { /* run out of memory ... */
failf(data, "no memory");
- freedirs(ftp);
+ freedirs(conn);
return CURLE_OUT_OF_MEMORY;
}
- if (isBadFtpString(ftp->dirs[ftp->dirdepth])) {
- freedirs(ftp);
+ if (isBadFtpString(ftpc->dirs[ftpc->dirdepth])) {
+ freedirs(conn);
return CURLE_URL_MALFORMAT;
}
}
if(!retcode) {
cur_pos = slash_pos + 1; /* jump to the rest of the string */
- if(++ftp->dirdepth >= ftp->diralloc) {
+ if(++ftpc->dirdepth >= ftpc->diralloc) {
/* enlarge array */
char *bigger;
- ftp->diralloc *= 2; /* double the size each time */
- bigger = realloc(ftp->dirs, ftp->diralloc * sizeof(ftp->dirs[0]));
+ ftpc->diralloc *= 2; /* double the size each time */
+ bigger = realloc(ftpc->dirs, ftpc->diralloc * sizeof(ftpc->dirs[0]));
if(!bigger) {
- ftp->dirdepth--;
- freedirs(ftp);
+ ftpc->dirdepth--;
+ freedirs(conn);
return CURLE_OUT_OF_MEMORY;
}
- ftp->dirs = (char **)bigger;
+ ftpc->dirs = (char **)bigger;
}
}
}
if(*ftp->file) {
ftp->file = curl_easy_unescape(conn->data, ftp->file, 0, NULL);
if(NULL == ftp->file) {
- freedirs(ftp);
+ freedirs(conn);
failf(data, "no memory");
return CURLE_OUT_OF_MEMORY;
}
if (isBadFtpString(ftp->file)) {
- freedirs(ftp);
+ freedirs(conn);
return CURLE_URL_MALFORMAT;
}
}
return CURLE_URL_MALFORMAT;
}
- ftp->cwddone = FALSE; /* default to not done */
+ ftpc->cwddone = FALSE; /* default to not done */
- if(ftp->prevpath) {
+ if(ftpc->prevpath) {
/* prevpath is "raw" so we convert the input path before we compare the
strings */
- char *path = curl_easy_unescape(conn->data, conn->path, 0, NULL);
+ char *path = curl_easy_unescape(conn->data, data->reqdata.path, 0, NULL);
if(!path)
return CURLE_OUT_OF_MEMORY;
dlen = strlen(path) - (ftp->file?strlen(ftp->file):0);
- if((dlen == strlen(ftp->prevpath)) &&
- curl_strnequal(path, ftp->prevpath, dlen)) {
+ if((dlen == strlen(ftpc->prevpath)) &&
+ curl_strnequal(path, ftpc->prevpath, dlen)) {
infof(data, "Request has same path as previous transfer\n");
- ftp->cwddone = TRUE;
+ ftpc->cwddone = TRUE;
}
free(path);
}
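
As a side note, the directory-length comparison above is easiest to see with concrete values. The sketch below uses made-up paths (not taken from the patch) and assumes, as the code does, that the stored prevpath holds the directory part of the previous transfer including the trailing slash:

  #include <string.h>
  #include <curl/curl.h>   /* for curl_strnequal() */

  /* illustrative values only */
  static int same_dir_as_before(void)
  {
    const char *path     = "dir1/dir2/other.txt"; /* unescaped request path */
    const char *file     = "other.txt";           /* the ftp->file part */
    const char *prevpath = "dir1/dir2/";          /* kept from the last transfer */

    /* dlen = 19 - 9 = 10, the length of "dir1/dir2/" */
    size_t dlen = strlen(path) - strlen(file);

    /* same length and same (case-insensitive) prefix => the CWD sequence
       can be skipped and cwddone set to TRUE */
    return (dlen == strlen(prevpath)) && curl_strnequal(path, prevpath, dlen);
  }
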
CURLcode ftp_cwd_and_create_path(struct connectdata *conn)
{
CURLcode result = CURLE_OK;
- /* the ftp struct is already inited in Curl_ftp_connect() */
- struct FTP *ftp = conn->proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
int i;
- if(ftp->cwddone)
+ if(ftpc->cwddone)
/* already done and fine */
return CURLE_OK;
/* This is a re-used connection. Since we change directory to where the
transfer is taking place, we must now get back to the original dir
where we ended up after login: */
- if (conn->bits.reuse && ftp->entrypath) {
- if ((result = ftp_cwd_and_mkd(conn, ftp->entrypath)) != CURLE_OK)
+ if (conn->bits.reuse && ftpc->entrypath) {
+ if ((result = ftp_cwd_and_mkd(conn, ftpc->entrypath)) != CURLE_OK)
return result;
}
- for (i=0; i < ftp->dirdepth; i++) {
+ for (i=0; i < ftpc->dirdepth; i++) {
/* RFC 1738 says empty components should be respected too, but
that is plain stupid since CWD can't be used with an empty argument */
- if ((result = ftp_cwd_and_mkd(conn, ftp->dirs[i])) != CURLE_OK)
+ if ((result = ftp_cwd_and_mkd(conn, ftpc->dirs[i])) != CURLE_OK)
return result;
}
bool connected)
{
CURLcode result = CURLE_OK;
- struct FTP *ftp = conn->proto.ftp;
+ struct FTP *ftp = conn->data->reqdata.proto.ftp;
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
if(connected)
result = Curl_ftp_nextconnect(conn);
if(ftp->no_transfer)
/* no data to transfer */
- result=Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
+ result=Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
else if(!connected)
/* since we didn't connect now, we want do_more to get called */
conn->bits.do_more = TRUE;
- ftp->ctl_valid = TRUE; /* seems good */
+ ftpc->ctl_valid = TRUE; /* seems good */
return result;
}
CURLcode result=CURLE_OK;
bool connected=0;
struct SessionHandle *data = conn->data;
- struct FTP *ftp;
-
- /* the ftp struct is already inited in ftp_connect() */
- ftp = conn->proto.ftp;
- conn->size = -1; /* make sure this is unknown at this point */
+ struct ftp_conn *ftpc = &conn->proto.ftpc;
+ data->reqdata.size = -1; /* make sure this is unknown at this point */
Curl_pgrsSetUploadCounter(data, 0);
Curl_pgrsSetDownloadCounter(data, 0);
Curl_pgrsSetUploadSize(data, 0);
Curl_pgrsSetDownloadSize(data, 0);
- ftp->ctl_valid = TRUE; /* starts good */
+ ftpc->ctl_valid = TRUE; /* starts good */
result = ftp_perform(conn,
&connected, /* have we connected after PASV/PORT */
return result;
}
else
- freedirs(ftp);
+ freedirs(conn);
return result;
}
*/
static CURLcode ftp_3rdparty(struct connectdata *conn)
{
+ struct Curl_transfer_keeper *k = &conn->data->reqdata.keep;
CURLcode result = CURLE_OK;
- conn->proto.ftp->ctl_valid = conn->sec_conn->proto.ftp->ctl_valid = TRUE;
- conn->size = conn->sec_conn->size = -1;
+ /* both control connections start out fine */
+ conn->proto.ftpc.ctl_valid = TRUE;
+ conn->sec_conn->proto.ftpc.ctl_valid = TRUE;
+ k->size = -1;
result = ftp_3rdparty_pretransfer(conn);
if (!result)
break;
case CURLINFO_LASTSOCKET:
if((data->state.lastconnect != -1) &&
- (data->state.connects[data->state.lastconnect] != NULL)) {
- *param_longp = data->state.connects[data->state.lastconnect]->
- sock[FIRSTSOCKET];
+ (data->state.connc->connects[data->state.lastconnect] != NULL)) {
+ struct connectdata *c = data->state.connc->connects
+ [data->state.lastconnect];
+ *param_longp = c->sock[FIRSTSOCKET];
/* we have a socket connected, let's determine if the server shut down */
/* determine if ssl */
- if(data->state.connects[data->state.lastconnect]->ssl[FIRSTSOCKET].use) {
+ if(c->ssl[FIRSTSOCKET].use) {
/* use the SSL context */
- if (!Curl_ssl_check_cxn(data->state.connects[data->state.lastconnect]))
+ if (!Curl_ssl_check_cxn(c))
*param_longp = -1; /* FIN received */
}
/* Minix 3.1 doesn't support any flags on recv; just assume socket is OK */
#ifdef MSG_PEEK
else {
/* use the socket */
- if(recv((int)data->state.connects[data->state.lastconnect]->
- sock[FIRSTSOCKET], (void*)&buf, 1, MSG_PEEK) == 0)
+ if(recv((int)c->sock[FIRSTSOCKET], (void*)&buf, 1, MSG_PEEK) == 0)
*param_longp = -1; /* FIN received */
}
#endif
#define CURL_ASYNC_SUCCESS ARES_SUCCESS
#else
#define CURL_ASYNC_SUCCESS CURLE_OK
-#define ares_cancel(x)
+#define ares_cancel(x) do {} while(0)
+#define ares_destroy(x) do {} while(0)
#endif
/*
*/
static CURLcode perhapsrewind(struct connectdata *conn)
{
- struct HTTP *http = conn->proto.http;
struct SessionHandle *data = conn->data;
+ struct HTTP *http = data->reqdata.proto.http;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
curl_off_t bytessent;
curl_off_t expectsend = -1; /* default is unknown */
/* This is not NTLM or NTLM with many bytes left to send: close
*/
conn->bits.close = TRUE;
- conn->size = 0; /* don't download any more than 0 bytes */
+ k->size = 0; /* don't download any more than 0 bytes */
}
if(bytessent)
bool pickproxy = FALSE;
CURLcode code = CURLE_OK;
- if(100 == conn->keep.httpcode)
+ if(100 == data->reqdata.keep.httpcode)
/* this is a transient response code, ignore */
return CURLE_OK;
return data->set.http_fail_on_error?CURLE_HTTP_RETURNED_ERROR:CURLE_OK;
if(conn->bits.user_passwd &&
- ((conn->keep.httpcode == 401) ||
- (conn->bits.authneg && conn->keep.httpcode < 300))) {
+ ((data->reqdata.keep.httpcode == 401) ||
+ (conn->bits.authneg && data->reqdata.keep.httpcode < 300))) {
pickhost = pickoneauth(&data->state.authhost);
if(!pickhost)
data->state.authproblem = TRUE;
}
if(conn->bits.proxy_user_passwd &&
- ((conn->keep.httpcode == 407) ||
- (conn->bits.authneg && conn->keep.httpcode < 300))) {
+ ((data->reqdata.keep.httpcode == 407) ||
+ (conn->bits.authneg && data->reqdata.keep.httpcode < 300))) {
pickproxy = pickoneauth(&data->state.authproxy);
if(!pickproxy)
data->state.authproblem = TRUE;
}
if(pickhost || pickproxy) {
- conn->newurl = strdup(data->change.url); /* clone URL */
+ data->reqdata.newurl = strdup(data->change.url); /* clone URL */
if((data->set.httpreq != HTTPREQ_GET) &&
(data->set.httpreq != HTTPREQ_HEAD) &&
}
}
- else if((conn->keep.httpcode < 300) &&
+ else if((data->reqdata.keep.httpcode < 300) &&
(!data->state.authhost.done) &&
conn->bits.authneg) {
/* no (known) authentication available,
we didn't try HEAD or GET */
if((data->set.httpreq != HTTPREQ_GET) &&
(data->set.httpreq != HTTPREQ_HEAD)) {
- conn->newurl = strdup(data->change.url); /* clone URL */
+ data->reqdata.newurl = strdup(data->change.url); /* clone URL */
data->state.authhost.done = TRUE;
}
}
if (Curl_http_should_fail(conn)) {
failf (data, "The requested URL returned error: %d",
- conn->keep.httpcode);
+ data->reqdata.keep.httpcode);
code = CURLE_HTTP_RETURNED_ERROR;
}
/* if exactly this is wanted, go */
int neg = Curl_input_negotiate(conn, start);
if (neg == 0) {
- conn->newurl = strdup(data->change.url);
- data->state.authproblem = (conn->newurl == NULL);
+ data->reqdata.newurl = strdup(data->change.url);
+ data->state.authproblem = (data->reqdata.newurl == NULL);
}
else {
infof(data, "Authentication problem. Ignoring this.\n");
/*
** For readability
*/
- k = &conn->keep;
+ k = &data->reqdata.keep;
/*
** If we haven't been asked to fail on error,
if (k->httpcode < 400)
return 0;
- if (conn->resume_from &&
+ if (data->reqdata.resume_from &&
(data->set.httpreq==HTTPREQ_GET) &&
(k->httpcode == 416)) {
/* "Requested Range Not Satisfiable", just proceed and
infof(data,"%s: authavail = 0x%08x\n",__FUNCTION__,data->state.authavail);
infof(data,"%s: httpcode = %d\n",__FUNCTION__,k->httpcode);
infof(data,"%s: authdone = %d\n",__FUNCTION__,data->state.authdone);
- infof(data,"%s: newurl = %s\n",__FUNCTION__,conn->newurl ? conn->newurl : "(null)");
+ infof(data,"%s: newurl = %s\n",__FUNCTION__,data->reqdata.newurl ? data->reqdata.newurl : "(null)");
infof(data,"%s: authproblem = %d\n",__FUNCTION__,data->state.authproblem);
#endif
void *userp)
{
struct connectdata *conn = (struct connectdata *)userp;
- struct HTTP *http = conn->proto.http;
+ struct HTTP *http = conn->data->reqdata.proto.http;
size_t fullsize = size * nitems;
if(0 == http->postsize)
CURLcode res;
char *ptr;
size_t size;
- struct HTTP *http = conn->proto.http;
+ struct HTTP *http = conn->data->reqdata.proto.http;
size_t sendsize;
curl_socket_t sockfd;
This needs FIXing.
*/
return CURLE_SEND_ERROR;
+ else
+ conn->writechannel_inuse = FALSE;
}
}
if(in->buffer)
{
int subversion=0;
struct SessionHandle *data=conn->data;
- struct Curl_transfer_keeper *k = &conn->keep;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
CURLcode result;
int res;
size_t nread; /* total size read */
infof(data, "Establish HTTP proxy tunnel to %s:%d\n", hostname, remote_port);
do {
- if(conn->newurl) {
+ if(data->reqdata.newurl) {
/* This only happens if we've looped here due to authentication reasons,
and we don't really use the newly cloned URL here then. Just free()
it. */
- free(conn->newurl);
- conn->newurl = NULL;
+ free(data->reqdata.newurl);
+ data->reqdata.newurl = NULL;
}
/* initialize a dynamic send-buffer */
headers. 'newurl' is set to a new URL if we must loop. */
Curl_http_auth_act(conn);
- } while(conn->newurl);
+ } while(data->reqdata.newurl);
if(200 != k->httpcode) {
failf(data, "Received HTTP code %d from proxy after CONNECT",
CURLcode Curl_http_done(struct connectdata *conn,
CURLcode status)
{
- struct SessionHandle *data;
- struct HTTP *http;
-
- data=conn->data;
- http=conn->proto.http;
+ struct SessionHandle *data = conn->data;
+ struct HTTP *http = data->reqdata.proto.http;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
/* set the proper values (possibly modified on POST) */
conn->fread = data->set.fread; /* restore */
}
if(HTTPREQ_POST_FORM == data->set.httpreq) {
- conn->bytecount = http->readbytecount + http->writebytecount;
+ k->bytecount = http->readbytecount + http->writebytecount;
Curl_formclean(http->sendit); /* Now free that whole lot */
if(http->form.fp) {
}
}
else if(HTTPREQ_PUT == data->set.httpreq)
- conn->bytecount = http->readbytecount + http->writebytecount;
+ k->bytecount = http->readbytecount + http->writebytecount;
if (status != CURLE_OK)
return (status);
if(!conn->bits.retry &&
((http->readbytecount +
- conn->headerbytecount -
- conn->deductheadercount)) <= 0) {
+ k->headerbytecount -
+ k->deductheadercount)) <= 0) {
/* If this connection isn't simply closed to be retried, AND nothing was
read from the HTTP server (that counts), this can't be right so we
return an error here */
char *buf = data->state.buffer; /* this is a short cut to the buffer */
CURLcode result=CURLE_OK;
struct HTTP *http;
- char *ppath = conn->path;
+ char *ppath = data->reqdata.path;
char *host = conn->host.name;
const char *te = ""; /* tranfer-encoding */
char *ptr;
the rest of the request in the PERFORM phase. */
*done = TRUE;
- if(!conn->proto.http) {
+ if(!data->reqdata.proto.http) {
/* Only allocate this struct if we don't already have it! */
http = (struct HTTP *)malloc(sizeof(struct HTTP));
if(!http)
return CURLE_OUT_OF_MEMORY;
memset(http, 0, sizeof(struct HTTP));
- conn->proto.http = http;
+ data->reqdata.proto.http = http;
}
else
- http = conn->proto.http;
+ http = data->reqdata.proto.http;
/* We default to persistant connections */
conn->bits.close = FALSE;
if(( (HTTPREQ_POST == httpreq) ||
(HTTPREQ_POST_FORM == httpreq) ||
(HTTPREQ_PUT == httpreq) ) &&
- conn->resume_from) {
+ data->reqdata.resume_from) {
/**********************************************************************
* Resuming upload in HTTP means that we PUT or POST and that we have
* got a resume_from value set. The resume value has already created
* file size before we continue this venture in the dark lands of HTTP.
*********************************************************************/
- if(conn->resume_from < 0 ) {
+ if(data->reqdata.resume_from < 0 ) {
/*
* This is meant to get the size of the present remote-file by itself.
* We don't support this now. Bail out!
*/
- conn->resume_from = 0;
+ data->reqdata.resume_from = 0;
}
- if(conn->resume_from) {
+ if(data->reqdata.resume_from) {
/* do we still game? */
curl_off_t passed=0;
input. If we knew it was a proper file we could've just
fseek()ed but we only have a stream here */
do {
- size_t readthisamountnow = (size_t)(conn->resume_from - passed);
+ size_t readthisamountnow = (size_t)(data->reqdata.resume_from - passed);
size_t actuallyread;
if(readthisamountnow > BUFSIZE)
passed);
return CURLE_READ_ERROR;
}
- } while(passed != conn->resume_from); /* loop until done */
+ } while(passed != data->reqdata.resume_from); /* loop until done */
/* now, decrease the size of the read */
if(data->set.infilesize>0) {
- data->set.infilesize -= conn->resume_from;
+ data->set.infilesize -= data->reqdata.resume_from;
if(data->set.infilesize <= 0) {
failf(data, "File already completely uploaded");
/* we've passed, proceed as normal */
}
}
- if(conn->bits.use_range) {
+ if(data->reqdata.use_range) {
/*
* A range is selected. We use different headers whether we're downloading
* or uploading and we always let customized headers override our internal
/* if a line like this was already allocated, free the previous one */
if(conn->allocptr.rangeline)
free(conn->allocptr.rangeline);
- conn->allocptr.rangeline = aprintf("Range: bytes=%s\r\n", conn->range);
+ conn->allocptr.rangeline = aprintf("Range: bytes=%s\r\n", data->reqdata.range);
}
else if((httpreq != HTTPREQ_GET) &&
!checkheaders(data, "Content-Range:")) {
- if(conn->resume_from) {
+ if(data->reqdata.resume_from) {
/* This is because "resume" was selected */
curl_off_t total_expected_size=
- conn->resume_from + data->set.infilesize;
+ data->reqdata.resume_from + data->set.infilesize;
conn->allocptr.rangeline =
aprintf("Content-Range: bytes %s%" FORMAT_OFF_T
"/%" FORMAT_OFF_T "\r\n",
- conn->range, total_expected_size-1,
+ data->reqdata.range, total_expected_size-1,
total_expected_size);
}
else {
append total size */
conn->allocptr.rangeline =
aprintf("Content-Range: bytes %s/%" FORMAT_OFF_T "\r\n",
- conn->range, data->set.infilesize);
+ data->reqdata.range, data->set.infilesize);
}
}
}
conn->allocptr.proxyuserpwd?
conn->allocptr.proxyuserpwd:"",
conn->allocptr.userpwd?conn->allocptr.userpwd:"",
- (conn->bits.use_range && conn->allocptr.rangeline)?
+ (data->reqdata.use_range && conn->allocptr.rangeline)?
conn->allocptr.rangeline:"",
(data->set.useragent && *data->set.useragent && conn->allocptr.uagent)?
conn->allocptr.uagent:"",
Curl_share_lock(data, CURL_LOCK_DATA_COOKIE, CURL_LOCK_ACCESS_SINGLE);
co = Curl_cookie_getlist(data->cookies,
conn->allocptr.cookiehost?
- conn->allocptr.cookiehost:host, conn->path,
+ conn->allocptr.cookiehost:host, data->reqdata.path,
(bool)(conn->protocol&PROT_HTTPS?TRUE:FALSE));
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
}
failf(data, "Failed sending POST request");
else
/* setup variables for the upcoming transfer */
- result = Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
- &http->readbytecount,
- -1, NULL);
+ result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
+ &http->readbytecount,
+ -1, NULL);
break;
}
failf(data, "Failed sending POST request");
else
/* setup variables for the upcoming transfer */
- result = Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
- &http->readbytecount,
- FIRSTSOCKET,
- &http->writebytecount);
+ result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
+ &http->readbytecount,
+ FIRSTSOCKET,
+ &http->writebytecount);
if(result) {
Curl_formclean(http->sendit); /* free that whole lot */
return result;
failf(data, "Failed sending PUT request");
else
/* prepare for transfer */
- result = Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
- &http->readbytecount,
- postsize?FIRSTSOCKET:-1,
- postsize?&http->writebytecount:NULL);
+ result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
+ &http->readbytecount,
+ postsize?FIRSTSOCKET:-1,
+ postsize?&http->writebytecount:NULL);
if(result)
return result;
break;
failf(data, "Failed sending HTTP POST request");
else
result =
- Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
+ Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
&http->readbytecount,
http->postdata?FIRSTSOCKET:-1,
http->postdata?&http->writebytecount:NULL);
failf(data, "Failed sending HTTP request");
else
/* HTTP GET/HEAD download: */
- result = Curl_Transfer(conn, FIRSTSOCKET, -1, TRUE,
+ result = Curl_setup_transfer(conn, FIRSTSOCKET, -1, TRUE,
&http->readbytecount,
http->postdata?FIRSTSOCKET:-1,
http->postdata?&http->writebytecount:NULL);
void Curl_httpchunk_init(struct connectdata *conn)
{
- struct Curl_chunker *chunk = &conn->proto.http->chunk;
+ struct Curl_chunker *chunk = &conn->data->reqdata.proto.http->chunk;
chunk->hexindex=0; /* start at 0 */
chunk->dataleft=0; /* no data left yet! */
chunk->state = CHUNK_HEX; /* we get hex first! */
ssize_t *wrotep)
{
CURLcode result=CURLE_OK;
- struct Curl_chunker *ch = &conn->proto.http->chunk;
- struct Curl_transfer_keeper *k = &conn->keep;
+ struct SessionHandle *data = conn->data;
+ struct Curl_chunker *ch = &data->reqdata.proto.http->chunk;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
size_t piece;
size_t length = (size_t)datalen;
size_t *wrote = (size_t *)wrotep;
/* Write the data portion available */
#ifdef HAVE_LIBZ
- switch (conn->keep.content_encoding) {
+ switch (data->reqdata.keep.content_encoding) {
case IDENTITY:
#endif
if(!k->ignorebody)
break;
case DEFLATE:
- /* update conn->keep.str to point to the chunk data. */
- conn->keep.str = datap;
- result = Curl_unencode_deflate_write(conn, &conn->keep,
+ /* update data->reqdata.keep.str to point to the chunk data. */
+ data->reqdata.keep.str = datap;
+ result = Curl_unencode_deflate_write(conn, &data->reqdata.keep,
(ssize_t)piece);
break;
case GZIP:
- /* update conn->keep.str to point to the chunk data. */
- conn->keep.str = datap;
- result = Curl_unencode_gzip_write(conn, &conn->keep,
+ /* update data->reqdata.keep.str to point to the chunk data. */
+ data->reqdata.keep.str = datap;
+ result = Curl_unencode_gzip_write(conn, &data->reqdata.keep,
(ssize_t)piece);
break;
#ifndef __HTTP_DIGEST_H
#define __HTTP_DIGEST_H
/***************************************************************************
- * _ _ ____ _
- * Project ___| | | | _ \| |
- * / __| | | | |_) | |
- * | (__| |_| | _ <| |___
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2004, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2006, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
- *
+ *
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
bool proxy,
unsigned char *request,
unsigned char *uripath);
-void Curl_digest_cleanup(struct SessionHandle *data);
void Curl_digest_cleanup_one(struct digestdata *dig);
+#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_CRYPTO_AUTH)
+void Curl_digest_cleanup(struct SessionHandle *data);
+#else
+#define Curl_digest_cleanup(x) do {} while(0)
+#endif
+
#endif
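
The hunk above uses a common trick: declare the real cleanup function only when HTTP and crypto auth are compiled in, and otherwise define the name as a do-nothing macro so call sites stay free of #ifdef clutter. A minimal sketch of the same idiom, with an invented feature name that is not part of the patch:

  #ifdef WITH_FROBNICATE              /* hypothetical feature switch */
  void frobnicate_cleanup(void *state);
  #else
  /* expands to one empty statement, so "frobnicate_cleanup(s);" still
     compiles cleanly, even as the body of an if/else without braces */
  #define frobnicate_cleanup(x) do {} while(0)
  #endif
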
DynaClose();
/* no data to transfer */
- Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
+ Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
conn->bits.close = TRUE;
return status;
free(list);
}
}
+
+size_t
+Curl_llist_count(struct curl_llist *list)
+{
+ return list->size;
+}
CURLM_STATE_WAITCONNECT, /* awaiting the connect to finalize */
CURLM_STATE_PROTOCONNECT, /* completing the protocol-specific connect
phase */
+ CURLM_STATE_WAITDO, /* wait for our turn to send the request */
CURLM_STATE_DO, /* start send off the request (part 1) */
CURLM_STATE_DOING, /* sending off the request (part 1) */
CURLM_STATE_DO_MORE, /* send off the request (part 2) */
+ CURLM_STATE_DO_DONE, /* done sending off request */
+ CURLM_STATE_WAITPERFORM, /* wait for our turn to read the response */
CURLM_STATE_PERFORM, /* transfer data */
CURLM_STATE_TOOFAST, /* wait because limit-rate exceeded */
CURLM_STATE_DONE, /* post data transfer operation */
CURLM_STATE_COMPLETED, /* operation complete */
+ CURLM_STATE_CANCELLED, /* cancelled */
CURLM_STATE_LAST /* not a true state, never use this */
} CURLMstate;
unsigned int action; /* socket action bitmap */
};
+struct closure {
+ struct closure *next; /* a simple one-way list of structs */
+ struct SessionHandle *easy_handle;
+};
+
struct Curl_one_easy {
/* first, two fields for the linked list of these */
struct Curl_one_easy *next;
the pluralis form, there can be more than one easy handle waiting on the
same actual socket) */
struct curl_hash *sockhash;
+
+ /* Whether pipelining is enabled for this multi handle */
+ bool pipelining_enabled;
+
+ /* shared connection cache */
+ struct conncache *connc;
+
+ /* list of easy handles kept around for doing nice connection closures */
+ struct closure *closure;
};
+static bool multi_conn_using(struct Curl_multi *multi,
+ struct SessionHandle *data);
+
/* always use this function to change state, to make debugging easier */
static void multistate(struct Curl_one_easy *easy, CURLMstate state)
{
"WAITRESOLVE",
"WAITCONNECT",
"PROTOCONNECT",
+ "WAITDO",
"DO",
"DOING",
"DO_MORE",
+ "DO_DONE",
+ "WAITPERFORM",
"PERFORM",
"TOOFAST",
"DONE",
"COMPLETED",
+ "CANCELLED"
};
CURLMstate oldstate = easy->state;
+ int index = -1;
#endif
easy->state = state;
#ifdef CURLDEBUG
+ if(easy->state > CURLM_STATE_CONNECT &&
+ easy->state < CURLM_STATE_COMPLETED)
+ /* 'index' only exists in CURLDEBUG builds, so only assign it here */
+ index = easy->easy_conn->connectindex;
+
infof(easy->easy_handle,
- "STATE: %s => %s handle %p: \n",
- statename[oldstate], statename[easy->state], (char *)easy);
+ "STATE: %s => %s handle %p; (connection #%d) \n",
+ statename[oldstate], statename[easy->state],
+ (char *)easy, index);
#endif
if(state == CURLM_STATE_COMPLETED)
/* changing to COMPLETED means there's one less easy handle 'alive' */
return NULL;
}
+ multi->connc = Curl_mk_connc(CONNCACHE_MULTI);
+ if(!multi->connc) {
+ Curl_hash_destroy(multi->hostcache);
+ free(multi);
+ return NULL;
+ }
+
return (CURLM *) multi;
}
if(!GOOD_EASY_HANDLE(easy_handle))
return CURLM_BAD_EASY_HANDLE;
+ /* TODO: add some kind of code that prevents a user from being able to
+ add the same handle more than once! */
+
/* Now, time to add an easy handle to the multi stack */
easy = (struct Curl_one_easy *)calloc(sizeof(struct Curl_one_easy), 1);
if(!easy)
easy->easy_handle = easy_handle;
multistate(easy, CURLM_STATE_INIT);
- /* for multi interface connections, we share DNS cache automaticly if the
+ /* for multi interface connections, we share DNS cache automatically if the
easy handle's one is currently private. */
if (easy->easy_handle->dns.hostcache &&
(easy->easy_handle->dns.hostcachetype == HCACHE_PRIVATE)) {
easy->easy_handle->dns.hostcachetype = HCACHE_MULTI;
}
+ if(easy->easy_handle->state.connc) {
+ if(easy->easy_handle->state.connc->type == CONNCACHE_PRIVATE) {
+ /* kill old private version */
+ Curl_rm_connc(easy->easy_handle->state.connc);
+ /* point out our shared one instead */
+ easy->easy_handle->state.connc = multi->connc;
+ }
+ /* else it is already using multi? */
+ }
+ else
+ /* point out our shared one */
+ easy->easy_handle->state.connc = multi->connc;
+
+ /* Make sure the type is set up correctly */
+ easy->easy_handle->state.connc->type = CONNCACHE_MULTI;
+
+
/* We add this new entry first in the list. We make our 'next' point to the
previous next and our 'prev' point back to the 'first' struct */
easy->next = multi->easy.next;
break;
easy=easy->next;
}
+
if(easy) {
/* If the 'state' is not INIT or COMPLETED, we might need to do something
nice to put the easy_handle in a good known state when this returns. */
alive connections when this is removed */
multi->num_alive--;
+ if (easy->easy_handle->state.is_in_pipeline &&
+ easy->state > CURLM_STATE_DO) {
+ /* If the handle is in a pipeline and has finished sending off its
+ request, we need to remember the fact that we want to remove this
+ handle but do the actual removal at a later time */
+ easy->easy_handle->state.cancelled = TRUE;
+ return CURLM_OK;
+ }
+
/* The timer must be shut down before easy->multi is set to NULL,
else the timenode will remain in the splay tree after
curl_easy_cleanup is called. */
easy->easy_handle->dns.hostcachetype = HCACHE_NONE;
}
- Curl_easy_addmulti(easy->easy_handle, NULL); /* clear the association
- to this multi handle */
-
/* if we have a connection we must call Curl_done() here so that we
don't leave a half-baked one around */
- if(easy->easy_conn)
+ if(easy->easy_conn) {
+ /* Set up the association right */
+ easy->easy_conn->data = easy->easy_handle;
Curl_done(&easy->easy_conn, easy->result);
+ }
+
+ /* If this easy_handle was the last one in charge for one or more
+ connections in the shared connection cache, we might need to keep this
+ handle around until either A) the connection is closed and killed
+ properly, or B) another easy_handle uses the connection.
+
+ The reason why we need to have an easy_handle associated with a live
+ connection is simply that some connections will need a handle to get
+ closed down properly. Currently, the only connections that need to keep
+ an easy_handle around are those using FTP(S). Such connections have
+ the PROT_CLOSEACTION bit set.
+
+ Thus, we need to check all connections in the shared cache that point
+ to this handle and are using PROT_CLOSEACTION. If there's any, we need
+ to add this handle to the list of "easy handles kept around for nice
+ connection closures".
+ */
+ if(multi_conn_using(multi, easy->easy_handle))
+ /* There's at least one connection using this handle so we must keep
+ this handle around. We also keep the connection cache pointer
+ pointing to the shared one since that will be used on close as
+ well. */
+ easy->easy_handle->state.shared_conn = multi;
+ else
+ if(easy->easy_handle->state.connc->type == CONNCACHE_MULTI)
+ /* if this was using the shared connection cache we clear the pointer
+ to that */
+ easy->easy_handle->state.connc = NULL;
+
+ Curl_easy_addmulti(easy->easy_handle, NULL); /* clear the association
+ to this multi handle */
/* make the previous node point to our next */
if(easy->prev)
return CURLM_BAD_EASY_HANDLE; /* twasn't found */
}
+bool Curl_multi_canPipeline(struct Curl_multi* multi)
+{
+ return multi->pipelining_enabled;
+}
+
static int waitconnect_getsock(struct connectdata *conn,
curl_socket_t *sock,
int numsocks)
of sockets */
int numsocks)
{
+ if (easy->easy_handle->state.pipe_broke) {
+ return 0;
+ }
+
+ if (easy->state > CURLM_STATE_CONNECT &&
+ easy->state < CURLM_STATE_COMPLETED) {
+ /* Set up ownership correctly */
+ easy->easy_conn->data = easy->easy_handle;
+ }
+
switch(easy->state) {
case CURLM_STATE_TOOFAST: /* returns 0, so will not select. */
default:
return domore_getsock(easy->easy_conn, socks, numsocks);
case CURLM_STATE_PERFORM:
+ case CURLM_STATE_WAITPERFORM:
return Curl_single_getsock(easy->easy_conn, socks, numsocks);
}
bool dophase_done;
bool done;
CURLMcode result = CURLM_OK;
+ struct Curl_transfer_keeper *k;
do {
+
+ if (easy->easy_handle->state.pipe_broke) {
+ infof(easy->easy_handle, "Pipe broke: handle 0x%x\n", easy);
+ if(easy->easy_handle->state.is_in_pipeline) {
+ /* Head back to the CONNECT state */
+ multistate(easy, CURLM_STATE_CONNECT);
+ result = CURLM_CALL_MULTI_PERFORM;
+ easy->result = CURLE_OK;
+ } else {
+ easy->result = CURLE_COULDNT_CONNECT;
+ multistate(easy, CURLM_STATE_COMPLETED);
+ }
+
+ easy->easy_handle->state.pipe_broke = FALSE;
+ easy->easy_conn = NULL;
+ break;
+ }
+
+ if (easy->state > CURLM_STATE_CONNECT &&
+ easy->state < CURLM_STATE_COMPLETED) {
+ /* Make sure we set the connection's current owner */
+ easy->easy_conn->data = easy->easy_handle;
+ }
+
if (CURLM_STATE_WAITCONNECT <= easy->state &&
easy->state <= CURLM_STATE_DO &&
easy->easy_handle->change.url_changed) {
Curl_posttransfer(easy->easy_handle);
easy->result = Curl_done(&easy->easy_conn, CURLE_OK);
+ /* We make sure that the pipe broken flag is reset
+ because in this case, it isn't an actual break */
+ easy->easy_handle->state.pipe_broke = FALSE;
if(CURLE_OK == easy->result) {
gotourl = strdup(easy->easy_handle->change.url);
if(gotourl) {
&async, &protocol_connect);
if(CURLE_OK == easy->result) {
+ /* Add this handle to the send pipeline */
+ Curl_addHandleToPipeline(easy->easy_handle,
+ easy->easy_conn->send_pipe);
+
if(async)
/* We're now waiting for an asynchronous name lookup */
multistate(easy, CURLM_STATE_WAITRESOLVE);
else {
/* after the connect has been sent off, go WAITCONNECT unless the
protocol connect is already done and we can go directly to
- DO! */
+ WAITDO! */
result = CURLM_CALL_MULTI_PERFORM;
- if(protocol_connect)
- multistate(easy, CURLM_STATE_DO);
- else
+ if(protocol_connect) {
+ multistate(easy, CURLM_STATE_WAITDO);
+ } else {
multistate(easy, CURLM_STATE_WAITCONNECT);
+ }
}
}
break;
case CURLM_STATE_WAITCONNECT:
/* awaiting a completion of an asynch connect */
- easy->result = Curl_is_connected(easy->easy_conn, FIRSTSOCKET,
+ easy->result = Curl_is_connected(easy->easy_conn,
+ FIRSTSOCKET,
&connected);
if(connected)
easy->result = Curl_protocol_connect(easy->easy_conn,
multistate(easy, CURLM_STATE_PROTOCONNECT);
}
else {
- /* after the connect has completed, go DO */
- multistate(easy, CURLM_STATE_DO);
+ /* after the connect has completed, go WAITDO */
+ multistate(easy, CURLM_STATE_WAITDO);
+
result = CURLM_CALL_MULTI_PERFORM;
}
}
easy->result = Curl_protocol_connecting(easy->easy_conn,
&protocol_connect);
if(protocol_connect) {
- /* after the connect has completed, go DO */
- multistate(easy, CURLM_STATE_DO);
+ /* after the connect has completed, go WAITDO */
+ multistate(easy, CURLM_STATE_WAITDO);
result = CURLM_CALL_MULTI_PERFORM;
}
else if(easy->result) {
}
break;
+ case CURLM_STATE_WAITDO:
+ /* Wait for our turn to DO when we're pipelining requests */
+ infof(easy->easy_handle, "Connection #%d: send pipe size = %d\n",
+ easy->easy_conn->connectindex,
+ easy->easy_conn->send_pipe->size);
+ if (!easy->easy_conn->writechannel_inuse &&
+ Curl_isHandleAtHead(easy->easy_handle,
+ easy->easy_conn->send_pipe)) {
+ /* Grab the channel */
+ easy->easy_conn->writechannel_inuse = TRUE;
+ multistate(easy, CURLM_STATE_DO);
+ result = CURLM_CALL_MULTI_PERFORM;
+ }
+ break;
+
case CURLM_STATE_DO:
if(easy->easy_handle->set.connect_only) {
/* keep connection open for application to use the socket */
}
else {
/* Perform the protocol's DO action */
- easy->result = Curl_do(&easy->easy_conn, &dophase_done);
+ easy->result = Curl_do(&easy->easy_conn,
+ &dophase_done);
if(CURLE_OK == easy->result) {
result = CURLM_OK;
}
- /* after DO, go PERFORM... or DO_MORE */
+ /* after DO, go DO_DONE... or DO_MORE */
else if(easy->easy_conn->bits.do_more) {
/* we're supposed to do more, but we need to sit down, relax
and wait a little while first */
result = CURLM_OK;
}
else {
- /* we're done with the DO, now PERFORM */
+ /* we're done with the DO, now DO_DONE */
easy->result = Curl_readwrite_init(easy->easy_conn);
if(CURLE_OK == easy->result) {
- multistate(easy, CURLM_STATE_PERFORM);
+ multistate(easy, CURLM_STATE_DO_DONE);
result = CURLM_CALL_MULTI_PERFORM;
}
}
case CURLM_STATE_DOING:
/* we continue DOING until the DO phase is complete */
- easy->result = Curl_protocol_doing(easy->easy_conn, &dophase_done);
+ easy->result = Curl_protocol_doing(easy->easy_conn,
+ &dophase_done);
if(CURLE_OK == easy->result) {
if(dophase_done) {
/* after DO, go PERFORM... or DO_MORE */
result = CURLM_OK;
}
else {
- /* we're done with the DO, now PERFORM */
+ /* we're done with the DO, now DO_DONE */
easy->result = Curl_readwrite_init(easy->easy_conn);
if(CURLE_OK == easy->result) {
- multistate(easy, CURLM_STATE_PERFORM);
+ multistate(easy, CURLM_STATE_DO_DONE);
result = CURLM_CALL_MULTI_PERFORM;
}
}
case CURLM_STATE_DO_MORE:
/* Ready to do more? */
- easy->result = Curl_is_connected(easy->easy_conn, SECONDARYSOCKET,
+ easy->result = Curl_is_connected(easy->easy_conn,
+ SECONDARYSOCKET,
&connected);
if(connected) {
/*
- * When we are connected, DO MORE and then go PERFORM
+ * When we are connected, DO MORE and then go DO_DONE
*/
easy->result = Curl_do_more(easy->easy_conn);
easy->result = Curl_readwrite_init(easy->easy_conn);
if(CURLE_OK == easy->result) {
- multistate(easy, CURLM_STATE_PERFORM);
+ multistate(easy, CURLM_STATE_DO_DONE);
result = CURLM_CALL_MULTI_PERFORM;
}
}
break;
+ case CURLM_STATE_DO_DONE:
+ /* Remove ourselves from the send pipeline */
+ Curl_removeHandleFromPipeline(easy->easy_handle,
+ easy->easy_conn->send_pipe);
+ /* Add ourselves to the recv pipeline */
+ Curl_addHandleToPipeline(easy->easy_handle,
+ easy->easy_conn->recv_pipe);
+ multistate(easy, CURLM_STATE_WAITPERFORM);
+ result = CURLM_CALL_MULTI_PERFORM;
+ break;
+
+ case CURLM_STATE_WAITPERFORM:
+ infof(easy->easy_handle, "Connection #%d: recv pipe size = %d\n",
+ easy->easy_conn->connectindex,
+ easy->easy_conn->recv_pipe->size);
+ /* Wait for our turn to PERFORM */
+ if (!easy->easy_conn->readchannel_inuse &&
+ Curl_isHandleAtHead(easy->easy_handle,
+ easy->easy_conn->recv_pipe)) {
+ /* Grab the channel */
+ easy->easy_conn->readchannel_inuse = TRUE;
+ multistate(easy, CURLM_STATE_PERFORM);
+ result = CURLM_CALL_MULTI_PERFORM;
+ }
+ break;
+
case CURLM_STATE_TOOFAST: /* limit-rate exceeded in either direction */
/* if both rates are within spec, resume transfer */
Curl_pgrsUpdate(easy->easy_conn);
( easy->easy_handle->progress.dlspeed <
easy->easy_handle->set.max_recv_speed ) )
)
- multistate(easy, CURLM_STATE_PERFORM);
-
+ multistate(easy, CURLM_STATE_PERFORM);
break;
case CURLM_STATE_PERFORM:
-
/* check if over speed */
if ( ( ( easy->easy_handle->set.max_send_speed > 0 ) &&
( easy->easy_handle->progress.ulspeed >
/* read/write data if it is ready to do so */
easy->result = Curl_readwrite(easy->easy_conn, &done);
+ k = &easy->easy_handle->reqdata.keep;
+
+ if (!(k->keepon & KEEP_READ)) {
+ /* We're done reading */
+ easy->easy_conn->readchannel_inuse = FALSE;
+ }
+
+ if (!(k->keepon & KEEP_WRITE)) {
+ /* We're done writing */
+ easy->easy_conn->writechannel_inuse = FALSE;
+ }
+
if(easy->result) {
/* The transfer phase returned error, we mark the connection to get
* closed to prevent being re-used. This is becasue we can't
Curl_posttransfer(easy->easy_handle);
Curl_done(&easy->easy_conn, easy->result);
}
-
else if(TRUE == done) {
char *newurl;
bool retry = Curl_retry_request(easy->easy_conn, &newurl);
/* call this even if the readwrite function returned error */
Curl_posttransfer(easy->easy_handle);
+ if (retry) {
+ Curl_removeHandleFromPipeline(easy->easy_handle,
+ easy->easy_conn->recv_pipe);
+ }
+
/* When we follow redirects, must to go back to the CONNECT state */
- if(easy->easy_conn->newurl || retry) {
+ if(easy->easy_handle->reqdata.newurl || retry) {
if(!retry) {
/* if the URL is a follow-location and not just a retried request
then figure out the URL here */
- newurl = easy->easy_conn->newurl;
- easy->easy_conn->newurl = NULL;
+ newurl = easy->easy_handle->reqdata.newurl;
+ easy->easy_handle->reqdata.newurl = NULL;
}
easy->result = Curl_done(&easy->easy_conn, CURLE_OK);
if(easy->result == CURLE_OK)
result = CURLM_CALL_MULTI_PERFORM;
}
}
+
break;
case CURLM_STATE_DONE:
- /* post-transfer command */
- easy->result = Curl_done(&easy->easy_conn, CURLE_OK);
+ /* Remove ourselves from the receive pipeline */
+ Curl_removeHandleFromPipeline(easy->easy_handle,
+ easy->easy_conn->recv_pipe);
+ easy->easy_handle->state.is_in_pipeline = FALSE;
+
+ if (easy->easy_conn->bits.stream_was_rewound) {
+ /* This request read past its response boundary so we quickly
+ let the other requests consume those bytes since there is no
+ guarantee that the socket will become active again */
+ result = CURLM_CALL_MULTI_PERFORM;
+ }
+
+ if (!easy->easy_handle->state.cancelled) {
+ /* post-transfer command */
+ easy->result = Curl_done(&easy->easy_conn, CURLE_OK);
+
+ /* after we have DONE what we're supposed to do, go COMPLETED, and
+ it doesn't matter what the Curl_done() returned! */
+ multistate(easy, CURLM_STATE_COMPLETED);
+ }
- /* after we have DONE what we're supposed to do, go COMPLETED, and
- it doesn't matter what the Curl_done() returned! */
- multistate(easy, CURLM_STATE_COMPLETED);
break;
case CURLM_STATE_COMPLETED:
+ if (easy->easy_handle->state.cancelled) {
+ /* Go into the CANCELLED state if we were cancelled */
+ multistate(easy, CURLM_STATE_CANCELLED);
+ }
+
/* this is a completed transfer, it is likely to still be connected */
/* This node should be delinked from the list now and we should post
an information message that we are complete. */
break;
+
+ case CURLM_STATE_CANCELLED:
+ /* Cancelled transfer, wait to be cleaned up */
+ break;
+
default:
return CURLM_INTERNAL_ERROR;
}
if(CURLE_OK != easy->result) {
/*
* If an error was returned, and we aren't in completed state now,
- * then we go to completed and consider this transfer aborted. */
+ * then we go to completed and consider this transfer aborted.
+ */
multistate(easy, CURLM_STATE_COMPLETED);
}
}
msg->extmsg.msg = CURLMSG_DONE;
msg->extmsg.easy_handle = easy->easy_handle;
msg->extmsg.data.result = easy->result;
- msg->next=NULL;
+ msg->next = NULL;
easy->msg = msg;
easy->msg_num = 1; /* there is one unread message here */
easy=multi->easy.next;
while(easy) {
- CURLMcode result = multi_runsingle(multi, easy);
+ CURLMcode result;
+
+ if (easy->easy_handle->state.cancelled &&
+ easy->state == CURLM_STATE_CANCELLED) {
+ /* Remove cancelled handles once it's safe to do so; grab the next
+ node first since removing the handle frees this list entry */
+ struct Curl_one_easy *nextone = easy->next;
+ Curl_multi_rmeasy(multi_handle, easy->easy_handle);
+ easy = nextone;
+ continue;
+ }
+
+ result = multi_runsingle(multi, easy);
if(result)
returncode = result;
int key = now.tv_sec; /* drop the usec part */
multi->timetree = Curl_splaygetbest(key, multi->timetree, &t);
-
if (t) {
struct SessionHandle *d = t->payload;
struct timeval* tv = &d->state.expiretime;
curl_multi_remove_handle(multi_handle, easy_handle);
}
+
CURLMcode curl_multi_cleanup(CURLM *multi_handle)
{
struct Curl_multi *multi=(struct Curl_multi *)multi_handle;
struct Curl_one_easy *easy;
struct Curl_one_easy *nexteasy;
+ int i;
+ struct closure *cl;
+ struct closure *n;
if(GOOD_MULTI_HANDLE(multi)) {
multi->type = 0; /* not good anymore */
Curl_hash_destroy(multi->hostcache);
Curl_hash_destroy(multi->sockhash);
+#if 1
+ /* go over all connections that have close actions */
+ for(i=0; i< multi->connc->num; i++) {
+ if(multi->connc->connects[i] &&
+ multi->connc->connects[i]->protocol & PROT_CLOSEACTION)
+ Curl_disconnect(multi->connc->connects[i]);
+ }
+ /* now walk through the list of handles we kept around only to be
+ able to close connections "properly" */
+ cl = multi->closure;
+ while(cl) {
+ cl->easy_handle->state.shared_conn = NULL; /* no more shared */
+ Curl_close(cl->easy_handle); /* close handle */
+ n = cl->next;
+ free(cl);
+ cl= n;
+ }
+#endif
+
+ Curl_rm_connc(multi->connc);
+
/* remove all easy handles */
easy = multi->easy.next;
while(easy) {
easy->easy_handle->dns.hostcache = NULL;
easy->easy_handle->dns.hostcachetype = HCACHE_NONE;
}
+
+ /* Clear the pointer to the connection cache */
+ easy->easy_handle->state.connc = NULL;
+
Curl_easy_addmulti(easy->easy_handle, NULL); /* clear the association */
if (easy->msg)
case CURLMOPT_SOCKETDATA:
multi->socket_userp = va_arg(param, void *);
break;
+ case CURLMOPT_PIPELINING:
+ multi->pipelining_enabled = va_arg(param, long);
+ break;
default:
res = CURLM_UNKNOWN_OPTION;
}
return CURLM_OK;
}
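
For context, here is a rough sketch of how an application would flip that switch from the outside. It only uses public multi API calls; the URLs are placeholders and the drive loop is deliberately simplified (a real program would wait on the sockets between calls):

  #include <curl/curl.h>

  int main(void)
  {
    CURLM *multi = curl_multi_init();
    CURL *first = curl_easy_init();
    CURL *second = curl_easy_init();
    int running = 0;

    /* ask the multi handle to pipeline HTTP requests on re-used connections */
    curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L);

    curl_easy_setopt(first, CURLOPT_URL, "http://example.com/one");
    curl_easy_setopt(second, CURLOPT_URL, "http://example.com/two");

    curl_multi_add_handle(multi, first);
    curl_multi_add_handle(multi, second);

    do {
      while(curl_multi_perform(multi, &running) == CURLM_CALL_MULTI_PERFORM)
        ;
      /* a real application would select() on curl_multi_fdset() here */
    } while(running);

    curl_multi_remove_handle(multi, first);
    curl_multi_remove_handle(multi, second);
    curl_easy_cleanup(first);
    curl_easy_cleanup(second);
    curl_multi_cleanup(multi);
    return 0;
  }
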
+
+static bool multi_conn_using(struct Curl_multi *multi,
+ struct SessionHandle *data)
+{
+ /* any live CLOSEACTION connections pointing to the given 'data' ? */
+ int i;
+
+ for(i=0; i< multi->connc->num; i++) {
+ if(multi->connc->connects[i] &&
+ (multi->connc->connects[i]->data == data) &&
+ multi->connc->connects[i]->protocol & PROT_CLOSEACTION)
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/* add the given data pointer to the list of 'closure handles' that are
+ kept around only to be able to close some connections nicely */
+void Curl_multi_add_closure(struct Curl_multi *multi,
+ struct SessionHandle *data)
+{
+ int i;
+ struct closure *cl = (struct closure *)calloc(sizeof(struct closure), 1);
+ struct closure *p=NULL;
+ struct closure *n;
+ if(cl) {
+ cl->easy_handle = data;
+ cl->next = multi->closure;
+ multi->closure = cl;
+ }
+
+ p = multi->closure;
+ cl = p->next; /* start immediately on the second entry, since the first is
+ the one we just added and it is _very_ likely to still be
+ in use by a connection in the cache - that is the whole
+ purpose of adding it to this list! */
+
+ /* When adding, scan through all the other currently kept handles and kill
+ every one that no connection refers to anymore. */
+ while(cl) {
+ bool inuse = FALSE;
+ for(i=0; i< multi->connc->num; i++) {
+ if(multi->connc->connects[i] &&
+ (multi->connc->connects[i]->data == cl->easy_handle)) {
+ inuse = TRUE;
+ break;
+ }
+ }
+
+ n = cl->next;
+
+ if(!inuse) {
+ /* cl->easy_handle is now killable */
+ infof(data, "Delayed kill of easy handle %p\n", cl->easy_handle);
+ /* mark it as no longer having a connection around that uses it */
+ cl->easy_handle->state.shared_conn= NULL;
+ Curl_close(cl->easy_handle);
+ if(p)
+ p->next = n;
+ else
+ multi->closure = n;
+ free(cl);
+ }
+ else
+ p = cl;
+
+ cl = n;
+ }
+
+}
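
The scan above is the usual delete-while-walking pattern for a singly linked list. Stripped of the connection-cache details it looks like the sketch below, where the names and the keep_node() predicate are purely illustrative:

  #include <stdlib.h>

  struct node {
    struct node *next;
    /* payload ... */
  };

  /* drop every node the predicate rejects; returns the (possibly new) head */
  static struct node *prune(struct node *head, int (*keep_node)(struct node *))
  {
    struct node *prev = NULL;
    struct node *cur = head;

    while(cur) {
      struct node *next = cur->next;  /* grab it before cur may be freed */
      if(!keep_node(cur)) {
        if(prev)
          prev->next = next;          /* unlink from the middle or end */
        else
          head = next;                /* unlink the head */
        free(cur);
      }
      else
        prev = cur;
      cur = next;
    }
    return head;
  }
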
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2005, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2006, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
void Curl_multi_rmeasy(void *multi, CURL *data);
+bool Curl_multi_canPipeline(struct Curl_multi* multi);
+
+void Curl_multi_add_closure(struct Curl_multi *multi,
+ struct SessionHandle *data);
+
/* the write bits start at bit 16 for the *getsock() bitmap */
#define GETSOCK_WRITEBITSTART 16
even when not displayed! */
else if(!(data->progress.flags & PGRS_HEADERS_OUT)) {
if (!data->progress.callback) {
- if(conn->resume_from)
+ if(data->reqdata.resume_from)
fprintf(data->set.err,
"** Resuming transfer from byte position %" FORMAT_OFF_T
"\n",
- conn->resume_from);
+ data->reqdata.resume_from);
fprintf(data->set.err,
" %% Total %% Received %% Xferd Average Speed Time Time Time Current\n"
" Dload Upload Total Spent Left Speed\n");
struct SessionHandle *data = conn->data;
size_t wrote;
+ if (data->state.cancelled) {
+ /* We just suck everything into a black hole */
+ return CURLE_OK;
+ }
+
if(0 == len)
len = strlen(ptr);
if(type & CLIENTWRITE_BODY) {
- if((conn->protocol&PROT_FTP) && conn->proto.ftp->transfertype == 'A') {
+ if((conn->protocol&PROT_FTP) && conn->proto.ftpc.transfertype == 'A') {
#ifdef CURL_DOES_CONVERSIONS
/* convert from the network encoding */
size_t rc;
return CURLE_OK;
}
+void Curl_read_rewind(struct connectdata *conn,
+ size_t extraBytesRead)
+{
+ conn->read_pos -= extraBytesRead;
+ conn->bits.stream_was_rewound = TRUE;
+}
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+
/*
* Internal read-from-socket function. This is meant to deal with plain
* sockets, SSL sockets and kerberos sockets.
ssize_t *n) /* amount bytes read */
{
ssize_t nread;
+ size_t bytestocopy = MIN(conn->buf_len - conn->read_pos, buffersize);
+ size_t bytesremaining = buffersize - bytestocopy;
/* Set 'num' to 0 or 1, depending on which socket that has been sent here.
If it is the second socket, we set num to 1. Otherwise to 0. This lets
*n=0; /* reset amount to zero */
+ bytesremaining = MIN(bytesremaining, sizeof(conn->master_buffer));
+
+ /* Copy from our master buffer first */
+ memcpy(buf, conn->master_buffer + conn->read_pos, bytestocopy);
+ conn->read_pos += bytestocopy;
+
+ conn->bits.stream_was_rewound = FALSE;
+
+ *n = bytestocopy;
+
+ if (bytesremaining == 0) {
+ return CURLE_OK;
+ }
+
if(conn->ssl[num].use) {
- nread = Curl_ssl_recv(conn, num, buf, buffersize);
+ nread = Curl_ssl_recv(conn, num, conn->master_buffer, bytesremaining);
- if(nread == -1)
+ if(nread == -1 && bytestocopy == 0) {
return -1; /* -1 from Curl_ssl_recv() means EWOULDBLOCK */
- }
- else {
- *n=0; /* reset amount to zero */
+ }
+
+ } else {
if(conn->sec_complete)
- nread = Curl_sec_read(conn, sockfd, buf, buffersize);
+ nread = Curl_sec_read(conn, sockfd, conn->master_buffer, bytesremaining);
else
- nread = sread(sockfd, buf, buffersize);
+ nread = sread(sockfd, conn->master_buffer, bytesremaining);
- if(-1 == nread) {
+ if(-1 == nread && bytestocopy == 0) {
int err = Curl_sockerrno();
#ifdef WIN32
if(WSAEWOULDBLOCK == err)
return -1;
}
}
- *n = nread;
+
+ if (nread > 0) {
+ memcpy(buf + bytestocopy, conn->master_buffer, nread);
+
+ conn->buf_len = nread;
+ conn->read_pos = nread;
+ *n += nread;
+ }
+
return CURLE_OK;
}
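
To make the buffer discipline easier to follow outside of the SSL/krb4 branches, here is a stand-alone sketch of the same idea: reads are staged through one connection-local buffer, leftover bytes are handed out first, and a rewind just moves the consumed-counter backwards. All names are invented for illustration and error handling is glossed over:

  #include <string.h>

  struct bufread {
    char buf[16384];   /* counterpart of conn->master_buffer */
    size_t filled;     /* how many bytes in buf are valid */
    size_t pos;        /* how many of those the caller has consumed */
  };

  /* serve cached bytes first, then refill from the network via sockread() */
  static size_t buffered_read(struct bufread *b, char *dst, size_t want,
                              size_t (*sockread)(char *, size_t))
  {
    size_t fromcache = b->filled - b->pos;
    size_t got;

    if(fromcache > want)
      fromcache = want;
    memcpy(dst, b->buf + b->pos, fromcache);
    b->pos += fromcache;
    got = fromcache;

    if(got < want) {
      /* refill the cache and hand out as much of it as was asked for */
      b->filled = sockread(b->buf, sizeof(b->buf));
      b->pos = (b->filled < want - got) ? b->filled : want - got;
      memcpy(dst + got, b->buf, b->pos);
      got += b->pos;
    }
    return got;
  }

  /* like Curl_read_rewind(): give back bytes that belong to the next
     response on a pipelined connection */
  static void buffered_rewind(struct bufread *b, size_t extra)
  {
    b->pos -= extra;
  }
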
CURLcode Curl_client_write(struct connectdata *conn, int type, char *ptr,
size_t len);
+void Curl_read_rewind(struct connectdata *conn,
+ size_t extraBytesRead);
+
/* internal read-function, does plain socket, SSL and krb4 */
int Curl_read(struct connectdata *conn, curl_socket_t sockfd,
char *buf, size_t buffersize,
if(!tn)
return CURLE_OUT_OF_MEMORY;
- conn->proto.telnet = (void *)tn; /* make us known */
+ conn->data->reqdata.proto.telnet = (void *)tn; /* make us known */
tn->telrcv_state = CURL_TS_DATA;
static void negotiate(struct connectdata *conn)
{
int i;
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *) conn->data->reqdata.proto.telnet;
for(i = 0;i < CURL_NTELOPTS;i++)
{
static
void set_remote_option(struct connectdata *conn, int option, int newstate)
{
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
if(newstate == CURL_YES)
{
switch(tn->him[option])
static
void rec_will(struct connectdata *conn, int option)
{
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
switch(tn->him[option])
{
case CURL_NO:
static
void rec_wont(struct connectdata *conn, int option)
{
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
switch(tn->him[option])
{
case CURL_NO:
static void
set_local_option(struct connectdata *conn, int option, int newstate)
{
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
if(newstate == CURL_YES)
{
switch(tn->us[option])
static
void rec_do(struct connectdata *conn, int option)
{
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
switch(tn->us[option])
{
case CURL_NO:
static
void rec_dont(struct connectdata *conn, int option)
{
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
switch(tn->us[option])
{
case CURL_NO:
char option_arg[256];
char *buf;
struct SessionHandle *data = conn->data;
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
/* Add the user name as an environment variable if it
was given on the command line */
char varname[128];
char varval[128];
struct SessionHandle *data = conn->data;
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)data->reqdata.proto.telnet;
printsub(data, '<', (unsigned char *)tn->subbuffer, CURL_SB_LEN(tn)+2);
switch (CURL_SB_GET(tn)) {
unsigned char c;
int in = 0;
struct SessionHandle *data = conn->data;
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)data->reqdata.proto.telnet;
while(count--)
{
CURLcode Curl_telnet_done(struct connectdata *conn, CURLcode status)
{
- struct TELNET *tn = (struct TELNET *)conn->proto.telnet;
+ struct TELNET *tn = (struct TELNET *)conn->data->reqdata.proto.telnet;
(void)status; /* unused */
curl_slist_free_all(tn->telnet_vars);
- free(conn->proto.telnet);
- conn->proto.telnet = NULL;
+ free(conn->data->reqdata.proto.telnet);
+ conn->data->reqdata.proto.telnet = NULL;
return CURLE_OK;
}
if(code)
return code;
- tn = (struct TELNET *)conn->proto.telnet;
+ tn = (struct TELNET *)data->reqdata.proto.telnet;
code = check_telnet_options(conn);
if(code)
}
#endif
/* mark this as "no further transfer wanted" */
- Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
+ Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
return code;
}
/* As RFC3617 describes the separator slash is not actually part of the file
name so we skip the always-present first letter of the path string. */
- char *filename = &state->conn->path[1];
+ char *filename = &state->conn->data->reqdata.path[1];
struct SessionHandle *data = state->conn->data;
CURLcode res = CURLE_OK;
/* If we are uploading, send an WRQ */
setpacketevent(&state->spacket, TFTP_EVENT_WRQ);
filename = curl_easy_unescape(data, filename, 0, NULL);
- state->conn->upload_fromhere = (char *)&state->spacket.data[4];
+ state->conn->data->reqdata.upload_fromhere = (char *)&state->spacket.data[4];
if(data->set.infilesize != -1)
Curl_pgrsSetUploadSize(data, data->set.infilesize);
}
tftp_state_data_t *state;
int rc;
- state = conn->proto.tftp = calloc(sizeof(tftp_state_data_t), 1);
+ state = conn->data->reqdata.proto.tftp = calloc(sizeof(tftp_state_data_t), 1);
if(!state)
return CURLE_OUT_OF_MEMORY;
{
(void)status; /* unused */
- free(conn->proto.tftp);
- conn->proto.tftp = NULL;
+ free(conn->data->reqdata.proto.tftp);
+ conn->data->reqdata.proto.tftp = NULL;
Curl_pgrsDone(conn);
return CURLE_OK;
CURLcode Curl_tftp(struct connectdata *conn, bool *done)
{
struct SessionHandle *data = conn->data;
- tftp_state_data_t *state = (tftp_state_data_t *)(conn->proto.tftp);
+ tftp_state_data_t *state = (tftp_state_data_t *)(conn->data->reqdata.proto.tftp);
tftp_event_t event;
CURLcode code;
int rc;
}
/* Tell curl we're done */
- code = Curl_Transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
+ code = Curl_setup_transfer(conn, -1, -1, FALSE, NULL, -1, NULL);
if(code)
return code;
#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
-enum {
- KEEP_NONE,
- KEEP_READ,
- KEEP_WRITE
-};
-
/*
* This function will call the read callback to fill our buffer with data
* to upload.
if(conn->bits.upload_chunky) {
/* if chunked Transfer-Encoding */
buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
- conn->upload_fromhere += 10; /* 32bit hex + CRLF */
+ data->reqdata.upload_fromhere += 10; /* 32bit hex + CRLF */
}
/* this function returns a size_t, so we typecast to int to prevent warnings
with picky compilers */
- nread = (int)conn->fread(conn->upload_fromhere, 1,
+ nread = (int)conn->fread(data->reqdata.upload_fromhere, 1,
buffersize, conn->fread_in);
if(nread == CURL_READFUNC_ABORT) {
int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
"%x\r\n", nread);
/* move buffer pointer */
- conn->upload_fromhere -= hexlen;
+ data->reqdata.upload_fromhere -= hexlen;
nread += hexlen;
/* copy the prefix to the buffer */
- memcpy(conn->upload_fromhere, hexbuffer, hexlen);
+ memcpy(data->reqdata.upload_fromhere, hexbuffer, hexlen);
/* always append CRLF to the data */
- memcpy(conn->upload_fromhere + nread, "\r\n", 2);
+ memcpy(data->reqdata.upload_fromhere + nread, "\r\n", 2);
if((nread - hexlen) == 0) {
/* mark this as done once this chunk is transfered */
- conn->keep.upload_done = TRUE;
+ data->reqdata.keep.upload_done = TRUE;
}
nread+=2; /* for the added CRLF */
#ifdef CURL_DOES_CONVERSIONS
if(data->set.prefer_ascii) {
CURLcode res;
- res = Curl_convert_to_network(data, conn->upload_fromhere, nread);
+ res = Curl_convert_to_network(data, data->reqdata.upload_fromhere, nread);
/* Curl_convert_to_network calls failf if unsuccessful */
if(res != CURLE_OK) {
return(res);
CURLcode Curl_readwrite(struct connectdata *conn,
bool *done)
{
- struct Curl_transfer_keeper *k = &conn->keep;
struct SessionHandle *data = conn->data;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
CURLcode result;
ssize_t nread; /* number of bytes read */
int didwhat=0;
}
do {
- /* If we still have reading to do, we check if we have a readable
- socket. */
- if((k->keepon & KEEP_READ) && (select_res & CSELECT_IN)) {
-
+ /* We go ahead and do a read if we have a readable socket or if
+ the stream was rewound (in which case we have data in a
+ buffer) */
+ if((k->keepon & KEEP_READ) &&
+ ((select_res & CSELECT_IN) || conn->bits.stream_was_rewound)) {
+ /* read */
bool is_empty_data = FALSE;
/* This is where we loop until we have read everything there is to
read or we get a EWOULDBLOCK */
do {
size_t buffersize = data->set.buffer_size?
- data->set.buffer_size:BUFSIZE;
+ data->set.buffer_size : BUFSIZE;
+ size_t bytestoread = buffersize;
+ int readrc;
+
+ if (k->size != -1 && !k->header &&
+ (k->size - k->bytecount) < (curl_off_t)bytestoread)
+ /* cap the read at what remains of this response so we don't eat
+ into a pipelined request that may follow on the same connection */
+ bytestoread = (size_t)(k->size - k->bytecount);
/* receive data from the network! */
- int readrc = Curl_read(conn, conn->sockfd, k->buf, buffersize, &nread);
+ readrc = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
/* subzero, this would've blocked */
- if(0>readrc)
+ if(0 > readrc)
break; /* get out of loop */
/* get the CURLcode from the int */
is_empty_data = (nread == 0 && k->bodywrites == 0);
/* NULL terminate, allowing string ops to be used */
- if (0 < nread || is_empty_data)
+ if (0 < nread || is_empty_data) {
k->buf[nread] = 0;
-
- /* if we receive 0 or less here, the server closed the connection and
- we bail out from this! */
+ }
else if (0 >= nread) {
+ /* if we receive 0 or less here, the server closed the connection
+ and we bail out from this! */
+
k->keepon &= ~KEEP_READ;
break;
}
return result;
data->info.header_size += (long)headerlen;
- conn->headerbytecount += (long)headerlen;
+ k->headerbytecount += (long)headerlen;
- conn->deductheadercount =
- (100 == k->httpcode)?conn->headerbytecount:0;
+ k->deductheadercount =
+ (100 == k->httpcode)?k->headerbytecount:0;
- if (conn->resume_from &&
+ if (data->reqdata.resume_from &&
(data->set.httpreq==HTTPREQ_GET) &&
(k->httpcode == 416)) {
/* "Requested Range Not Satisfiable" */
using chunked Transfer-Encoding.
*/
if(conn->bits.chunk)
- conn->size=-1;
+ k->size=-1;
}
- if(-1 != conn->size) {
+ if(-1 != k->size) {
/* We do this operation even if no_body is true, since this
data might be retrieved later with curl_easy_getinfo()
and its CURLINFO_CONTENT_LENGTH_DOWNLOAD option. */
- Curl_pgrsSetDownloadSize(data, conn->size);
- conn->maxdownload = conn->size;
+ Curl_pgrsSetDownloadSize(data, k->size);
+ k->maxdownload = k->size;
}
/* If max download size is *zero* (nothing) we already
have nothing and can safely return ok now! */
- if(0 == conn->maxdownload)
+ if(0 == k->maxdownload)
stop_reading = TRUE;
if(stop_reading) {
(k->httpcode != 401) &&
(k->httpcode != 407)) {
- if (conn->resume_from &&
+ if (data->reqdata.resume_from &&
(data->set.httpreq==HTTPREQ_GET) &&
(k->httpcode == 416)) {
/* "Requested Range Not Satisfiable", just proceed and
* MUST NOT contain a message-body, and thus is always
* terminated by the first empty line after the header
* fields. */
- conn->size=0;
- conn->maxdownload=0;
+ k->size=0;
+ k->maxdownload=0;
k->ignorecl = TRUE; /* ignore Content-Length headers */
break;
default:
return CURLE_FILESIZE_EXCEEDED;
}
if(contentlength >= 0)
- conn->size = contentlength;
+ k->size = contentlength;
else {
/* Negative Content-Length is really odd, and we know it
happens for example when older Apache servers send large
k->offset = curlx_strtoofft(ptr, NULL, 10);
- if (conn->resume_from == k->offset)
+ if (data->reqdata.resume_from == k->offset)
/* we asked for a resume and we got it */
k->content_range = TRUE;
}
here, or else use real peer host name. */
conn->allocptr.cookiehost?
conn->allocptr.cookiehost:conn->host.name,
- conn->path);
+ data->reqdata.path);
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
}
#endif
backup = *ptr; /* store the ending letter */
if(ptr != start) {
*ptr = '\0'; /* zero terminate */
- conn->newurl = strdup(start); /* clone string */
+ data->reqdata.newurl = strdup(start); /* clone string */
*ptr = backup; /* restore ending letter */
- if(!conn->newurl)
+ if(!data->reqdata.newurl)
return CURLE_OUT_OF_MEMORY;
}
}
return result;
data->info.header_size += (long)k->hbuflen;
- conn->headerbytecount += (long)k->hbuflen;
+ k->headerbytecount += (long)k->hbuflen;
/* reset hbufp pointer && hbuflen */
k->hbufp = data->state.headerbuff;
if(conn->protocol&PROT_HTTP) {
/* HTTP-only checks */
- if (conn->newurl) {
+ if (data->reqdata.newurl) {
if(conn->bits.close) {
/* Abort after the headers if "follow Location" is set
and we're set to close anyway. */
k->ignorebody = TRUE;
infof(data, "Ignoring the response-body\n");
}
- if (conn->resume_from && !k->content_range &&
+ if (data->reqdata.resume_from && !k->content_range &&
(data->set.httpreq==HTTPREQ_GET) &&
!k->ignorebody) {
/* we wanted to resume a download, although the server doesn't
return CURLE_HTTP_RANGE_ERROR;
}
- if(data->set.timecondition && !conn->range) {
+ if(data->set.timecondition && !data->reqdata.range) {
/* A time condition has been set AND no ranges have been
requested. This seems to be what chapter 13.3.4 of
RFC 2616 defines to be the correct action for a
}
#endif /* CURL_DISABLE_HTTP */
- if((-1 != conn->maxdownload) &&
- (k->bytecount + nread >= conn->maxdownload)) {
- nread = (ssize_t) (conn->maxdownload - k->bytecount);
+ if((-1 != k->maxdownload) &&
+ (k->bytecount + nread >= k->maxdownload)) {
+ size_t excess = k->bytecount + nread - k->maxdownload;
+
+ if (excess > 0) {
+        infof(data, "Rewinding stream by %d bytes\n", (int)excess);
+ Curl_read_rewind(conn, excess);
+ conn->bits.stream_was_rewound = TRUE;
+ }
+
+ nread = (ssize_t) (k->maxdownload - k->bytecount);
if(nread < 0 ) /* this should be unusual */
nread = 0;
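For example, with k->maxdownload at 100, k->bytecount at 80 and a read that returns nread = 50, the excess is 80 + 50 - 100 = 30 bytes: those 30 bytes belong to whatever follows on the connection (typically the next pipelined response), so they are pushed back with Curl_read_rewind() and nread is trimmed to 20 before the body data is delivered.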
/* only read more data if there's no upload data already
present in the upload buffer */
- if(0 == conn->upload_present) {
+ if(0 == data->reqdata.upload_present) {
/* init the "upload from here" pointer */
- conn->upload_fromhere = k->uploadbuf;
+ data->reqdata.upload_fromhere = k->uploadbuf;
if(!k->upload_done) {
/* HTTP pollution, this should be written nicer to become more
int fillcount;
if(k->wait100_after_headers &&
- (conn->proto.http->sending == HTTPSEND_BODY)) {
+ (data->reqdata.proto.http->sending == HTTPSEND_BODY)) {
/* If this call is to send body data, we must take some action:
We have sent off the full HTTP 1.1 request, and we shall now
go into the Expect: 100 state and await such a header */
}
/* store number of bytes available for upload */
- conn->upload_present = nread;
+ data->reqdata.upload_present = nread;
/* convert LF to CRLF if so asked */
#ifdef CURL_DO_LINEEND_CONV
* must be used instead of the escape sequences \r & \n.
*/
for(i = 0, si = 0; i < nread; i++, si++) {
- if (conn->upload_fromhere[i] == 0x0a) {
+ if (data->reqdata.upload_fromhere[i] == 0x0a) {
data->state.scratch[si++] = 0x0d;
data->state.scratch[si] = 0x0a;
if (!data->set.crlf) {
}
}
else
- data->state.scratch[si] = conn->upload_fromhere[i];
+ data->state.scratch[si] = data->reqdata.upload_fromhere[i];
}
if(si != nread) {
/* only perform the special operation if we really did replace
nread = si;
/* upload from the new (replaced) buffer instead */
- conn->upload_fromhere = data->state.scratch;
+ data->reqdata.upload_fromhere = data->state.scratch;
/* set the new amount too */
- conn->upload_present = nread;
+ data->reqdata.upload_present = nread;
}
}
}
/* write to socket (send away data) */
result = Curl_write(conn,
conn->writesockfd, /* socket to send to */
- conn->upload_fromhere, /* buffer pointer */
- conn->upload_present, /* buffer size */
+ data->reqdata.upload_fromhere, /* buffer pointer */
+ data->reqdata.upload_present, /* buffer size */
&bytes_written); /* actually send away */
if(result)
return result;
if(data->set.verbose)
/* show the data before we change the pointer upload_fromhere */
- Curl_debug(data, CURLINFO_DATA_OUT, conn->upload_fromhere,
+ Curl_debug(data, CURLINFO_DATA_OUT, data->reqdata.upload_fromhere,
bytes_written, conn);
- if(conn->upload_present != bytes_written) {
+ if(data->reqdata.upload_present != bytes_written) {
/* we only wrote a part of the buffer (if anything), deal with it! */
/* store the amount of bytes left in the buffer to write */
- conn->upload_present -= bytes_written;
+ data->reqdata.upload_present -= bytes_written;
/* advance the pointer where to find the buffer when the next send
is to happen */
- conn->upload_fromhere += bytes_written;
+ data->reqdata.upload_fromhere += bytes_written;
writedone = TRUE; /* we are done, stop the loop */
}
else {
/* we've uploaded that buffer now */
- conn->upload_fromhere = k->uploadbuf;
- conn->upload_present = 0; /* no more bytes left */
+ data->reqdata.upload_fromhere = k->uploadbuf;
+ data->reqdata.upload_present = 0; /* no more bytes left */
if(k->upload_done) {
/* switch off writing, we're done! */
k->now = Curl_tvnow();
if(didwhat) {
/* Update read/write counters */
- if(conn->bytecountp)
- *conn->bytecountp = k->bytecount; /* read count */
- if(conn->writebytecountp)
- *conn->writebytecountp = k->writebytecount; /* write count */
+ if(k->bytecountp)
+ *k->bytecountp = k->bytecount; /* read count */
+ if(k->writebytecountp)
+ *k->writebytecountp = k->writebytecount; /* write count */
}
else {
/* no read no write, this is a timeout? */
if (data->set.timeout &&
((Curl_tvdiff(k->now, k->start)/1000) >= data->set.timeout)) {
- if (conn->size != -1) {
+ if (k->size != -1) {
failf(data, "Operation timed out after %d seconds with %"
FORMAT_OFF_T " out of %" FORMAT_OFF_T " bytes received",
- data->set.timeout, k->bytecount, conn->size);
+ data->set.timeout, k->bytecount, k->size);
} else {
failf(data, "Operation timed out after %d seconds with %"
FORMAT_OFF_T " bytes received",
* returning.
*/
- if(!(conn->bits.no_body) && (conn->size != -1) &&
- (k->bytecount != conn->size) &&
+ if(!(conn->bits.no_body) && (k->size != -1) &&
+ (k->bytecount != k->size) &&
#ifdef CURL_DO_LINEEND_CONV
/* Most FTP servers don't adjust their file SIZE response for CRLFs,
so we'll check to see if the discrepancy can be explained
by the number of CRLFs we've changed to LFs.
*/
- (k->bytecount != (conn->size + data->state.crlf_conversions)) &&
+ (k->bytecount != (k->size + data->state.crlf_conversions)) &&
#endif /* CURL_DO_LINEEND_CONV */
- !conn->newurl) {
+ !data->reqdata.newurl) {
failf(data, "transfer closed with %" FORMAT_OFF_T
" bytes remaining to read",
- conn->size - k->bytecount);
+ k->size - k->bytecount);
return CURLE_PARTIAL_FILE;
}
else if(!(conn->bits.no_body) &&
conn->bits.chunk &&
- (conn->proto.http->chunk.state != CHUNK_STOP)) {
+ (data->reqdata.proto.http->chunk.state != CHUNK_STOP)) {
/*
   * In chunked mode, return an error if the connection is closed before
   * the final (terminating) empty chunk has been read.
/*
- * Curl_readwrite_init() inits the readwrite session.
+ * Curl_readwrite_init() inits the readwrite session. This is done once per
+ * transfer, and possibly several times on the same SessionHandle.
*/
CURLcode Curl_readwrite_init(struct connectdata *conn)
{
struct SessionHandle *data = conn->data;
- struct Curl_transfer_keeper *k = &conn->keep;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
/* NB: the content encoding software depends on this initialization of
- Curl_transfer_keeper. */
+ Curl_transfer_keeper.*/
memset(k, 0, sizeof(struct Curl_transfer_keeper));
k->start = Curl_tvnow(); /* start time */
k->header = TRUE; /* assume header */
k->httpversion = -1; /* unknown at this point */
- data = conn->data; /* there's the root struct */
+ k->size = data->reqdata.size;
+ k->maxdownload = data->reqdata.maxdownload;
+ k->bytecountp = data->reqdata.bytecountp;
+ k->writebytecountp = data->reqdata.writebytecountp;
+
+ k->bytecount = 0;
+ k->headerbytecount = 0;
+
k->buf = data->state.buffer;
k->uploadbuf = data->state.uploadbuffer;
k->maxfd = (conn->sockfd>conn->writesockfd?
if (!conn->bits.getheader) {
k->header = FALSE;
- if(conn->size > 0)
- Curl_pgrsSetDownloadSize(data, conn->size);
+ if(k->size > 0)
+ Curl_pgrsSetDownloadSize(data, k->size);
}
/* we want header and/or body, if neither then don't do this! */
if(conn->bits.getheader || !conn->bits.no_body) {
state info where we wait for the 100-return code
*/
if (data->state.expect100header &&
- (conn->proto.http->sending == HTTPSEND_BODY)) {
+ (data->reqdata.proto.http->sending == HTTPSEND_BODY)) {
/* wait with write until we either got 100-continue or a timeout */
k->write_after_100_header = TRUE;
k->start100 = k->start;
of sockets */
int numsocks)
{
+ struct SessionHandle *data = conn->data;
int bitmap = GETSOCK_BLANK;
int index = 0;
/* simple check but we might need two slots */
return GETSOCK_BLANK;
- if(conn->keep.keepon & KEEP_READ) {
+ if(data->reqdata.keep.keepon & KEEP_READ) {
bitmap |= GETSOCK_READSOCK(index);
sock[index] = conn->sockfd;
}
- if(conn->keep.keepon & KEEP_WRITE) {
+
+ if(data->reqdata.keep.keepon & KEEP_WRITE) {
if((conn->sockfd != conn->writesockfd) ||
- !(conn->keep.keepon & KEEP_READ)) {
+ !(data->reqdata.keep.keepon & KEEP_READ)) {
/* only if they are not the same socket or we didn't have a readable
one, we increase index */
- if(conn->keep.keepon & KEEP_READ)
+ if(data->reqdata.keep.keepon & KEEP_READ)
index++; /* increase index if we need two entries */
sock[index] = conn->writesockfd;
}
*
* This function is what performs the actual transfer. It is capable of
* doing both ways simultaneously.
- * The transfer must already have been setup by a call to Curl_Transfer().
+ * The transfer must already have been set up by a call to Curl_setup_transfer().
*
* Note that headers are created in a preallocated buffer of a default size.
* That buffer can be enlarged on demand, but it is never shrunken again.
Transfer(struct connectdata *conn)
{
CURLcode result;
- struct Curl_transfer_keeper *k = &conn->keep;
+ struct SessionHandle *data = conn->data;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
bool done=FALSE;
if(!(conn->protocol & PROT_FILE))
char **url)
{
bool retry = FALSE;
+ struct SessionHandle *data = conn->data;
+ struct Curl_transfer_keeper *k = &data->reqdata.keep;
- if((conn->keep.bytecount+conn->headerbytecount == 0) &&
+ if((data->reqdata.keep.bytecount+k->headerbytecount == 0) &&
conn->bits.reuse &&
!conn->bits.no_body) {
/* We got no data, we attempted to re-use a connection and yet we want a
* We must duplicate the new URL here as the connection data may
* be free()ed in the Curl_done() function.
*/
- newurl = conn->newurl?strdup(conn->newurl):NULL;
+ newurl = data->reqdata.newurl?strdup(data->reqdata.newurl):NULL;
}
else {
/* The transfer phase returned error, we mark the connection to get
}
/*
- * Curl_Transfer() is called to setup some basic properties for the upcoming
+ * Curl_setup_transfer() is called to set up some basic properties for the upcoming
* transfer.
*/
CURLcode
-Curl_Transfer(struct connectdata *c_conn, /* connection data */
- int sockindex, /* socket index to read from or -1 */
- curl_off_t size, /* -1 if unknown at this point */
- bool getheader, /* TRUE if header parsing is wanted */
- curl_off_t *bytecountp, /* return number of bytes read or NULL */
- int writesockindex, /* socket index to write to, it may very
- well be the same we read from. -1
- disables */
- curl_off_t *writecountp /* return number of bytes written or
- NULL */
- )
+Curl_setup_transfer(
+ struct connectdata *c_conn, /* connection data */
+ int sockindex, /* socket index to read from or -1 */
+ curl_off_t size, /* -1 if unknown at this point */
+ bool getheader, /* TRUE if header parsing is wanted */
+ curl_off_t *bytecountp, /* return number of bytes read or NULL */
+ int writesockindex, /* socket index to write to, it may very
+ well be the same we read from. -1
+ disables */
+ curl_off_t *writecountp /* return number of bytes written or
+ NULL */
+ )
{
struct connectdata *conn = (struct connectdata *)c_conn;
+  struct SessionHandle *data;
+
   if(!conn)
     return CURLE_BAD_FUNCTION_ARGUMENT;
+
+  data = conn->data; /* only dereference conn after the NULL check */
curlassert((sockindex <= 1) && (sockindex >= -1));
/* now copy all input parameters */
- conn->sockfd = sockindex==-1?
- CURL_SOCKET_BAD:conn->sock[sockindex];
- conn->size = size;
+ conn->sockfd = sockindex == -1 ?
+ CURL_SOCKET_BAD : conn->sock[sockindex];
+ conn->writesockfd = writesockindex == -1 ?
+ CURL_SOCKET_BAD:conn->sock[writesockindex];
conn->bits.getheader = getheader;
- conn->bytecountp = bytecountp;
- conn->writesockfd = writesockindex==-1?
- CURL_SOCKET_BAD:conn->sock[writesockindex];
- conn->writebytecountp = writecountp;
- return CURLE_OK;
+ data->reqdata.size = size;
+ data->reqdata.bytecountp = bytecountp;
+ data->reqdata.writebytecountp = writecountp;
+ return CURLE_OK;
}
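As a quick illustration of the renamed setup call: a protocol handler that wants the response downloaded from the first socket, with header parsing and no upload, would invoke it roughly as below. This is a sketch only; 'my_bytecount' is a stand-in for whatever counter the caller actually keeps.

  curl_off_t my_bytecount = 0;  /* placeholder counter for this sketch */
  CURLcode result = Curl_setup_transfer(conn,
                                        FIRSTSOCKET, /* read from this socket */
                                        -1,          /* size unknown yet */
                                        TRUE,        /* parse headers */
                                        &my_bytecount,
                                        -1,          /* no upload socket */
                                        NULL);       /* no write counter */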
/*
struct connectdata *sec_conn = NULL; /* secondary connection */
bool backup_reuse_fresh = data->set.reuse_fresh;
char *backup_userpwd = data->set.userpwd;
+ char *backup_path = data->reqdata.path;
+ char *backup_pathbuffer = data->reqdata.pathbuffer;
if(data->change.url_alloc)
free(data->change.url);
data->set.reuse_fresh = backup_reuse_fresh;
data->set.userpwd = backup_userpwd;
+  /* Hand the parsed path over to the secondary (source) connection */
+ sec_conn->sec_path = data->reqdata.path;
+ sec_conn->sec_pathbuffer = data->reqdata.pathbuffer;
+
+ /* Restore the original */
+ data->reqdata.path = backup_path;
+ data->reqdata.pathbuffer = backup_pathbuffer;
+
return status;
}
CURLcode Curl_readrewind(struct connectdata *conn);
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp);
bool Curl_retry_request(struct connectdata *conn, char **url);
+
/* This sets up a forthcoming transfer */
CURLcode
-Curl_Transfer (struct connectdata *data,
+Curl_setup_transfer (struct connectdata *data,
int sockindex, /* socket index to read from or -1 */
curl_off_t size, /* -1 if unknown at this point */
bool getheader, /* TRUE if header parsing is wanted */
#include "http_negotiate.h"
#include "select.h"
#include "multiif.h"
+#include "easyif.h"
/* And now for the protocols */
#include "ftp.h"
struct connectdata **usethis);
static long ConnectionStore(struct SessionHandle *data,
struct connectdata *conn);
+static bool IsPipeliningPossible(struct SessionHandle *handle);
+static void conn_free(struct connectdata *conn);
+
+#define MAX_PIPELINE_LENGTH 5
#ifndef USE_ARES
/* not for Win32, unless it is cygwin
free(ptr);
}
+static void close_connections(struct SessionHandle *data)
+{
+ /* Loop through all open connections and kill them one by one */
+ while(-1 != ConnectionKillOne(data))
+ ; /* empty loop */
+}
+
/*
* This is the internal function curl_easy_cleanup() calls. This should
* cleanup and free all resources associated with this sessionhandle.
CURLcode Curl_close(struct SessionHandle *data)
{
- if(data->multi) {
- /* this handle is still part of a multi handle, take care of this first */
+ struct Curl_multi *m = data->multi;
+
+ if(m)
+ /* This handle is still part of a multi handle, take care of this first
+ and detach this handle from there. */
Curl_multi_rmeasy(data->multi, data);
+
+ if(data->state.connc && (data->state.connc->type == CONNCACHE_PRIVATE)) {
+ /* close all connections still alive that are in the private connection
+ cache, as we no longer have the pointer left to the shared one. */
+ close_connections(data);
+
+ /* free the connection cache if allocated privately */
+ Curl_rm_connc(data->state.connc);
}
- /* Loop through all open connections and kill them one by one */
- while(-1 != ConnectionKillOne(data))
- ; /* empty loop */
if ( ! (data->share && data->share->hostcache) ) {
if ( !Curl_global_host_cache_use(data)) {
}
}
+ if(data->state.shared_conn) {
+ /* this handle is still being used by a shared connection cache and thus
+ we leave it around for now */
+ Curl_multi_add_closure(data->state.shared_conn, data);
+
+ return CURLE_OK;
+ }
+
+ /* Free the pathbuffer */
+ Curl_safefree(data->reqdata.pathbuffer);
+ Curl_safefree(data->reqdata.proto.generic);
+
/* Close down all open SSL info and sessions */
Curl_ssl_close_all(data);
Curl_safefree(data->state.first_host);
Curl_share_unlock(data, CURL_LOCK_DATA_COOKIE);
#endif
-#if !defined(CURL_DISABLE_HTTP) && !defined(CURL_DISABLE_CRYPTO_AUTH)
Curl_digest_cleanup(data);
-#endif
-
- /* free the connection cache */
- free(data->state.connects);
Curl_safefree(data->info.contenttype);
-#ifdef USE_ARES
/* this destroys the channel and we cannot use it anymore after this */
ares_destroy(data->state.areschannel);
-#endif
#if defined(CURL_DOES_CONVERSIONS) && defined(HAVE_ICONV)
/* close iconv conversion descriptors */
return CURLE_OK;
}
+/* create a connection cache of a private or multi type */
+struct conncache *Curl_mk_connc(int type)
+{
+ /* It is subject for debate how many default connections to have for a multi
+ connection cache... */
+ int default_amount = (type == CONNCACHE_PRIVATE)?5:10;
+ struct conncache *c;
+
+ c= calloc(sizeof(struct conncache), 1);
+ if(!c)
+ return NULL;
+
+ c->connects = calloc(sizeof(struct connectdata *), default_amount);
+ if(!c->connects) {
+ free(c);
+ return NULL;
+ }
+
+ c->num = default_amount;
+
+ return c;
+}
+
+/* Free a connection cache. This is called from Curl_close() and
+ curl_multi_cleanup(). */
+void Curl_rm_connc(struct conncache *c)
+{
+ if(c->connects) {
+ int i;
+ for(i = 0; i < c->num; ++i)
+ conn_free(c->connects[i]);
+
+ free(c->connects);
+ }
+
+ free(c);
+}
+
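A rough sketch of how Curl_mk_connc() and Curl_rm_connc() are meant to pair up in the private (easy handle) case; the helper name below is invented for illustration only, the real call sites are curl_easy_perform()/the multi code and Curl_close() as noted elsewhere in this patch.

  /* illustrative only: lazily create a private cache for an easy handle */
  static CURLcode example_init_private_connc(struct SessionHandle *data)
  {
    if(!data->state.connc) {
      data->state.connc = Curl_mk_connc(CONNCACHE_PRIVATE);
      if(!data->state.connc)
        return CURLE_OUT_OF_MEMORY;
    }
    return CURLE_OK; /* freed later by Curl_rm_connc() from Curl_close() */
  }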
/**
* Curl_open()
*
{
CURLcode res = CURLE_OK;
struct SessionHandle *data;
+
/* Very simple start-up: alloc the struct, init it with zeroes and return */
data = (struct SessionHandle *)calloc(1, sizeof(struct SessionHandle));
if(!data)
data->set.httpauth = CURLAUTH_BASIC; /* defaults to basic */
data->set.proxyauth = CURLAUTH_BASIC; /* defaults to basic */
- /* create an array with connection data struct pointers */
- data->state.numconnects = 5; /* hard-coded right now */
- data->state.connects = (struct connectdata **)
- calloc(sizeof(struct connectdata *) * data->state.numconnects, 1);
-
- if(!data->state.connects)
- res = CURLE_OUT_OF_MEMORY;
+ /* This no longer creates a connection cache here. It is instead made on
+ the first call to curl_easy_perform() or when the handle is added to a
+ multi stack. */
/* most recent connection is not yet defined */
data->state.lastconnect = -1;
+ Curl_easy_initHandleData(data);
+
/*
* libcurl 7.10 introduced SSL verification *by default*! This needs to be
* switched off unless wanted.
}
if(res) {
-#ifdef USE_ARES
ares_destroy(data->state.areschannel);
-#endif
if(data->state.headerbuff)
free(data->state.headerbuff);
free(data);
struct connectdata **newptr;
long i;
- if(newconnects < data->state.numconnects) {
+ if(newconnects < data->state.connc->num) {
/* Since this number is *decreased* from the existing number, we must
close the possibly open connections that live on the indexes that
- are being removed! */
- for(i=newconnects; i< data->state.numconnects; i++)
- Curl_disconnect(data->state.connects[i]);
+ are being removed!
+
+ NOTE: for conncache_multi cases we must make sure that we only
+ close handles not in use.
+ */
+ for(i=newconnects; i< data->state.connc->num; i++)
+ Curl_disconnect(data->state.connc->connects[i]);
- /* If the most recent connection is no longer valid, mark it invalid. */
+ /* If the most recent connection is no longer valid, mark it
+ invalid. */
if(data->state.lastconnect <= newconnects)
data->state.lastconnect = -1;
}
- if(newconnects) {
+ if(newconnects > 0) {
newptr= (struct connectdata **)
- realloc(data->state.connects,
+ realloc(data->state.connc->connects,
sizeof(struct connectdata *) * newconnects);
if(!newptr)
/* we closed a few connections in vain, but so what? */
return CURLE_OUT_OF_MEMORY;
/* nullify the newly added pointers */
- for(i=data->state.numconnects; i<newconnects; i++) {
+ for(i=data->state.connc->num; i<newconnects; i++)
newptr[i] = NULL;
- }
- data->state.connects = newptr;
- data->state.numconnects = newconnects;
- }
- else {
- /* zero makes NO cache at all */
- if(data->state.connects)
- free(data->state.connects);
- data->state.connects = NULL;
- data->state.numconnects = 0;
- data->state.lastconnect = -1;
+ data->state.connc->connects = newptr;
+ data->state.connc->num = newconnects;
}
+ /* we no longer support less than 1 as size for the connection cache,
+ and I'm not sure it ever worked to set it to zero */
}
break;
case CURLOPT_FORBID_REUSE:
return result;
}
+static void conn_free(struct connectdata *conn)
+{
+ if (!conn)
+ return;
+
+ /* close possibly still open sockets */
+ if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET])
+ sclose(conn->sock[SECONDARYSOCKET]);
+ if(CURL_SOCKET_BAD != conn->sock[FIRSTSOCKET])
+ sclose(conn->sock[FIRSTSOCKET]);
+
+ Curl_safefree(conn->user);
+ Curl_safefree(conn->passwd);
+ Curl_safefree(conn->proxyuser);
+ Curl_safefree(conn->proxypasswd);
+ Curl_safefree(conn->allocptr.proxyuserpwd);
+ Curl_safefree(conn->allocptr.uagent);
+ Curl_safefree(conn->allocptr.userpwd);
+ Curl_safefree(conn->allocptr.accept_encoding);
+ Curl_safefree(conn->allocptr.rangeline);
+ Curl_safefree(conn->allocptr.ref);
+ Curl_safefree(conn->allocptr.host);
+ Curl_safefree(conn->allocptr.cookiehost);
+ Curl_safefree(conn->ip_addr_str);
+ Curl_safefree(conn->trailer);
+ Curl_safefree(conn->sec_pathbuffer);
+ Curl_safefree(conn->host.rawalloc); /* host name buffer */
+ Curl_safefree(conn->proxy.rawalloc); /* proxy name buffer */
+
+ Curl_llist_destroy(conn->send_pipe, NULL);
+ Curl_llist_destroy(conn->recv_pipe, NULL);
+
+ /* possible left-overs from the async name resolvers */
+#if defined(USE_ARES)
+ Curl_safefree(conn->async.hostname);
+ Curl_safefree(conn->async.os_specific);
+#elif defined(CURLRES_THREADED)
+ Curl_destroy_thread_data(&conn->async);
+#endif
+
+ Curl_free_ssl_config(&conn->ssl_config);
+
+ free(conn); /* free all the connection oriented data */
+}
+
CURLcode Curl_disconnect(struct connectdata *conn)
{
- struct SessionHandle *data;
+ struct SessionHandle *data = conn->data;
if(!conn)
return CURLE_OK; /* this is closed and fine already */
- data = conn->data;
-
#if defined(CURLDEBUG) && defined(AGGRESIVE_TEST)
/* scan for DNS cache entries still marked as in use */
Curl_hash_apply(data->hostcache,
* get here *instead* if we fail prematurely. Thus we need to be able
* to free this resource here as well.
*/
- if(conn->bits.rangestringalloc) {
- free(conn->range);
- conn->bits.rangestringalloc = FALSE;
+ if(data->reqdata.rangestringalloc) {
+ free(data->reqdata.range);
+ data->reqdata.rangestringalloc = FALSE;
}
if((conn->ntlm.state != NTLMSTATE_NONE) ||
if(-1 != conn->connectindex) {
/* unlink ourselves! */
infof(data, "Closing connection #%ld\n", conn->connectindex);
- data->state.connects[conn->connectindex] = NULL;
+ data->state.connc->connects[conn->connectindex] = NULL;
}
- Curl_safefree(conn->proto.generic);
- Curl_safefree(conn->newurl);
- Curl_safefree(conn->pathbuffer); /* the URL path buffer */
-
- Curl_safefree(conn->host.rawalloc); /* host name buffer */
- Curl_safefree(conn->proxy.rawalloc); /* proxy name buffer */
#ifdef USE_LIBIDN
if(conn->host.encalloc)
idn_free(conn->host.encalloc); /* encoded host name buffer, must be freed
freed with idn_free() since this was
allocated by libidn */
#endif
- Curl_ssl_close(conn);
-
- /* close possibly still open sockets */
- if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET])
- sclose(conn->sock[SECONDARYSOCKET]);
- if(CURL_SOCKET_BAD != conn->sock[FIRSTSOCKET])
- sclose(conn->sock[FIRSTSOCKET]);
- Curl_safefree(conn->user);
- Curl_safefree(conn->passwd);
- Curl_safefree(conn->proxyuser);
- Curl_safefree(conn->proxypasswd);
- Curl_safefree(conn->allocptr.proxyuserpwd);
- Curl_safefree(conn->allocptr.uagent);
- Curl_safefree(conn->allocptr.userpwd);
- Curl_safefree(conn->allocptr.accept_encoding);
- Curl_safefree(conn->allocptr.rangeline);
- Curl_safefree(conn->allocptr.ref);
- Curl_safefree(conn->allocptr.host);
- Curl_safefree(conn->allocptr.cookiehost);
- Curl_safefree(conn->ip_addr_str);
- Curl_safefree(conn->trailer);
-
- /* possible left-overs from the async name resolvers */
-#if defined(USE_ARES)
- Curl_safefree(conn->async.hostname);
- Curl_safefree(conn->async.os_specific);
-#elif defined(CURLRES_THREADED)
- Curl_destroy_thread_data(&conn->async);
-#endif
+ Curl_ssl_close(conn);
- Curl_free_ssl_config(&conn->ssl_config);
+ /* Indicate to all handles on the pipe that we're dead */
+ Curl_signalPipeClose(conn->send_pipe);
+ Curl_signalPipeClose(conn->recv_pipe);
- free(conn); /* free all the connection oriented data */
+ conn_free(conn);
return CURLE_OK;
}
return ret_val;
}
+static bool IsPipeliningPossible(struct SessionHandle *handle)
+{
+ if (handle->multi && Curl_multi_canPipeline(handle->multi) &&
+ (handle->set.httpreq == HTTPREQ_GET ||
+ handle->set.httpreq == HTTPREQ_HEAD) &&
+ handle->set.httpversion != CURL_HTTP_VERSION_1_0)
+ return TRUE;
+
+ return FALSE;
+}
+
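For context, the pipelining decision ultimately traces back to the application's multi handle setting; a minimal sketch of how an application turns it on through the public API (error handling omitted, URLs are placeholders):

  CURLM *multi = curl_multi_init();
  CURL *first = curl_easy_init();
  CURL *second = curl_easy_init();

  curl_easy_setopt(first, CURLOPT_URL, "http://example.com/one");
  curl_easy_setopt(second, CURLOPT_URL, "http://example.com/two");

  curl_multi_setopt(multi, CURLMOPT_PIPELINING, 1L); /* enable pipelining */
  curl_multi_add_handle(multi, first);
  curl_multi_add_handle(multi, second); /* GET requests to the same server
                                           may now share one connection */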
+void Curl_addHandleToPipeline(struct SessionHandle *handle,
+ struct curl_llist *pipe)
+{
+ Curl_llist_insert_next(pipe,
+ pipe->tail,
+ handle);
+}
+
+
+void Curl_removeHandleFromPipeline(struct SessionHandle *handle,
+ struct curl_llist *pipe)
+{
+ struct curl_llist_element *curr;
+
+ curr = pipe->head;
+ while (curr) {
+ if (curr->ptr == handle) {
+ Curl_llist_remove(pipe, curr, NULL);
+ break;
+ }
+ curr = curr->next;
+ }
+}
+
+#if 0
+static void Curl_printPipeline(struct curl_llist *pipe)
+{
+ struct curl_llist_element *curr;
+
+ curr = pipe->head;
+ while (curr) {
+ struct SessionHandle *data = (struct SessionHandle *) curr->ptr;
+ infof(data, "Handle in pipeline: %s\n",
+ data->reqdata.path);
+ curr = curr->next;
+ }
+}
+#endif
+
+bool Curl_isHandleAtHead(struct SessionHandle *handle,
+ struct curl_llist *pipe)
+{
+ struct curl_llist_element *curr = pipe->head;
+ if (curr) {
+ return curr->ptr == handle ? TRUE : FALSE;
+ }
+
+ return FALSE;
+}
+
+void Curl_signalPipeClose(struct curl_llist *pipe)
+{
+ struct curl_llist_element *curr;
+
+ curr = pipe->head;
+ while (curr) {
+ struct curl_llist_element *next = curr->next;
+ struct SessionHandle *data = (struct SessionHandle *) curr->ptr;
+
+ data->state.pipe_broke = TRUE;
+
+ Curl_llist_remove(pipe, curr, NULL);
+ curr = next;
+ }
+}
+
+
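A hypothetical walk-through of how these list helpers are meant to cooperate for one easy handle on a shared connection; this is an illustration, not a call sequence taken verbatim from the patch.

  /* the handle queues up as a reader on the connection */
  Curl_addHandleToPipeline(handle, conn->recv_pipe);

  /* only the handle at the head of recv_pipe may consume the response */
  if(Curl_isHandleAtHead(handle, conn->recv_pipe)) {
    /* ... read and deliver this handle's response ... */
  }

  /* when its response is done, the handle leaves the pipeline; if the
     connection dies instead, Curl_signalPipeClose() flags every queued
     handle via data->state.pipe_broke */
  Curl_removeHandleFromPipeline(handle, conn->recv_pipe);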
/*
* Given one filled in connection struct (named needle), this function should
- * detect if there already is one that have all the significant details
+ * detect if there already is one that has all the significant details
* exactly the same and thus should be used instead.
+ *
+ * If there is a match, this function returns TRUE - and has marked the
+ * connection as 'in-use'. ConnectionDone() must later be called on it to
+ * return it to the 'idle' (unused) state.
*/
static bool
ConnectionExists(struct SessionHandle *data,
{
long i;
struct connectdata *check;
+ bool canPipeline = IsPipeliningPossible(data);
- for(i=0; i< data->state.numconnects; i++) {
+ for(i=0; i< data->state.connc->num; i++) {
bool match = FALSE;
/*
* Note that if we use a HTTP proxy, we check connections to that
* proxy and not to the actual remote server.
*/
- check = data->state.connects[i];
+ check = data->state.connc->connects[i];
if(!check)
/* NULL pointer means not filled-in entry */
continue;
+ if(check->inuse && !canPipeline)
+ /* can only happen within multi handles, and means that another easy
+ handle is using this connection */
+ continue;
+
+ if (check->send_pipe->size >= MAX_PIPELINE_LENGTH ||
+ check->recv_pipe->size >= MAX_PIPELINE_LENGTH)
+ continue;
+
if((needle->protocol&PROT_SSL) != (check->protocol&PROT_SSL))
/* don't do mixed SSL and non-SSL connections */
continue;
}
if((needle->protocol & PROT_FTP) ||
((needle->protocol & PROT_HTTP) &&
- (needle->data->state.authhost.want==CURLAUTH_NTLM))) {
+ (data->state.authhost.want==CURLAUTH_NTLM))) {
/* This is FTP or HTTP+NTLM, verify that we're using the same name
and password as well */
if(!strequal(needle->user, check->user) ||
if(dead) {
/*
*/
+ check->data = data;
infof(data, "Connection %d seems to be dead!\n", i);
Curl_disconnect(check); /* disconnect resources */
- data->state.connects[i]=NULL; /* nothing here */
+ data->state.connc->connects[i]=NULL; /* nothing here */
/* There's no need to continue searching, because we only store
one connection for each unique set of identifiers */
return FALSE;
}
+ check->inuse = TRUE; /* mark this as being in use so that no other
+ handle in a multi stack may nick it */
+
+ if (canPipeline) {
+ /* Mark the connection as being in a pipeline */
+ check->is_in_pipeline = TRUE;
+ }
+
*usethis = check;
return TRUE; /* yes, we found one to use! */
}
  return FALSE; /* no matching connection exists */
}
+
/*
* This function frees/closes a connection in the connection cache. This
* should take the previously set policy into account when deciding which
now = Curl_tvnow();
- for(i=0; i< data->state.numconnects; i++) {
- conn = data->state.connects[i];
+ for(i=0; data->state.connc && (i< data->state.connc->num); i++) {
+ conn = data->state.connc->connects[i];
if(!conn)
continue;
}
}
if(connindex >= 0) {
+ /* Set the connection's owner correctly */
+ conn = data->state.connc->connects[connindex];
+ conn->data = data;
/* the winner gets the honour of being disconnected */
- (void) Curl_disconnect(data->state.connects[connindex]);
+ (void)Curl_disconnect(conn);
/* clean the array entry */
- data->state.connects[connindex] = NULL;
+ data->state.connc->connects[connindex] = NULL;
}
return connindex; /* return the available index or -1 */
}
+/* this connection can now be marked 'idle' */
+static void
+ConnectionDone(struct connectdata *conn)
+{
+ conn->inuse = FALSE;
+ conn->data = NULL;
+
+  if (conn->send_pipe->size == 0 &&
+      conn->recv_pipe->size == 0)
+    conn->is_in_pipeline = FALSE; /* both pipe lists are empty now */
+}
+
/*
* The given input connection struct pointer is to be stored. If the "cache"
* is already full, we must clean out the most suitable using the previously
struct connectdata *conn)
{
long i;
- for(i=0; i< data->state.numconnects; i++) {
- if(!data->state.connects[i])
+ for(i=0; i< data->state.connc->num; i++) {
+ if(!data->state.connc->connects[i])
break;
}
- if(i == data->state.numconnects) {
+ if(i == data->state.connc->num) {
/* there was no room available, kill one */
i = ConnectionKillOne(data);
infof(data, "Connection (#%d) was killed to make room\n", i);
}
+ conn->connectindex = i; /* Make the child know where the pointer to this
+                             particular data is stored. But note that this is
+                             -1 if this is not within the cache and this is
+ probably not checked for everywhere (yet). */
+ conn->inuse = TRUE;
if(-1 != i) {
- /* only do this if a true index was returned, if -1 was returned there
+ /* Only do this if a true index was returned, if -1 was returned there
is no room in the cache for an unknown reason and we cannot store
- this there. */
- data->state.connects[i] = conn; /* fill in this */
- conn->connectindex = i; /* make the child know where the pointer to this
- particular data is stored */
+ this there.
+
+ TODO: make sure we really can work with more handles than positions in
+       the cache, or possibly we should (be able to) automatically resize the
+ connection cache when we add more easy handles to a multi handle!
+ */
+ data->state.connc->connects[i] = conn; /* fill in this */
+ conn->data = data;
}
+
return i;
}
* Nonsupport "SOCKS 4A (Simple Extension to SOCKS 4 Protocol)"
* Nonsupport "Identification Protocol (RFC1413)"
*/
-static int handleSock4Proxy(const char *proxy_name, struct connectdata *conn)
+static int handleSock4Proxy(const char *proxy_name,
+ struct SessionHandle *data,
+ struct connectdata *conn)
{
unsigned char socksreq[262]; /* room for SOCKS4 request incl. user id */
int result;
CURLcode code;
curl_socket_t sock = conn->sock[FIRSTSOCKET];
- struct SessionHandle *data = conn->data;
Curl_nonblock(sock, FALSE);
else
hp = NULL; /* fail! */
- Curl_resolv_unlock(conn->data, dns); /* not used anymore from now on */
+ Curl_resolv_unlock(data, dns); /* not used anymore from now on */
}
if(!hp) {
- failf(conn->data, "Failed to resolve \"%s\" for SOCKS4 connect.",
+ failf(data, "Failed to resolve \"%s\" for SOCKS4 connect.",
conn->host.name);
return 1;
}
/* Send request */
code = Curl_write(conn, sock, (char *)socksreq, packetsize, &written);
if ((code != CURLE_OK) || (written != packetsize)) {
- failf(conn->data, "Failed to send SOCKS4 connect request.");
+ failf(data, "Failed to send SOCKS4 connect request.");
return 1;
}
/* Receive response */
result = Curl_read(conn, sock, (char *)socksreq, packetsize, &actualread);
if ((result != CURLE_OK) || (actualread != packetsize)) {
- failf(conn->data, "Failed to receive SOCKS4 connect request ack.");
+ failf(data, "Failed to receive SOCKS4 connect request ack.");
return 1;
}
/* wrong version ? */
if (socksreq[0] != 0) {
- failf(conn->data,
+ failf(data,
"SOCKS4 reply has wrong version, version should be 4.");
return 1;
}
infof(data, "SOCKS4 request granted.\n");
break;
case 91:
- failf(conn->data,
+ failf(data,
"Can't complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
", request rejected or failed.",
(unsigned char)socksreq[4], (unsigned char)socksreq[5],
socksreq[1]);
return 1;
case 92:
- failf(conn->data,
+ failf(data,
"Can't complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
", request rejected because SOCKS server cannot connect to "
"identd on the client.",
socksreq[1]);
return 1;
case 93:
- failf(conn->data,
+ failf(data,
"Can't complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
", request rejected because the client program and identd "
"report different user-ids.",
socksreq[1]);
return 1;
default :
- failf(conn->data,
+ failf(data,
"Can't complete SOCKS4 connection to %d.%d.%d.%d:%d. (%d)"
", Unknown.",
(unsigned char)socksreq[4], (unsigned char)socksreq[5],
code = Curl_write(conn, sock, (char *)socksreq, (2 + (int)socksreq[1]),
&written);
if ((code != CURLE_OK) || (written != (2 + (int)socksreq[1]))) {
- failf(conn->data, "Unable to send initial SOCKS5 request.");
+ failf(data, "Unable to send initial SOCKS5 request.");
return 1;
}
result=Curl_read(conn, sock, (char *)socksreq, 2, &actualread);
if ((result != CURLE_OK) || (actualread != 2)) {
- failf(conn->data, "Unable to receive initial SOCKS5 response.");
+ failf(data, "Unable to receive initial SOCKS5 response.");
return 1;
}
if (socksreq[0] != 5) {
- failf(conn->data, "Received invalid version in initial SOCKS5 response.");
+ failf(data, "Received invalid version in initial SOCKS5 response.");
return 1;
}
if (socksreq[1] == 0) {
code = Curl_write(conn, sock, (char *)socksreq, len, &written);
if ((code != CURLE_OK) || (len != written)) {
- failf(conn->data, "Failed to send SOCKS5 sub-negotiation request.");
+ failf(data, "Failed to send SOCKS5 sub-negotiation request.");
return 1;
}
result=Curl_read(conn, sock, (char *)socksreq, 2, &actualread);
if ((result != CURLE_OK) || (actualread != 2)) {
- failf(conn->data, "Unable to receive SOCKS5 sub-negotiation response.");
+ failf(data, "Unable to receive SOCKS5 sub-negotiation response.");
return 1;
}
/* ignore the first (VER) byte */
if (socksreq[1] != 0) { /* status */
- failf(conn->data, "User was rejected by the SOCKS5 server (%d %d).",
+ failf(data, "User was rejected by the SOCKS5 server (%d %d).",
socksreq[0], socksreq[1]);
return 1;
}
else {
/* error */
if (socksreq[1] == 1) {
- failf(conn->data,
+ failf(data,
"SOCKS5 GSSAPI per-message authentication is not supported.");
return 1;
}
else if (socksreq[1] == 255) {
if (!proxy_name || !*proxy_name) {
- failf(conn->data,
+ failf(data,
"No authentication method was acceptable. (It is quite likely"
" that the SOCKS5 server wanted a username/password, since none"
" was supplied to the server on this connection.)");
}
else {
- failf(conn->data, "No authentication method was acceptable.");
+ failf(data, "No authentication method was acceptable.");
}
return 1;
}
else {
- failf(conn->data,
+ failf(data,
"Undocumented SOCKS5 mode attempted to be used by server.");
return 1;
}
else
hp = NULL; /* fail! */
- Curl_resolv_unlock(conn->data, dns); /* not used anymore from now on */
+ Curl_resolv_unlock(data, dns); /* not used anymore from now on */
}
if(!hp) {
- failf(conn->data, "Failed to resolve \"%s\" for SOCKS5 connect.",
+ failf(data, "Failed to resolve \"%s\" for SOCKS5 connect.",
conn->host.name);
return 1;
}
code = Curl_write(conn, sock, (char *)socksreq, packetsize, &written);
if ((code != CURLE_OK) || (written != packetsize)) {
- failf(conn->data, "Failed to send SOCKS5 connect request.");
+ failf(data, "Failed to send SOCKS5 connect request.");
return 1;
}
result = Curl_read(conn, sock, (char *)socksreq, packetsize, &actualread);
if ((result != CURLE_OK) || (actualread != packetsize)) {
- failf(conn->data, "Failed to receive SOCKS5 connect request ack.");
+ failf(data, "Failed to receive SOCKS5 connect request ack.");
return 1;
}
if (socksreq[0] != 5) { /* version */
- failf(conn->data,
+ failf(data,
"SOCKS5 reply has wrong version, version should be 5.");
return 1;
}
if (socksreq[1] != 0) { /* Anything besides 0 is an error */
- failf(conn->data,
+ failf(data,
"Can't complete SOCKS5 connection to %d.%d.%d.%d:%d. (%d)",
(unsigned char)socksreq[4], (unsigned char)socksreq[5],
(unsigned char)socksreq[6], (unsigned char)socksreq[7],
return 0; /* Proxy was successful! */
}
-static CURLcode ConnectPlease(struct connectdata *conn,
+static CURLcode ConnectPlease(struct SessionHandle *data,
+ struct connectdata *conn,
struct Curl_dns_entry *hostaddr,
bool *connected)
{
CURLcode result;
Curl_addrinfo *addr;
- struct SessionHandle *data = conn->data;
char *hostname = data->change.proxy?conn->proxy.name:conn->host.name;
infof(data, "About to connect() to %s%s port %d\n",
Curl_store_ip_addr(conn);
- switch(conn->data->set.proxytype) {
+ switch(data->set.proxytype) {
case CURLPROXY_SOCKS5:
return handleSock5Proxy(conn->proxyuser,
conn->proxypasswd,
/* do nothing here. handled later. */
break;
case CURLPROXY_SOCKS4:
- return handleSock4Proxy(conn->proxyuser, conn) ?
+ return handleSock4Proxy(conn->proxyuser, data, conn) ?
CURLE_COULDNT_CONNECT : CURLE_OK;
default:
- failf(conn->data, "unknown proxytype option given");
+ failf(data, "unknown proxytype option given");
return CURLE_COULDNT_CONNECT;
}
}
* protocol layer.
*/
-CURLcode Curl_protocol_connecting(struct connectdata *conn, bool *done)
+CURLcode Curl_protocol_connecting(struct connectdata *conn,
+ bool *done)
{
CURLcode result=CURLE_OK;
* proceed with some action.
*
*/
-CURLcode Curl_protocol_connect(struct connectdata *conn, bool *protocol_done)
+CURLcode Curl_protocol_connect(struct connectdata *conn,
+ bool *protocol_done)
{
- struct SessionHandle *data = conn->data;
CURLcode result=CURLE_OK;
+ struct SessionHandle *data = conn->data;
*protocol_done = FALSE;
}
#endif
-static void fix_hostname(struct connectdata *conn, struct hostname *host)
+static void fix_hostname(struct SessionHandle *data,
+ struct connectdata *conn, struct hostname *host)
{
/* set the name we use to display the host name */
host->dispname = host->name;
if (!is_ASCII_name(host->name) &&
stringprep_check_version(LIBIDN_REQUIRED_VERSION)) {
char *ace_hostname = NULL;
- struct SessionHandle *data = conn->data;
int rc = idna_to_ascii_lz(host->name, &ace_hostname, 0);
infof (data, "Input domain encoded as `%s'\n",
stringprep_locale_charset ());
}
}
#else
+ (void)data; /* never used */
(void)conn; /* never used */
#endif
}
-
-/**
- * CreateConnection() sets up a new connectdata struct, or re-uses an already
- * existing one, and resolves host name.
- *
- * if this function returns CURLE_OK and *async is set to TRUE, the resolve
- * response will be coming asynchronously. If *async is FALSE, the name is
- * already resolved.
- *
- * @param data The sessionhandle pointer
- * @param in_connect is set to the next connection data pointer
- * @param addr is set to the new dns entry for this connection. If this
- * connection is re-used it will be NULL.
- * @param async is set TRUE/FALSE depending on the nature of this lookup
- * @return CURLcode
- * @see SetupConnection()
+/*
+ * Parse URL and fill in the relevant members of the connection struct.
*/
-
-static CURLcode CreateConnection(struct SessionHandle *data,
- struct connectdata **in_connect,
- struct Curl_dns_entry **addr,
- bool *async)
+static CURLcode ParseURLAndFillConnection(struct SessionHandle *data,
+ struct connectdata *conn)
{
- char *tmp;
char *at;
- CURLcode result=CURLE_OK;
- struct connectdata *conn;
- struct connectdata *conn_temp = NULL;
- size_t urllen;
- struct Curl_dns_entry *hostaddr;
-#if defined(HAVE_ALARM) && !defined(USE_ARES)
- unsigned int prev_alarm=0;
-#endif
- char endbracket;
- char user[MAX_CURL_USER_LENGTH];
- char passwd[MAX_CURL_PASSWORD_LENGTH];
- int rc;
- bool reuse;
-
-#ifndef USE_ARES
-#ifdef SIGALRM
-#ifdef HAVE_SIGACTION
- struct sigaction keep_sigact; /* store the old struct here */
- bool keep_copysig=FALSE; /* did copy it? */
-#else
-#ifdef HAVE_SIGNAL
- void *keep_sigact; /* store the old handler here */
-#endif /* HAVE_SIGNAL */
-#endif /* HAVE_SIGACTION */
-#endif /* SIGALRM */
-#endif /* USE_ARES */
-
- *addr = NULL; /* nothing yet */
- *async = FALSE;
-
- /*************************************************************
- * Check input data
- *************************************************************/
-
- if(!data->change.url)
- return CURLE_URL_MALFORMAT;
-
- /* First, split up the current URL in parts so that we can use the
- parts for checking against the already present connections. In order
- to not have to modify everything at once, we allocate a temporary
- connection data struct and fill in for comparison purposes. */
-
- conn = (struct connectdata *)calloc(sizeof(struct connectdata), 1);
- if(!conn) {
- *in_connect = NULL; /* clear the pointer */
- return CURLE_OUT_OF_MEMORY;
- }
- /* We must set the return variable as soon as possible, so that our
- parent can cleanup any possible allocs we may have done before
- any failure */
- *in_connect = conn;
-
- /* and we setup a few fields in case we end up actually using this struct */
- conn->data = data; /* remember our daddy */
- conn->sock[FIRSTSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */
- conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */
- conn->connectindex = -1; /* no index */
- conn->bits.httpproxy = (data->change.proxy && *data->change.proxy &&
- (data->set.proxytype == CURLPROXY_HTTP))?
- TRUE:FALSE; /* http proxy or not */
-
- /* Default protocol-independent behavior doesn't support persistent
- connections, so we set this to force-close. Protocols that support
- this need to set this to FALSE in their "curl_do" functions. */
- conn->bits.close = TRUE;
-
- /* maxdownload must be -1 on init, as 0 is a valid value! */
- conn->maxdownload = -1; /* might have been used previously! */
-
- /* Store creation time to help future close decision making */
- conn->created = Curl_tvnow();
-
- conn->bits.use_range = data->set.set_range?TRUE:FALSE; /* range status */
- conn->range = data->set.set_range; /* clone the range setting */
- conn->resume_from = data->set.set_resume_from; /* inherit resume_from */
-
- conn->bits.user_passwd = data->set.userpwd?1:0;
- conn->bits.proxy_user_passwd = data->set.proxyuserpwd?1:0;
- conn->bits.no_body = data->set.opt_no_body;
- conn->bits.tunnel_proxy = data->set.tunnel_thru_httpproxy;
- conn->bits.ftp_use_epsv = data->set.ftp_use_epsv;
- conn->bits.ftp_use_eprt = data->set.ftp_use_eprt;
-
- /* This initing continues below, see the comment "Continue connectdata
- * initialization here" */
-
- /***********************************************************
- * We need to allocate memory to store the path in. We get the size of the
- * full URL to be sure, and we need to make it at least 256 bytes since
- * other parts of the code will rely on this fact
- ***********************************************************/
-#define LEAST_PATH_ALLOC 256
- urllen=strlen(data->change.url);
- if(urllen < LEAST_PATH_ALLOC)
- urllen=LEAST_PATH_ALLOC;
-
- /*
- * We malloc() the buffers below urllen+2 to make room for to possibilities:
- * 1 - an extra terminating zero
- * 2 - an extra slash (in case a syntax like "www.host.com?moo" is used)
- */
+ char *tmp;
- conn->pathbuffer=(char *)malloc(urllen+2);
- if(NULL == conn->pathbuffer)
- return CURLE_OUT_OF_MEMORY; /* really bad error */
- conn->path = conn->pathbuffer;
-
- conn->host.rawalloc=(char *)malloc(urllen+2);
- if(NULL == conn->host.rawalloc)
- return CURLE_OUT_OF_MEMORY;
- conn->host.name = conn->host.rawalloc;
- conn->host.name[0] = 0;
+ char *path = data->reqdata.path;
/*************************************************************
* Parse the URL.
************************************************************/
if((2 == sscanf(data->change.url, "%15[^:]:%[^\n]",
conn->protostr,
- conn->path)) && strequal(conn->protostr, "file")) {
- if(conn->path[0] == '/' && conn->path[1] == '/') {
+ path)) && strequal(conn->protostr, "file")) {
+ if(path[0] == '/' && path[1] == '/') {
/* Allow omitted hostname (e.g. file:/<path>). This is not strictly
* speaking a valid file: URL by RFC 1738, but treating file:/<path> as
* file://localhost/<path> is similar to how other schemes treat missing
/* This cannot be done with strcpy() in a portable manner, since the
memory areas overlap! */
- memmove(conn->path, conn->path + 2, strlen(conn->path + 2)+1);
+ memmove(path, path + 2, strlen(path + 2)+1);
}
/*
* we deal with file://<host>/<path> differently since it supports no
* hostname other than "localhost" and "127.0.0.1", which is unique among
* the URL protocols specified in RFC 1738
*/
- if(conn->path[0] != '/') {
+ if(path[0] != '/') {
/* the URL included a host name, we ignore host names in file:// URLs
as the standards don't define what to do with them */
- char *ptr=strchr(conn->path, '/');
+ char *ptr=strchr(path, '/');
if(ptr) {
/* there was a slash present
ptr++;
/* This cannot be made with strcpy, as the memory chunks overlap! */
- memmove(conn->path, ptr, strlen(ptr)+1);
+ memmove(path, ptr, strlen(ptr)+1);
}
}
}
else {
/* clear path */
- conn->path[0]=0;
+ path[0]=0;
if (2 > sscanf(data->change.url,
"%15[^\n:]://%[^\n/]%[^\n]",
conn->protostr,
- conn->host.name, conn->path)) {
+ conn->host.name, path)) {
/*
* The URL was badly formatted, let's try the browser-style _without_
* protocol specified like 'http://'.
*/
if((1 > sscanf(data->change.url, "%[^\n/]%[^\n]",
- conn->host.name, conn->path)) ) {
+ conn->host.name, path)) ) {
/*
* We couldn't even get this format.
*/
*/
size_t hostlen = strlen(tmp);
- size_t pathlen = strlen(conn->path);
+ size_t pathlen = strlen(path);
/* move the existing path plus the zero byte forward, to make room for
the host-name part */
- memmove(conn->path+hostlen+1, conn->path, pathlen+1);
+ memmove(path+hostlen+1, path, pathlen+1);
/* now copy the trailing host part in front of the existing path */
- memcpy(conn->path+1, tmp, hostlen);
+ memcpy(path+1, tmp, hostlen);
- conn->path[0]='/'; /* prepend the missing slash */
+ path[0]='/'; /* prepend the missing slash */
*tmp=0; /* now cut off the hostname at the ? */
}
- else if(!conn->path[0]) {
+ else if(!path[0]) {
/* if there's no path set, use a single slash */
- strcpy(conn->path, "/");
+ strcpy(path, "/");
}
/* If the URL is malformatted (missing a '/' after hostname before path) we
* insert a slash here. The only letter except '/' we accept to start a path
* is '?'.
*/
- if(conn->path[0] == '?') {
+ if(path[0] == '?') {
/* We need this function to deal with overlapping memory areas. We know
that the memory area 'path' points to is 'urllen' bytes big and that
is bigger than the path. Use +1 to move the zero byte too. */
- memmove(&conn->path[1], conn->path, strlen(conn->path)+1);
- conn->path[0] = '/';
+ memmove(&path[1], path, strlen(path)+1);
+ path[0] = '/';
}
/*
* So if the URL was A://B/C,
* conn->protostr is A
* conn->host.name is B
- * conn->path is /C
+ * data->reqdata.path is /C
+ */
+
+ return CURLE_OK;
+}
+
+static void llist_dtor(void *user, void *element)
+{
+ (void)user;
+ (void)element;
+ /* Do nothing */
+}
+
+
+/**
+ * CreateConnection() sets up a new connectdata struct, or re-uses an already
+ * existing one, and resolves host name.
+ *
+ * if this function returns CURLE_OK and *async is set to TRUE, the resolve
+ * response will be coming asynchronously. If *async is FALSE, the name is
+ * already resolved.
+ *
+ * @param data The sessionhandle pointer
+ * @param in_connect is set to the next connection data pointer
+ * @param addr is set to the new dns entry for this connection. If this
+ * connection is re-used it will be NULL.
+ * @param async is set TRUE/FALSE depending on the nature of this lookup
+ * @return CURLcode
+ * @see SetupConnection()
+ *
+ * *NOTE* this function assigns the conn->data pointer!
+ */
+
+static CURLcode CreateConnection(struct SessionHandle *data,
+ struct connectdata **in_connect,
+ struct Curl_dns_entry **addr,
+ bool *async)
+{
+
+ char *tmp;
+ CURLcode result=CURLE_OK;
+ struct connectdata *conn;
+ struct connectdata *conn_temp = NULL;
+ size_t urllen;
+ struct Curl_dns_entry *hostaddr;
+#if defined(HAVE_ALARM) && !defined(USE_ARES)
+ unsigned int prev_alarm=0;
+#endif
+ char endbracket;
+ char user[MAX_CURL_USER_LENGTH];
+ char passwd[MAX_CURL_PASSWORD_LENGTH];
+ int rc;
+ bool reuse;
+
+#ifndef USE_ARES
+#ifdef SIGALRM
+#ifdef HAVE_SIGACTION
+ struct sigaction keep_sigact; /* store the old struct here */
+ bool keep_copysig=FALSE; /* did copy it? */
+#else
+#ifdef HAVE_SIGNAL
+ void *keep_sigact; /* store the old handler here */
+#endif /* HAVE_SIGNAL */
+#endif /* HAVE_SIGACTION */
+#endif /* SIGALRM */
+#endif /* USE_ARES */
+
+ *addr = NULL; /* nothing yet */
+ *async = FALSE;
+
+ /*************************************************************
+ * Check input data
+ *************************************************************/
+
+ if(!data->change.url)
+ return CURLE_URL_MALFORMAT;
+
+ /* First, split up the current URL in parts so that we can use the
+ parts for checking against the already present connections. In order
+ to not have to modify everything at once, we allocate a temporary
+ connection data struct and fill in for comparison purposes. */
+
+ conn = (struct connectdata *)calloc(sizeof(struct connectdata), 1);
+ if(!conn) {
+ *in_connect = NULL; /* clear the pointer */
+ return CURLE_OUT_OF_MEMORY;
+ }
+ /* We must set the return variable as soon as possible, so that our
+ parent can cleanup any possible allocs we may have done before
+ any failure */
+ *in_connect = conn;
+
+ /* and we setup a few fields in case we end up actually using this struct */
+
+ conn->data = data; /* Setup the association between this connection
+ and the SessionHandle */
+
+ conn->sock[FIRSTSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */
+ conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD; /* no file descriptor */
+ conn->connectindex = -1; /* no index */
+ conn->bits.httpproxy = (data->change.proxy && *data->change.proxy &&
+ (data->set.proxytype == CURLPROXY_HTTP))?
+ TRUE:FALSE; /* http proxy or not */
+
+ /* Default protocol-independent behavior doesn't support persistent
+ connections, so we set this to force-close. Protocols that support
+ this need to set this to FALSE in their "curl_do" functions. */
+ conn->bits.close = TRUE;
+
+ conn->readchannel_inuse = FALSE;
+ conn->writechannel_inuse = FALSE;
+
+ /* Initialize the pipeline lists */
+ conn->send_pipe = Curl_llist_alloc((curl_llist_dtor) llist_dtor);
+ conn->recv_pipe = Curl_llist_alloc((curl_llist_dtor) llist_dtor);
+
+ /* Store creation time to help future close decision making */
+ conn->created = Curl_tvnow();
+
+ data->reqdata.use_range = data->set.set_range?TRUE:FALSE; /* range status */
+
+ data->reqdata.range = data->set.set_range; /* clone the range setting */
+ data->reqdata.resume_from = data->set.set_resume_from;
+
+ conn->bits.user_passwd = data->set.userpwd?1:0;
+ conn->bits.proxy_user_passwd = data->set.proxyuserpwd?1:0;
+ conn->bits.no_body = data->set.opt_no_body;
+ conn->bits.tunnel_proxy = data->set.tunnel_thru_httpproxy;
+ conn->bits.ftp_use_epsv = data->set.ftp_use_epsv;
+ conn->bits.ftp_use_eprt = data->set.ftp_use_eprt;
+
+ /* This initing continues below, see the comment "Continue connectdata
+ * initialization here" */
+
+ /***********************************************************
+ * We need to allocate memory to store the path in. We get the size of the
+ * full URL to be sure, and we need to make it at least 256 bytes since
+ * other parts of the code will rely on this fact
+ ***********************************************************/
+#define LEAST_PATH_ALLOC 256
+ urllen=strlen(data->change.url);
+ if(urllen < LEAST_PATH_ALLOC)
+ urllen=LEAST_PATH_ALLOC;
+
+ if (!data->set.source_url /* 3rd party FTP */
+ && data->reqdata.pathbuffer) {
+ /* Free the old buffer */
+ free(data->reqdata.pathbuffer);
+ }
+
+ /*
+   * We malloc() the buffers below urllen+2 to make room for two possibilities:
+ * 1 - an extra terminating zero
+ * 2 - an extra slash (in case a syntax like "www.host.com?moo" is used)
*/
+ data->reqdata.pathbuffer=(char *)malloc(urllen+2);
+ if(NULL == data->reqdata.pathbuffer)
+ return CURLE_OUT_OF_MEMORY; /* really bad error */
+ data->reqdata.path = data->reqdata.pathbuffer;
+
+ conn->host.rawalloc=(char *)malloc(urllen+2);
+ if(NULL == conn->host.rawalloc)
+ return CURLE_OUT_OF_MEMORY;
+
+ conn->host.name = conn->host.rawalloc;
+ conn->host.name[0] = 0;
+
+ result = ParseURLAndFillConnection(data, conn);
+ if (result != CURLE_OK) {
+ return result;
+ }
+
/*************************************************************
* Take care of proxy authentication stuff
*************************************************************/
* server, we just fail since we can't rewind the file writing from within
* this function.
***********************************************************/
- if(conn->resume_from) {
- if(!conn->bits.use_range) {
+ if(data->reqdata.resume_from) {
+ if(!data->reqdata.use_range) {
/* if it already was in use, we just skip this */
- conn->range = aprintf("%" FORMAT_OFF_T "-", conn->resume_from);
- if(!conn->range)
+      data->reqdata.range = aprintf("%" FORMAT_OFF_T "-",
+                                    data->reqdata.resume_from);
+ if(!data->reqdata.range)
return CURLE_OUT_OF_MEMORY;
- conn->bits.rangestringalloc = TRUE; /* mark as allocated */
- conn->bits.use_range = 1; /* switch on range usage */
+ data->reqdata.rangestringalloc = TRUE; /* mark as allocated */
+ data->reqdata.use_range = 1; /* switch on range usage */
}
}
#endif
conn->port = port;
conn->remote_port = (unsigned short)port;
- conn->protocol |= PROT_FTP;
+ conn->protocol |= PROT_FTP|PROT_CLOSEACTION;
if(data->change.proxy &&
*data->change.proxy &&
conn->curl_disconnect = Curl_ftp_disconnect;
}
- conn->path++; /* don't include the initial slash */
+ data->reqdata.path++; /* don't include the initial slash */
/* FTP URLs support an extension like ";type=<typecode>" that
* we'll try to get now! */
- type=strstr(conn->path, ";type=");
+ type=strstr(data->reqdata.path, ";type=");
if(!type) {
type=strstr(conn->host.rawalloc, ";type=");
}
/* Setup a "faked" transfer that'll do nothing */
if(CURLE_OK == result) {
conn->bits.tcpconnect = TRUE; /* we are "connected */
- result = Curl_Transfer(conn, -1, -1, FALSE, NULL, /* no download */
- -1, NULL); /* no upload */
+ result = Curl_setup_transfer(conn, -1, -1, FALSE, NULL, /* no download */
+ -1, NULL); /* no upload */
}
return result;
conn->curl_done = Curl_tftp_done;
/* TFTP URLs support an extension like ";mode=<typecode>" that
* we'll try to get now! */
- type=strstr(conn->path, ";mode=");
+ type=strstr(data->reqdata.path, ";mode=");
if(!type) {
type=strstr(conn->host.rawalloc, ";mode=");
}
char *url;
url = aprintf("http://%s:%d%s", conn->host.name, conn->remote_port,
- conn->path);
+ data->reqdata.path);
if(!url)
return CURLE_OUT_OF_MEMORY;
conn = conn_temp; /* use this connection from now on */
+ conn->data = old_conn->data;
+
/* get the user+password information from the old_conn struct since it may
* be new for this request even when we re-use an existing connection */
conn->bits.user_passwd = old_conn->bits.user_passwd;
if (!conn->bits.httpproxy)
free(old_conn->host.rawalloc); /* free the newly allocated name buffer */
- free(conn->pathbuffer); /* free the newly allocated path pointer */
- conn->pathbuffer = old_conn->pathbuffer; /* use the old one */
- conn->path = old_conn->path;
-
/* re-use init */
conn->bits.reuse = TRUE; /* yes, we're re-using here */
conn->bits.chunk = FALSE; /* always assume not chunked unless told
otherwise */
- conn->maxdownload = -1; /* might have been used previously! */
Curl_safefree(old_conn->user);
Curl_safefree(old_conn->passwd);
Curl_safefree(old_conn->proxyuser);
Curl_safefree(old_conn->proxypasswd);
-
- if(old_conn->bits.rangestringalloc)
- free(old_conn->range);
+ Curl_llist_destroy(old_conn->send_pipe, NULL);
+ Curl_llist_destroy(old_conn->recv_pipe, NULL);
free(old_conn); /* we don't need this anymore */
* If we're doing a resumed transfer, we need to setup our stuff
* properly.
*/
- conn->resume_from = data->set.set_resume_from;
- if (conn->resume_from) {
- if (conn->bits.rangestringalloc == TRUE)
- free(conn->range);
- conn->range = aprintf("%" FORMAT_OFF_T "-", conn->resume_from);
- if(!conn->range)
+ data->reqdata.resume_from = data->set.set_resume_from;
+ if (data->reqdata.resume_from) {
+ if (data->reqdata.rangestringalloc == TRUE)
+ free(data->reqdata.range);
+ data->reqdata.range = aprintf("%" FORMAT_OFF_T "-",
+ data->reqdata.resume_from);
+ if(!data->reqdata.range)
return CURLE_OUT_OF_MEMORY;
/* tell ourselves to fetch this range */
- conn->bits.use_range = TRUE; /* enable range download */
- conn->bits.rangestringalloc = TRUE; /* mark range string allocated */
+ data->reqdata.use_range = TRUE; /* enable range download */
+ data->reqdata.rangestringalloc = TRUE; /* mark range string allocated */
}
else if (data->set.set_range) {
/* There is a range, but is not a resume, useful for random ftp access */
- conn->range = strdup(data->set.set_range);
- if(!conn->range)
+ data->reqdata.range = strdup(data->set.set_range);
+ if(!data->reqdata.range)
return CURLE_OUT_OF_MEMORY;
- conn->bits.rangestringalloc = TRUE; /* mark range string allocated */
- conn->bits.use_range = TRUE; /* enable range download */
+ data->reqdata.rangestringalloc = TRUE; /* mark range string allocated */
+ data->reqdata.use_range = TRUE; /* enable range download */
}
else
- conn->bits.use_range = FALSE; /* disable range download */
+ data->reqdata.use_range = FALSE; /* disable range download */
*in_connect = conn; /* return this instead! */
ConnectionStore(data, conn);
}
- /* Continue connectdata initialization here.
+ /* Continue connectdata initialization here. */
+
+ /*
*
* Inherit the proper values from the urldata struct AFTER we have arranged
* the persistent connection stuff */
/* we'll need to clear conn->dns_entry later in Curl_disconnect() */
if (conn->bits.httpproxy)
- fix_hostname(conn, &conn->host);
+ fix_hostname(data, conn, &conn->host);
}
else {
/* this is a fresh connect */
/* set a pointer to the hostname we display */
- fix_hostname(conn, &conn->host);
+ fix_hostname(data, conn, &conn->host);
if(!data->change.proxy || !*data->change.proxy) {
/* If not connecting via a proxy, extract the port from the URL, if it is
* there, thus overriding any defaults that might have been set above. */
conn->port = conn->remote_port; /* it is the same port */
- /* Resolve target host right now */
+ /* Resolve target host right on */
rc = Curl_resolv(conn, conn->host.name, (int)conn->port, &hostaddr);
if(rc == CURLRESOLV_PENDING)
*async = TRUE;
/* This is a proxy that hasn't been resolved yet. */
/* IDN-fix the proxy name */
- fix_hostname(conn, &conn->proxy);
+ fix_hostname(data, conn, &conn->proxy);
/* resolve proxy */
rc = Curl_resolv(conn, conn->proxy.name, (int)conn->port, &hostaddr);
*
* NOTE: the argument 'hostaddr' is NULL when this function is called for a
* re-used connection.
+ *
+ * conn->data MUST already have been set up properly (in CreateConnection)
*/
static CURLcode SetupConnection(struct connectdata *conn,
struct Curl_dns_entry *hostaddr,
bool *protocol_done)
{
- struct SessionHandle *data = conn->data;
CURLcode result=CURLE_OK;
+ struct SessionHandle *data = conn->data;
Curl_pgrsTime(data, TIMER_NAMELOOKUP);
}
}
- conn->bytecount = 0;
- conn->headerbytecount = 0;
#ifdef CURL_DO_LINEEND_CONV
data->state.crlf_conversions = 0; /* reset CRLF conversion counter */
#endif /* CURL_DO_LINEEND_CONV */
bool connected = FALSE;
/* Connect only if not already connected! */
- result = ConnectPlease(conn, hostaddr, &connected);
+ result = ConnectPlease(data, conn, hostaddr, &connected);
if(connected) {
result = Curl_protocol_connect(conn, protocol_done);
Curl_disconnect(*in_connect); /* close the connection */
*in_connect = NULL; /* return a NULL */
}
+ } else {
+ if ((*in_connect)->is_in_pipeline)
+ data->state.is_in_pipeline = TRUE;
}
return code;
{
CURLcode result;
struct connectdata *conn = *connp;
- struct SessionHandle *data=conn->data;
+ struct SessionHandle *data = conn->data;
Curl_expire(data, 0); /* stop timer */
conn->bits.done = TRUE; /* called just now! */
/* cleanups done even if the connection is re-used */
- if(conn->bits.rangestringalloc) {
- free(conn->range);
- conn->bits.rangestringalloc = FALSE;
+
+ if(data->reqdata.rangestringalloc) {
+ free(data->reqdata.range);
+ data->reqdata.rangestringalloc = FALSE;
+ }
+
+ /* Cleanup possible redirect junk */
+ if(data->reqdata.newurl) {
+ free(data->reqdata.newurl);
+ data->reqdata.newurl = NULL;
}
if(conn->dns_entry) {
conn->dns_entry = NULL;
}
- /* Cleanup possible redirect junk */
- if(conn->newurl) {
- free(conn->newurl);
- conn->newurl = NULL;
- }
-
/* this calls the protocol-specific function pointer previously set */
if(conn->curl_done)
result = conn->curl_done(conn, status);
infof(data, "Connection #%ld to host %s left intact\n",
conn->connectindex,
conn->bits.httpproxy?conn->proxy.dispname:conn->host.dispname);
+
+ ConnectionDone(conn); /* the connection is no longer in use */
}
return result;
{
CURLcode result=CURLE_OK;
struct connectdata *conn = *connp;
- struct SessionHandle *data=conn->data;
+ struct SessionHandle *data = conn->data;
conn->bits.done = FALSE; /* Curl_done() is not called yet */
conn->bits.do_more = FALSE; /* by default there's no curl_do_more() to use */
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
- * Copyright (C) 1998 - 2005, Daniel Stenberg, <daniel@haxx.se>, et al.
+ * Copyright (C) 1998 - 2006, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
CURLcode Curl_protocol_doing(struct connectdata *conn, bool *done);
void Curl_safefree(void *ptr);
+/* create a connection cache */
+struct conncache *Curl_mk_connc(int type);
+/* free a connection cache */
+void Curl_rm_connc(struct conncache *c);
int Curl_protocol_getsock(struct connectdata *conn,
curl_socket_t *socks,
curl_socket_t *socks,
int numsocks);
+void Curl_addHandleToPipeline(struct SessionHandle *handle,
+ struct curl_llist *pipe);
+void Curl_removeHandleFromPipeline(struct SessionHandle *handle,
+ struct curl_llist *pipe);
+bool Curl_isHandleAtHead(struct SessionHandle *handle,
+ struct curl_llist *pipe);
+void Curl_signalPipeClose(struct curl_llist *pipe);
+
+void Curl_close_connections(struct SessionHandle *data);
+
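As a rough sketch (not the actual call sites) of how these new helpers combine, assuming 'data' is an easy handle and 'conn' a shared connection with the send_pipe/recv_pipe lists described further down:

    /* queue the handle on the connection's send pipeline */
    Curl_addHandleToPipeline(data, conn->send_pipe);

    if(Curl_isHandleAtHead(data, conn->send_pipe))
      /* first in line: this handle may use the write channel right away */
      conn->writechannel_inuse = TRUE;

    /* if the connection breaks, tell every queued handle to start over */
    Curl_signalPipeClose(conn->send_pipe);
    Curl_signalPipeClose(conn->recv_pipe);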
#if 0
CURLcode Curl_protocol_fdset(struct connectdata *conn,
fd_set *read_fd_set,
FTPFILE_SINGLECWD = 3 /* make one CWD, then SIZE / RETR / STOR on the file */
} curl_ftpfile;
+/* This FTP struct is used in the SessionHandle. All FTP data that is
+ connection-oriented must be in ftp_conn to properly deal with the fact
+ that the SessionHandle may change between the times the connection is
+ used. */
struct FTP {
curl_off_t *bytecountp;
char *user; /* user name string */
char *passwd; /* password string */
char *urlpath; /* the originally given path part of the URL */
- char **dirs; /* realloc()ed array for path components */
- int dirdepth; /* number of entries used in the 'dirs' array */
- int diralloc; /* number of entries allocated for the 'dirs' array */
char *file; /* decoded file */
+ bool no_transfer; /* nothing was transferred (possibly because a resumed
+ transfer already was complete) */
+ curl_off_t downloadsize;
+};
+/* ftp_conn is used for connection-oriented FTP data kept in the connectdata
+ struct */
+struct ftp_conn {
char *entrypath; /* the PWD reply when we logged on */
-
+ char **dirs; /* realloc()ed array for path components */
+ int dirdepth; /* number of entries used in the 'dirs' array */
+ int diralloc; /* number of entries allocated for the 'dirs' array */
char *cache; /* data cache between getresponse()-calls */
curl_off_t cache_size; /* size of cache in bytes */
bool dont_check; /* Set to TRUE to prevent the final (post-transfer)
file size and 226/250 status check. It should still
read the line, just ignore the result. */
- bool no_transfer; /* nothing was transfered, (possibly because a resumed
- transfer already was complete) */
long response_time; /* When no timeout is given, this is the amount of
seconds we await for an FTP response. Initialized
in Curl_ftp_connect() */
char *prevpath; /* conn->path from the previous transfer */
char transfertype; /* set by ftp_transfertype for use by Curl_client_write()
and others (A/I or zero) */
-
size_t nread_resp; /* number of bytes currently read of a server response */
char *linestart_resp; /* line start pointer for the FTP server response
reader function */
struct timeval response; /* set to Curl_tvnow() when a command has been sent
off, used to time-out response reading */
ftpstate state; /* always use ftp.c:state() to change state! */
- curl_off_t downloadsize;
};
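A hedged sketch of how the split is meant to be used ('data' and 'conn' are assumed to be in scope; this is not a quote from ftp.c): request-oriented FTP state is reached via the easy handle, connection-oriented state via the connection the handle currently uses.

    struct FTP *ftp = data->reqdata.proto.ftp;   /* e.g. ftp->file */
    struct ftp_conn *ftpc = &conn->proto.ftpc;   /* e.g. ftpc->entrypath */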
/****************************************************************************
bool ipv6_ip; /* we communicate with a remote site specified with pure IPv6
IP address */
bool ipv6; /* we communicate with a site using an IPv6 address */
- bool use_range;
- bool rangestringalloc; /* the range string is malloc()'ed */
bool do_more; /* this is set TRUE if the ->curl_do_more() function is
supposed to be called, after ->curl_do() */
when Curl_done() is called, to prevent Curl_done() to
get invoked twice when the multi interface is
used. */
+ bool stream_was_rewound; /* Indicates that the stream was rewound after a request
+ read past the end of its response byte boundary */
};
struct hostname {
char *dispname; /* name to display, as 'name' might be encoded */
};
+/*
+ * Flags on the keepon member of the Curl_transfer_keeper
+ */
+enum {
+ KEEP_NONE,
+ KEEP_READ,
+ KEEP_WRITE
+};
+
+
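These values are meant as bits to OR into the keepon member mentioned above; a minimal sketch, assuming a struct Curl_transfer_keeper pointer 'k' and a hypothetical 'download_complete' flag:

    k->keepon = KEEP_READ | KEEP_WRITE;    /* both directions still active */

    if(download_complete)                  /* hypothetical condition */
      k->keepon &= ~KEEP_READ;             /* done with the read side */

    if(k->keepon == KEEP_NONE) {
      /* neither reading nor writing remains: the transfer is complete */
    }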
/*
* This struct is all the previously local variables from Curl_perform() moved
* to struct to allow the function to return and get re-invoked better without
*/
struct Curl_transfer_keeper {
+
+ /** Values copied over from the HandleData struct each time on init **/
+
+ curl_off_t size; /* -1 if unknown at this point */
+ curl_off_t *bytecountp; /* return number of bytes read or NULL */
+
+ curl_off_t maxdownload; /* in bytes, the maximum amount of data to fetch, 0
+ means unlimited */
+ curl_off_t *writebytecountp; /* return number of bytes written or NULL */
+
+ /** End of HandleData struct copies **/
+
curl_off_t bytecount; /* total number of bytes read */
curl_off_t writebytecount; /* number of bytes written */
+
+ long headerbytecount; /* only count received headers */
+ long deductheadercount; /* this number of bytes doesn't count when we check
+ if anything has been transferred at the end of
+ a connection. We use this counter to make only
+ a 100 reply (without a following second response
+ code) result in a CURLE_GOT_NOTHING error code */
+
struct timeval start; /* transfer started at this time */
struct timeval now; /* current time */
bool header; /* incoming data has HTTP header */
typedef CURLcode (*Curl_do_more_func)(struct connectdata *);
typedef CURLcode (*Curl_done_func)(struct connectdata *, CURLcode);
+
+/*
+ * Stores request-specific data in the easy handle (SessionHandle).
+ * Previously, these members were on the connectdata struct, but since a
+ * conn struct may now be shared between different SessionHandles, we
+ * store this request-specific data here instead.
+ */
+struct HandleData {
+ char *pathbuffer;/* allocated buffer to store the URL's path part in */
+ char *path; /* path to use, points to somewhere within the pathbuffer
+ area */
+
+ char *newurl; /* This can only be set if a Location: was in the
+ document headers */
+
+ /* This struct is inited when needed */
+ struct Curl_transfer_keeper keep;
+
+ /* 'upload_present' is used to keep a byte counter of how much data there is
+ still left in the buffer, aimed for upload. */
+ ssize_t upload_present;
+
+ /* 'upload_fromhere' is used as a read-pointer when we uploaded parts of a
+ buffer, so the next read should read from where this pointer points to,
+ and the 'upload_present' contains the number of bytes available at this
+ position */
+ char *upload_fromhere;
+
+ curl_off_t size; /* -1 if unknown at this point */
+ curl_off_t *bytecountp; /* return number of bytes read or NULL */
+
+ curl_off_t maxdownload; /* in bytes, the maximum amount of data to fetch, 0
+ means unlimited */
+ curl_off_t *writebytecountp; /* return number of bytes written or NULL */
+
+ bool use_range;
+ bool rangestringalloc; /* the range string is malloc()'ed */
+
+ char *range; /* range, if used. See README for detailed specification on
+ this syntax. */
+ curl_off_t resume_from; /* continue [ftp] transfer from here */
+
+ /* Protocol specific data */
+
+ union {
+ struct HTTP *http;
+ struct HTTP *https; /* alias, just for the sake of being more readable */
+ struct FTP *ftp;
+ void *tftp; /* private for tftp.c-eyes only */
+ struct FILEPROTO *file;
+ void *telnet; /* private for telnet.c-eyes only */
+ void *generic;
+ } proto;
+};
+
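In practice the move means that code which used to reach request state through the connection now reaches it through the easy handle; condensed to a single member from the url.c hunks above:

    /* before: request state was kept on the connection */
    conn->resume_from = data->set.set_resume_from;

    /* after: the same state lives on the easy handle, so it survives the
       connection being picked up by another handle */
    data->reqdata.resume_from = data->set.set_resume_from;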
/*
* The connectdata struct contains all fields and variables that should be
* unique for an entire connection.
*/
struct connectdata {
- /**** Fields set when inited and not modified again */
- struct SessionHandle *data; /* link to the root CURL struct */
- long connectindex; /* what index in the connects index this particular
- struct has */
+ /* 'data' is the CURRENT SessionHandle using this connection -- take great
+ caution that this might very well vary between different times this
+ connection is used! */
+ struct SessionHandle *data;
+
+ bool inuse; /* This is a marker for the connection cache logic. If this is
+ TRUE this handle is being used by an easy handle and cannot
+ be used by any other easy handle without careful
+ consideration (== only for pipelining). */
+ /**** Fields set when inited and not modified again */
+ long connectindex; /* what index in the connection cache's connects array
+ this particular struct has */
long protocol; /* PROT_* flags concerning the protocol set */
#define PROT_MISSING (1<<0)
+#define PROT_CLOSEACTION (1<<1) /* needs action before socket close */
#define PROT_HTTP (1<<2)
#define PROT_HTTPS (1<<3)
#define PROT_FTP (1<<4)
#define PROT_DICT (1<<6)
#define PROT_LDAP (1<<7)
#define PROT_FILE (1<<8)
-#define PROT_TFTP (1<<11)
#define PROT_FTPS (1<<9)
#define PROT_SSL (1<<10) /* protocol requires SSL */
+#define PROT_TFTP (1<<11)
/* 'dns_entry' is the particular host we use. This points to an entry in the
DNS cache and it will not get pruned while locked. It gets unlocked in
struct hostname host;
struct hostname proxy;
- char *pathbuffer;/* allocated buffer to store the URL's path part in */
- char *path; /* path to use, points to somewhere within the pathbuffer
- area */
long port; /* which port to use locally */
unsigned short remote_port; /* what remote port to connect to,
not the proxy port! */
- curl_off_t bytecount;
- long headerbytecount; /* only count received headers */
- long deductheadercount; /* this amount of bytes doesn't count when we check
- if anything has been transfered at the end of
- a connection. We use this counter to make only
- a 100 reply (without a following second response
- code) result in a CURLE_GOT_NOTHING error code */
-
- char *range; /* range, if used. See README for detailed specification on
- this syntax. */
- curl_off_t resume_from; /* continue [ftp] transfer from here */
char *user; /* user name string, allocated */
char *passwd; /* password string, allocated */
struct timeval created; /* creation time */
curl_socket_t sock[2]; /* two sockets, the second is used for the data
transfer when doing FTP */
- curl_off_t maxdownload; /* in bytes, the maximum amount of data to fetch, 0
- means unlimited */
struct ssl_connect_data ssl[2]; /* this is for ssl-stuff */
struct ssl_config_data ssl_config;
/**** curl_get() phase fields */
- /* READ stuff */
curl_socket_t sockfd; /* socket to read from or CURL_SOCKET_BAD */
- curl_off_t size; /* -1 if unknown at this point */
- curl_off_t *bytecountp; /* return number of bytes read or NULL */
-
- /* WRITE stuff */
curl_socket_t writesockfd; /* socket to write to, it may very
well be the same we read from.
CURL_SOCKET_BAD disables */
- curl_off_t *writebytecountp; /* return number of bytes written or NULL */
/** Dynamicly allocated strings, may need to be freed before this **/
/** struct is killed. **/
char *cookiehost; /* free later if not NULL */
} allocptr;
- char *newurl; /* This can only be set if a Location: was in the
- document headers */
-
int sec_complete; /* if krb4 is enabled for this connection */
#ifdef HAVE_KRB4
enum protection_level command_prot;
struct sockaddr_in local_addr;
#endif
- /*************** Request - specific items ************/
- /* previously this was in the urldata struct */
- union {
- struct HTTP *http;
- struct HTTP *https; /* alias, just for the sake of being more readable */
- struct FTP *ftp;
- void *tftp; /* private for tftp.c-eyes only */
- struct FILEPROTO *file;
- void *telnet; /* private for telnet.c-eyes only */
- void *generic;
- } proto;
+ bool readchannel_inuse; /* whether the read channel is in use by an easy handle */
+ bool writechannel_inuse; /* whether the write channel is in use by an easy handle */
+ bool is_in_pipeline; /* TRUE if this connection is in a pipeline */
- /* This struct is inited when needed */
- struct Curl_transfer_keeper keep;
+ struct curl_llist *send_pipe; /* List of handles waiting to
+ send on this pipeline */
+ struct curl_llist *recv_pipe; /* List of handles waiting to read
+ their responses on this pipeline */
- /* 'upload_present' is used to keep a byte counter of how much data there is
- still left in the buffer, aimed for upload. */
- ssize_t upload_present;
+ char master_buffer[BUFSIZE]; /* The master buffer for this connection. */
+ size_t read_pos;
+ size_t buf_len;
- /* 'upload_fromhere' is used as a read-pointer when we uploaded parts of a
- buffer, so the next read should read from where this pointer points to,
- and the 'upload_present' contains the number of bytes available at this
- position */
- char *upload_fromhere;
+ /*************** Request - specific items ************/
+
+ /* previously this was in the urldata struct */
curl_read_callback fread; /* function that reads the input */
void *fread_in; /* pointer to pass to the fread() above */
/* data used for the asynch name resolve callback */
struct Curl_async async;
#endif
+
struct connectdata *sec_conn; /* secondary connection for 3rd party
transfer */
+ char *sec_path; /* The source path for FTP 3rd party */
+ char *sec_pathbuffer;
enum { NORMAL, SOURCE3RD, TARGET3RD } xfertype;
int trlMax; /* allocated buffer size */
int trlPos; /* index of where to store data */
+ union {
+ struct ftp_conn ftpc;
+ } proto;
};
/* The end of connectdata. */
};
+struct conncache {
+ /* 'connects' will be an allocated array with pointers. If the pointer is
+ set, it holds an allocated connection. */
+ struct connectdata **connects;
+ long num; /* size of the 'connects' array */
+ enum {
+ CONNCACHE_PRIVATE, /* used for an easy handle alone */
+ CONNCACHE_MULTI /* shared within a multi handle */
+ } type;
+};
+
+
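A hedged sketch of the intended life cycle (not the actual libcurl code; 'data' is assumed to be an easy handle, and connc is the new member added to UrlState just below): an easy handle used on its own gets a private cache, while a multi handle creates a shared one and points its easy handles at it.

    struct conncache *connc = Curl_mk_connc(CONNCACHE_PRIVATE);
    if(!connc)
      return CURLE_OUT_OF_MEMORY;          /* allocation failed */

    data->state.connc = connc;             /* transfers go via this cache */

    /* ... transfers ... */

    Curl_rm_connc(connc);                  /* freed when the owning handle dies */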
struct UrlState {
enum {
Curl_if_none,
Curl_if_multi
} used_interface;
+ struct conncache *connc; /* points to the connection cache this handle
+ uses */
+
/* buffers to store authentication data in, as parsed from input options */
struct timeval keeps_speed; /* for the progress meter really */
- /* 'connects' will be an allocated array with pointers. If the pointer is
- set, it holds an allocated connection. */
- struct connectdata **connects;
- long numconnects; /* size of the 'connects' array */
long lastconnect; /* index of most recent connect or -1 if undefined */
char *headerbuff; /* allocated buffer to store headers in */
bytes / second */
bool this_is_a_follow; /* this is a followed Location: request */
+ bool is_in_pipeline; /* Indicates whether this handle is part of a pipeline */
+
char *first_host; /* if set, this should be the host name that we will
send authorization to, and no one else. Used to make Location:
following not keep sending user+password... This is
struct auth authproxy;
bool authproblem; /* TRUE if there's some problem authenticating */
+
#ifdef USE_ARES
ares_channel areschannel; /* for name resolves */
#endif
bool expect100header; /* TRUE if we added Expect: 100-continue */
+ bool pipe_broke; /* TRUE if the connection we were pipelined on broke
+ and we need to restart from the beginning */
+ bool cancelled; /* TRUE if the request was cancelled */
+
#ifndef WIN32
/* do FTP line-end conversions on most platforms */
#define CURL_DO_LINEEND_CONV
/* for FTP downloads: how many CRLFs did we converted to LFs? */
curl_off_t crlf_conversions;
#endif
+ /* If set to non-NULL, there's a connection in a shared connection cache
+ that uses this handle so we can't kill this SessionHandle just yet but
+ must keep it around and add it to the list of handles to kill once all
+ its connections are gone */
+ void *shared_conn;
};
struct SessionHandle {
struct Names dns;
struct Curl_multi *multi; /* if non-NULL, points to the multi handle
- struct of which this "belongs" */
+ struct to which this "belongs" */
struct Curl_share *share; /* Share, handles global variable mutexing */
+ struct HandleData reqdata; /* Request-specific data */
struct UserDefined set; /* values set by the libcurl user */
struct DynamicStatic change; /* possibly modified userdefined data */
test250 test251 test252 test253 test254 test255 test521 test522 test523 \
test256 test257 test258 test259 test260 test261 test262 test263 test264 \
test265 test266 test267 test268 test269 test270 test271 test272 test273 \
- test274 test275 test524 test525 test276 test277
+ test274 test275 test524 test525 test276 test277 test526 test527 test528
--- /dev/null
+<info>
+<keywords>
+FTP
+PASV
+RETR
+</keywords>
+</info>
+# Server-side
+<reply>
+<data>
+file contents should appear once for each file
+</data>
+<datacheck>
+file contents should appear once for each file
+file contents should appear once for each file
+file contents should appear once for each file
+file contents should appear once for each file
+</datacheck>
+</reply>
+
+# Client-side
+<client>
+<server>
+ftp
+</server>
+<tool>
+lib526
+</tool>
+ <name>
+FTP RETR same file using different handles but same connection
+ </name>
+ <command>
+ftp://%HOSTIP:%FTPPORT/path/526
+</command>
+</client>
+
+# Verify data after the test has been "shot"
+<verify>
+<strip>
+</strip>
+<protocol>
+USER anonymous\r
+PASS curl_by_daniel@haxx.se\r
+PWD\r
+CWD path\r
+EPSV\r
+TYPE I\r
+SIZE 526\r
+RETR 526\r
+EPSV\r
+SIZE 526\r
+RETR 526\r
+EPSV\r
+SIZE 526\r
+RETR 526\r
+EPSV\r
+SIZE 526\r
+RETR 526\r
+QUIT\r
+</protocol>
+</verify>
--- /dev/null
+<info>
+<keywords>
+FTP
+PASV
+RETR
+</keywords>
+</info>
+# Server-side
+<reply>
+<data>
+file contents should appear once for each file
+</data>
+<datacheck>
+file contents should appear once for each file
+file contents should appear once for each file
+file contents should appear once for each file
+file contents should appear once for each file
+</datacheck>
+</reply>
+
+# Client-side
+<client>
+<server>
+ftp
+</server>
+<tool>
+lib527
+</tool>
+ <name>
+FTP RETR same file using different handles but same connection
+ </name>
+ <command>
+ftp://%HOSTIP:%FTPPORT/path/527
+</command>
+</client>
+
+# Verify data after the test has been "shot"
+<verify>
+<strip>
+</strip>
+<protocol>
+USER anonymous\r
+PASS curl_by_daniel@haxx.se\r
+PWD\r
+CWD path\r
+EPSV\r
+TYPE I\r
+SIZE 527\r
+RETR 527\r
+EPSV\r
+SIZE 527\r
+RETR 527\r
+EPSV\r
+SIZE 527\r
+RETR 527\r
+EPSV\r
+SIZE 527\r
+RETR 527\r
+QUIT\r
+</protocol>
+</verify>
--- /dev/null
+<info>
+<keywords>
+HTTP
+</keywords>
+</info>
+# Server-side
+<reply>
+<data>
+HTTP/1.1 200 OK
+Date: Thu, 09 Nov 2010 14:49:00 GMT
+Server: test-server/fake
+Content-Length: 47
+
+file contents should appear once for each file
+</data>
+<datacheck>
+file contents should appear once for each file
+file contents should appear once for each file
+file contents should appear once for each file
+file contents should appear once for each file
+</datacheck>
+</reply>
+
+# Client-side
+<client>
+<server>
+http
+</server>
+<tool>
+lib526
+</tool>
+ <name>
+HTTP GET same file using different handles but same connection
+ </name>
+ <command>
+http://%HOSTIP:%HTTPPORT/path/528
+</command>
+</client>
+
+# Verify data after the test has been "shot"
+<verify>
+<strip>
+</strip>
+<protocol>
+GET /path/528 HTTP/1.1\r
+Host: %HOSTIP:%HTTPPORT\r
+Accept: */*\r
+\r
+GET /path/528 HTTP/1.1\r
+Host: %HOSTIP:%HTTPPORT\r
+Accept: */*\r
+\r
+GET /path/528 HTTP/1.1\r
+Host: %HOSTIP:%HTTPPORT\r
+Accept: */*\r
+\r
+GET /path/528 HTTP/1.1\r
+Host: %HOSTIP:%HTTPPORT\r
+Accept: */*\r
+\r
+</protocol>
+</verify>
# These are all libcurl test programs
noinst_PROGRAMS = lib500 lib501 lib502 lib503 lib504 lib505 lib506 lib507 \
lib508 lib509 lib510 lib511 lib512 lib513 lib514 lib515 lib516 lib517 \
- lib518 lib519 lib520 lib521 lib523 lib524 lib525
+ lib518 lib519 lib520 lib521 lib523 lib524 lib525 lib526 lib527
lib500_SOURCES = lib500.c $(SUPPORTFILES)
lib500_LDADD = $(LIBDIR)/libcurl.la
lib525_SOURCES = lib525.c $(SUPPORTFILES)
lib525_LDADD = $(LIBDIR)/libcurl.la
lib525_DEPENDENCIES = $(LIBDIR)/libcurl.la
+
+lib526_SOURCES = lib526.c $(SUPPORTFILES)
+lib526_LDADD = $(LIBDIR)/libcurl.la
+lib526_DEPENDENCIES = $(LIBDIR)/libcurl.la
+
+lib527_SOURCES = lib526.c $(SUPPORTFILES)
+lib527_CFLAGS = -DLIB527
+lib527_LDADD = $(LIBDIR)/libcurl.la
+lib527_DEPENDENCIES = $(LIBDIR)/libcurl.la
+
--- /dev/null
+/*****************************************************************************
+ * _ _ ____ _
+ * Project ___| | | | _ \| |
+ * / __| | | | |_) | |
+ * | (__| |_| | _ <| |___
+ * \___|\___/|_| \_\_____|
+ *
+ * $Id$
+ */
+
+/*
+ * This code sets up multiple easy handles that transfer a single file from
+ * the same URL, in a serial manner after each other. Due to the connection
+ * sharing within the multi handle all transfers are performed on the same
+ * persistent connection.
+ *
+ * This source code is used for lib526 _and_ lib527 with only #ifdefs
+ * controlling the small differences. lib526 closes all easy handles after
+ * they all have transfered the file over the single connection, while lib527
+ * closes each easy handle after each single transfer. 526 and 527 use FTP,
+ * while 528 uses the lib526 tool but use HTTP.
+ */
+
+#include "test.h"
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#define NUM_HANDLES 4
+
+int test(char *URL)
+{
+ int res = 0;
+ CURL *curl[NUM_HANDLES];
+ int running;
+ char done=FALSE;
+ CURLM *m;
+ int current=0;
+ int i;
+
+ /* On Windows, this will init the winsock stuff */
+ curl_global_init(CURL_GLOBAL_ALL);
+
+ /* get NUM_HANDLES easy handles */
+ for(i=0; i < NUM_HANDLES; i++) {
+ curl[i] = curl_easy_init();
+ if(!curl[i])
+ return 100 + i; /* major bad */
+ curl_easy_setopt(curl[i], CURLOPT_URL, URL);
+
+ /* go verbose */
+ curl_easy_setopt(curl[i], CURLOPT_VERBOSE, 1);
+ }
+
+ m = curl_multi_init();
+
+ res = (int)curl_multi_add_handle(m, curl[current]);
+
+ fprintf(stderr, "Start at URL 0\n");
+
+ while(!done) {
+ fd_set rd, wr, exc;
+ int max_fd;
+ struct timeval interval;
+
+ interval.tv_sec = 1;
+ interval.tv_usec = 0;
+
+ while (res == CURLM_CALL_MULTI_PERFORM) {
+ res = (int)curl_multi_perform(m, &running);
+ if (running <= 0) {
+#ifdef LIB527
+ curl_easy_cleanup(curl[current]);
+#endif
+ if(++current < NUM_HANDLES) {
+ fprintf(stderr, "Advancing to URL %d\n", current);
+ res = (int)curl_multi_add_handle(m, curl[current]);
+ if(res) {
+ fprintf(stderr, "add handle failed: %d.\n", res);
+ res = 243;
+ break;
+ }
+ }
+ else
+ done = TRUE; /* bail out */
+ break;
+ }
+ }
+ if(done)
+ break;
+
+ if (res != CURLM_OK) {
+ fprintf(stderr, "not okay???\n");
+ break;
+ }
+
+ FD_ZERO(&rd);
+ FD_ZERO(&wr);
+ FD_ZERO(&exc);
+ max_fd = 0;
+
+ if (curl_multi_fdset(m, &rd, &wr, &exc, &max_fd) != CURLM_OK) {
+ fprintf(stderr, "unexpected failured of fdset.\n");
+ res = 189;
+ break;
+ }
+
+ if (select(max_fd+1, &rd, &wr, &exc, &interval) == -1) {
+ fprintf(stderr, "bad select??\n");
+ res = 195;
+ break;
+ }
+
+ res = CURLM_CALL_MULTI_PERFORM;
+ }
+
+#ifndef LIB527
+ /* remove and clean up the NUM_HANDLES easy handles */
+ for(i=0; i < NUM_HANDLES; i++) {
+ curl_multi_remove_handle(m, curl[i]);
+ curl_easy_cleanup(curl[i]);
+ }
+#endif
+ curl_multi_cleanup(m);
+
+ curl_global_cleanup();
+ return res;
+}