doom3-gpl
Doom 3 GPL source release
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
transfer.c
Go to the documentation of this file.
1 /***************************************************************************
2  * _ _ ____ _
3  * Project ___| | | | _ \| |
4  * / __| | | | |_) | |
5  * | (__| |_| | _ <| |___
6  * \___|\___/|_| \_\_____|
7  *
8  * Copyright (C) 1998 - 2004, Daniel Stenberg, <daniel@haxx.se>, et al.
9  *
10  * This software is licensed as described in the file COPYING, which
11  * you should have received as part of this distribution. The terms
12  * are also available at http://curl.haxx.se/docs/copyright.html.
13  *
14  * You may opt to use, copy, modify, merge, publish, distribute and/or sell
15  * copies of the Software, and permit persons to whom the Software is
16  * furnished to do so, under the terms of the COPYING file.
17  *
18  * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
19  * KIND, either express or implied.
20  *
21  * $Id: transfer.c,v 1.212 2004/03/16 09:16:38 bagder Exp $
22  ***************************************************************************/
23 
24 #include "setup.h"
25 
26 /* -- WIN32 approved -- */
27 #include <stdio.h>
28 #include <string.h>
29 #include <stdarg.h>
30 #include <stdlib.h>
31 #include <ctype.h>
32 #ifdef HAVE_SYS_TYPES_H
33 #include <sys/types.h>
34 #endif
35 #include <sys/stat.h>
36 
37 #include <errno.h>
38 
39 #include "strtoofft.h"
40 #include "strequal.h"
41 
42 #if defined(WIN32) && !defined(__GNUC__) || defined(__MINGW32__)
43 #include <time.h>
44 #include <io.h>
45 #else
46 #ifdef HAVE_SYS_SOCKET_H
47 #include <sys/socket.h>
48 #endif
49 #ifdef HAVE_NETINET_IN_H
50 #include <netinet/in.h>
51 #endif
52 #include <sys/time.h>
53 #ifdef HAVE_UNISTD_H
54 #include <unistd.h>
55 #endif
56 #include <netdb.h>
57 #ifdef HAVE_ARPA_INET_H
58 #include <arpa/inet.h>
59 #endif
60 #ifdef HAVE_NET_IF_H
61 #include <net/if.h>
62 #endif
63 #ifdef HAVE_SYS_IOCTL_H
64 #include <sys/ioctl.h>
65 #endif
66 #include <signal.h>
67 
68 #ifdef HAVE_SYS_PARAM_H
69 #include <sys/param.h>
70 #endif
71 
72 #ifdef HAVE_SYS_SELECT_H
73 #include <sys/select.h>
74 #endif
75 
76 #ifndef HAVE_SELECT
77 #error "We can't compile without select() support!"
78 #endif
79 #ifndef HAVE_SOCKET
80 #error "We can't compile without socket() support!"
81 #endif
82 
83 #endif
84 
85 #include "urldata.h"
86 #include <curl/curl.h>
87 #include <curl/types.h>
88 #include "netrc.h"
89 
90 #include "content_encoding.h"
91 #include "hostip.h"
92 #include "transfer.h"
93 #include "sendf.h"
94 #include "speedcheck.h"
95 #include "progress.h"
96 #include "getdate.h"
97 #include "http.h"
98 #include "url.h"
99 #include "getinfo.h"
100 #include "ssluse.h"
101 #include "http_digest.h"
102 #include "http_ntlm.h"
103 #include "http_negotiate.h"
104 #include "share.h"
105 
106 #define _MPRINTF_REPLACE /* use our functions only */
107 #include <curl/mprintf.h>
108 
109 /* The last #include file should be: */
110 #ifdef CURLDEBUG
111 #include "memdebug.h"
112 #endif
113 
114 #define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
115 
116 enum {
120 };
121 
/* We keep this static and global since this is read-only and NEVER
   changed. It should just remain a blanked-out timeout value: passing a
   {0,0} timeval to select() makes it poll and return immediately instead
   of blocking. */
static struct timeval notimeout={0,0};
125 
126 /*
127  * This function will call the read callback to fill our buffer with data
128  * to upload.
129  */
130 static int fillbuffer(struct connectdata *conn,
131  int bytes)
132 {
133  int buffersize = bytes;
134  int nread;
135 
136  if(conn->bits.upload_chunky) {
137  /* if chunked Transfer-Encoding */
138  buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
139  conn->upload_fromhere += 10; /* 32bit hex + CRLF */
140  }
141 
142  nread = conn->fread(conn->upload_fromhere, 1,
143  buffersize, conn->fread_in);
144 
145  if(!conn->bits.forbidchunk && conn->bits.upload_chunky) {
146  /* if chunked Transfer-Encoding */
147  char hexbuffer[11];
148  int hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
149  "%x\r\n", nread);
150  /* move buffer pointer */
151  conn->upload_fromhere -= hexlen;
152  nread += hexlen;
153 
154  /* copy the prefix to the buffer */
155  memcpy(conn->upload_fromhere, hexbuffer, hexlen);
156 
157  /* always append CRLF to the data */
158  memcpy(conn->upload_fromhere + nread, "\r\n", 2);
159 
160  if((nread - hexlen) == 0) {
161  /* mark this as done once this chunk is transfered */
162  conn->keep.upload_done = TRUE;
163  }
164 
165  nread+=2; /* for the added CRLF */
166  }
167  return nread;
168 }
169 
170 /*
171  * checkhttpprefix()
172  *
173  * Returns TRUE if member of the list matches prefix of string
174  */
175 static bool
176 checkhttpprefix(struct SessionHandle *data,
177  const char *s)
178 {
179  struct curl_slist *head = data->set.http200aliases;
180 
181  while (head) {
182  if (checkprefix(head->data, s))
183  return TRUE;
184  head = head->next;
185  }
186 
187  if(checkprefix("HTTP/", s))
188  return TRUE;
189 
190  return FALSE;
191 }
192 
194  bool *done)
195 {
196  struct Curl_transfer_keeper *k = &conn->keep;
197  struct SessionHandle *data = conn->data;
199  ssize_t nread; /* number of bytes read */
200  int didwhat=0;
201 
202  /* These two are used only if no other select() or _fdset() have been
203  invoked before this. This typicly happens if you use the multi interface
204  and call curl_multi_perform() without calling curl_multi_fdset()
205  first. */
206  fd_set extrareadfd;
207  fd_set extrawritefd;
208 
209  fd_set *readfdp = k->readfdp;
210  fd_set *writefdp = k->writefdp;
211  curl_off_t contentlength;
212 
213  if((k->keepon & KEEP_READ) && !readfdp) {
214  /* reading is requested, but no socket descriptor pointer was set */
215  FD_ZERO(&extrareadfd);
216  FD_SET(conn->sockfd, &extrareadfd);
217  readfdp = &extrareadfd;
218 
219  /* no write, no exceptions, no timeout */
220  select(conn->sockfd+1, readfdp, NULL, NULL, &notimeout);
221  }
222  if((k->keepon & KEEP_WRITE) && !writefdp) {
223  /* writing is requested, but no socket descriptor pointer was set */
224  FD_ZERO(&extrawritefd);
225  FD_SET(conn->writesockfd, &extrawritefd);
226  writefdp = &extrawritefd;
227 
228  /* no read, no exceptions, no timeout */
229  select(conn->writesockfd+1, NULL, writefdp, NULL, &notimeout);
230  }
231 
232  do {
233  /* If we still have reading to do, we check if we have a readable
234  socket. Sometimes the reafdp is NULL, if no fd_set was done using
235  the multi interface and then we can do nothing but to attempt a
236  read to be sure. */
237  if((k->keepon & KEEP_READ) &&
238  (!readfdp || FD_ISSET(conn->sockfd, readfdp))) {
239 
240  bool readdone = TRUE;
241 
242  /* This is where we loop until we have read everything there is to
243  read or we get a EWOULDBLOCK */
244  do {
245  int buffersize = data->set.buffer_size?
246  data->set.buffer_size:BUFSIZE -1;
247 
248  /* receive data from the network! */
249  int readrc = Curl_read(conn, conn->sockfd, k->buf, buffersize, &nread);
250 
251  /* subzero, this would've blocked */
252  if(0>readrc)
253  break; /* get out of loop */
254 
255  /* get the CURLcode from the int */
256  result = (CURLcode)readrc;
257 
258  if(result>0)
259  return result;
260 
261  if ((k->bytecount == 0) && (k->writebytecount == 0)) {
263  if(k->wait100_after_headers)
264  /* set time stamp to compare with when waiting for the 100 */
265  k->start100 = Curl_tvnow();
266  }
267 
268  didwhat |= KEEP_READ;
269 
270  /* NULL terminate, allowing string ops to be used */
271  if (0 < nread)
272  k->buf[nread] = 0;
273 
274  /* if we receive 0 or less here, the server closed the connection and
275  we bail out from this! */
276  else if (0 >= nread) {
277  k->keepon &= ~KEEP_READ;
278  FD_ZERO(&k->rkeepfd);
279  readdone = TRUE;
280  break;
281  }
282 
283  /* Default buffer to use when we write the buffer, it may be changed
284  in the flow below before the actual storing is done. */
285  k->str = k->buf;
286 
287  /* Since this is a two-state thing, we check if we are parsing
288  headers at the moment or not. */
289  if (k->header) {
290  /* we are in parse-the-header-mode */
291  bool stop_reading = FALSE;
292 
293  /* header line within buffer loop */
294  do {
295  int hbufp_index;
296  int rest_length;
297  int full_length;
298  int writetype;
299 
300  /* str_start is start of line within buf */
301  k->str_start = k->str;
302 
303  k->end_ptr = strchr (k->str_start, '\n');
304 
305  if (!k->end_ptr) {
306  /* Not a complete header line within buffer, append the data to
307  the end of the headerbuff. */
308 
309  if (k->hbuflen + nread >= data->state.headersize) {
310  /* We enlarge the header buffer as it is too small */
311  char *newbuff;
312  long newsize=CURLMAX((k->hbuflen+nread)*3/2,
313  data->state.headersize*2);
314  hbufp_index = k->hbufp - data->state.headerbuff;
315  newbuff = (char *)realloc(data->state.headerbuff, newsize);
316  if(!newbuff) {
317  failf (data, "Failed to alloc memory for big header!");
318  return CURLE_OUT_OF_MEMORY;
319  }
320  data->state.headersize=newsize;
321  data->state.headerbuff = newbuff;
322  k->hbufp = data->state.headerbuff + hbufp_index;
323  }
324  memcpy(k->hbufp, k->str, nread);
325  k->hbufp += nread;
326  k->hbuflen += nread;
327  if (!k->headerline && (k->hbuflen>5)) {
328  /* make a first check that this looks like a HTTP header */
329  if(!checkhttpprefix(data, data->state.headerbuff)) {
330  /* this is not the beginning of a HTTP first header line */
331  k->header = FALSE;
332  k->badheader = HEADER_ALLBAD;
333  break;
334  }
335  }
336 
337  break; /* read more and try again */
338  }
339 
340  /* decrease the size of the remaining (supposed) header line */
341  rest_length = (k->end_ptr - k->str)+1;
342  nread -= rest_length;
343 
344  k->str = k->end_ptr + 1; /* move past new line */
345 
346  full_length = k->str - k->str_start;
347 
348  /*
349  * We're about to copy a chunk of data to the end of the
350  * already received header. We make sure that the full string
351  * fit in the allocated header buffer, or else we enlarge
352  * it.
353  */
354  if (k->hbuflen + full_length >=
355  data->state.headersize) {
356  char *newbuff;
357  long newsize=CURLMAX((k->hbuflen+full_length)*3/2,
358  data->state.headersize*2);
359  hbufp_index = k->hbufp - data->state.headerbuff;
360  newbuff = (char *)realloc(data->state.headerbuff, newsize);
361  if(!newbuff) {
362  failf (data, "Failed to alloc memory for big header!");
363  return CURLE_OUT_OF_MEMORY;
364  }
365  data->state.headersize= newsize;
366  data->state.headerbuff = newbuff;
367  k->hbufp = data->state.headerbuff + hbufp_index;
368  }
369 
370  /* copy to end of line */
371  strncpy (k->hbufp, k->str_start, full_length);
372  k->hbufp += full_length;
373  k->hbuflen += full_length;
374  *k->hbufp = 0;
375  k->end_ptr = k->hbufp;
376 
377  k->p = data->state.headerbuff;
378 
379  /****
380  * We now have a FULL header line that p points to
381  *****/
382 
383  if(!k->headerline) {
384  /* the first read header */
385  if((k->hbuflen>5) &&
386  !checkhttpprefix(data, data->state.headerbuff)) {
387  /* this is not the beginning of a HTTP first header line */
388  k->header = FALSE;
389  if(nread)
390  /* since there's more, this is a partial bad header */
391  k->badheader = HEADER_PARTHEADER;
392  else {
393  /* this was all we read so its all a bad header */
394  k->badheader = HEADER_ALLBAD;
395  nread = rest_length;
396  }
397  break;
398  }
399  }
400 
401  if (('\n' == *k->p) || ('\r' == *k->p)) {
402  int headerlen;
403  /* Zero-length header line means end of headers! */
404 
405  if ('\r' == *k->p)
406  k->p++; /* pass the \r byte */
407  if ('\n' == *k->p)
408  k->p++; /* pass the \n byte */
409 
410  if(100 == k->httpcode) {
411  /*
412  * We have made a HTTP PUT or POST and this is 1.1-lingo
413  * that tells us that the server is OK with this and ready
414  * to receive the data.
415  * However, we'll get more headers now so we must get
416  * back into the header-parsing state!
417  */
418  k->header = TRUE;
419  k->headerline = 0; /* restart the header line counter */
420  /* if we did wait for this do enable write now! */
421  if (k->write_after_100_header) {
422 
424  FD_SET (conn->writesockfd, &k->writefd); /* write */
425  k->keepon |= KEEP_WRITE;
426  k->wkeepfd = k->writefd;
427  }
428  }
429  else
430  k->header = FALSE; /* no more header to parse! */
431 
432  if (417 == k->httpcode) {
433  /*
434  * we got: "417 Expectation Failed" this means:
435  * we have made a HTTP call and our Expect Header
436  * seems to cause a problem => abort the write operations
437  * (or prevent them from starting).
438  */
440  k->keepon &= ~KEEP_WRITE;
441  FD_ZERO(&k->wkeepfd);
442  }
443 
444  /* now, only output this if the header AND body are requested:
445  */
446  writetype = CLIENTWRITE_HEADER;
447  if (data->set.http_include_header)
448  writetype |= CLIENTWRITE_BODY;
449 
450  headerlen = k->p - data->state.headerbuff;
451 
452  result = Curl_client_write(data, writetype,
453  data->state.headerbuff,
454  headerlen);
455  if(result)
456  return result;
457 
458  data->info.header_size += headerlen;
459  conn->headerbytecount += headerlen;
460 
461  if (conn->resume_from &&
462  !k->content_range &&
463  (data->set.httpreq==HTTPREQ_GET)) {
464  if(k->httpcode == 416) {
465  /* "Requested Range Not Satisfiable" */
466  stop_reading = TRUE;
467  }
468  else {
469  /* we wanted to resume a download, although the server
470  doesn't seem to support this and we did this with a GET
471  (if it wasn't a GET we did a POST or PUT resume) */
472  failf (data, "HTTP server doesn't seem to support "
473  "byte ranges. Cannot resume.");
474  return CURLE_HTTP_RANGE_ERROR;
475  }
476  }
477 
478  if(!stop_reading)
479  /* *auth_act() checks what authentication methods that are
480  available and decides which one (if any) to use. It will
481  set 'newurl' if an auth metod was picked. */
482  Curl_http_auth_act(conn);
483 
484  if(!k->header) {
485  /*
486  * really end-of-headers.
487  *
488  * If we requested a "no body", this is a good time to get
489  * out and return home.
490  */
491  if(data->set.no_body)
492  stop_reading = TRUE;
493  else {
494  /* If we know the expected size of this document, we set the
495  maximum download size to the size of the expected
496  document or else, we won't know when to stop reading!
497 
498  Note that we set the download maximum even if we read a
499  "Connection: close" header, to make sure that
500  "Content-Length: 0" still prevents us from attempting to
501  read the (missing) response-body.
502  */
503  /* According to RFC2616 section 4.4, we MUST ignore
504  Content-Length: headers if we are now receiving data
505  using chunked Transfer-Encoding.
506  */
507  if(conn->bits.chunk)
508  conn->size=-1;
509 
510  if(-1 != conn->size) {
511  Curl_pgrsSetDownloadSize(data, conn->size);
512  conn->maxdownload = conn->size;
513  }
514  }
515  /* If max download size is *zero* (nothing) we already
516  have nothing and can safely return ok now! */
517  if(0 == conn->maxdownload)
518  stop_reading = TRUE;
519 
520  if(stop_reading) {
521  /* we make sure that this socket isn't read more now */
522  k->keepon &= ~KEEP_READ;
523  FD_ZERO(&k->rkeepfd);
524  }
525 
526  break; /* exit header line loop */
527  }
528 
529  /* We continue reading headers, so reset the line-based
530  header parsing variables hbufp && hbuflen */
531  k->hbufp = data->state.headerbuff;
532  k->hbuflen = 0;
533  continue;
534  }
535 
536  /*
537  * Checks for special headers coming up.
538  */
539 
540  if (!k->headerline++) {
541  /* This is the first header, it MUST be the error code line
542  or else we consiser this to be the body right away! */
543  int httpversion_major;
544  int nc=sscanf (k->p, " HTTP/%d.%d %3d",
545  &httpversion_major,
546  &k->httpversion,
547  &k->httpcode);
548  if (nc==3) {
549  k->httpversion += 10 * httpversion_major;
550  }
551  else {
552  /* this is the real world, not a Nirvana
553  NCSA 1.5.x returns this crap when asked for HTTP/1.1
554  */
555  nc=sscanf (k->p, " HTTP %3d", &k->httpcode);
556  k->httpversion = 10;
557 
558  /* If user has set option HTTP200ALIASES,
559  compare header line against list of aliases
560  */
561  if (!nc) {
562  if (checkhttpprefix(data, k->p)) {
563  nc = 1;
564  k->httpcode = 200;
565  k->httpversion =
566  (data->set.httpversion==CURL_HTTP_VERSION_1_0)? 10 : 11;
567  }
568  }
569  }
570 
571  if (nc) {
572  data->info.httpcode = k->httpcode;
573  data->info.httpversion = k->httpversion;
574 
575  /* 404 -> URL not found! */
576  if (data->set.http_fail_on_error &&
577  (k->httpcode >= 400)) {
578  /* If we have been told to fail hard on HTTP-errors,
579  here is the check for that: */
580  /* serious error, go home! */
581  failf (data, "The requested URL returned error: %d",
582  k->httpcode);
584  }
585 
586  if(k->httpversion == 10)
587  /* Default action for HTTP/1.0 must be to close, unless
588  we get one of those fancy headers that tell us the
589  server keeps it open for us! */
590  conn->bits.close = TRUE;
591 
592  switch(k->httpcode) {
593  case 204:
594  /* (quote from RFC2616, section 10.2.5): The server has
595  * fulfilled the request but does not need to return an
596  * entity-body ... The 204 response MUST NOT include a
597  * message-body, and thus is always terminated by the first
598  * empty line after the header fields. */
599  /* FALLTHROUGH */
600  case 416: /* Requested Range Not Satisfiable, it has the
601  Content-Length: set as the "real" document but no
602  actual response is sent. */
603  case 304:
604  /* (quote from RFC2616, section 10.3.5): The 304 response
605  * MUST NOT contain a message-body, and thus is always
606  * terminated by the first empty line after the header
607  * fields. */
608  conn->size=0;
609  conn->maxdownload=0;
610  break;
611  default:
612  /* nothing */
613  break;
614  }
615  }
616  else {
617  k->header = FALSE; /* this is not a header line */
618  break;
619  }
620  }
621 
622  /* Check for Content-Length: header lines to get size. Ignore
623  the header completely if we get a 416 response as then we're
624  resuming a document that we don't get, and this header contains
625  info about the true size of the document we didn't get now. */
626  if ((k->httpcode != 416) &&
627  checkprefix("Content-Length:", k->p)) {
628  contentlength = strtoofft(k->p+15, NULL, 10);
629  if (data->set.max_filesize && contentlength >
630  data->set.max_filesize) {
631  failf(data, "Maximum file size exceeded");
633  }
634  conn->size = contentlength;
635  }
636  /* check for Content-Type: header lines to get the mime-type */
637  else if (checkprefix("Content-Type:", k->p)) {
638  char *start;
639  char *end;
640  int len;
641 
642  /* Find the first non-space letter */
643  for(start=k->p+13;
644  *start && isspace((int)*start);
645  start++);
646 
647  end = strchr(start, '\r');
648  if(!end)
649  end = strchr(start, '\n');
650 
651  if(end) {
652  /* skip all trailing space letters */
653  for(; isspace((int)*end) && (end > start); end--);
654 
655  /* get length of the type */
656  len = end-start+1;
657 
658  /* allocate memory of a cloned copy */
660 
661  data->info.contenttype = malloc(len + 1);
662  if (NULL == data->info.contenttype)
663  return CURLE_OUT_OF_MEMORY;
664 
665  /* copy the content-type string */
666  memcpy(data->info.contenttype, start, len);
667  data->info.contenttype[len] = 0; /* zero terminate */
668  }
669  }
670  else if((k->httpversion == 10) &&
671  conn->bits.httpproxy &&
673  "Proxy-Connection:", "keep-alive")) {
674  /*
675  * When a HTTP/1.0 reply comes when using a proxy, the
676  * 'Proxy-Connection: keep-alive' line tells us the
677  * connection will be kept alive for our pleasure.
678  * Default action for 1.0 is to close.
679  */
680  conn->bits.close = FALSE; /* don't close when done */
681  infof(data, "HTTP/1.0 proxy connection set to keep alive!\n");
682  }
683  else if((k->httpversion == 10) &&
684  Curl_compareheader(k->p, "Connection:", "keep-alive")) {
685  /*
686  * A HTTP/1.0 reply with the 'Connection: keep-alive' line
687  * tells us the connection will be kept alive for our
688  * pleasure. Default action for 1.0 is to close.
689  *
690  * [RFC2068, section 19.7.1] */
691  conn->bits.close = FALSE; /* don't close when done */
692  infof(data, "HTTP/1.0 connection set to keep alive!\n");
693  }
694  else if (Curl_compareheader(k->p, "Connection:", "close")) {
695  /*
696  * [RFC 2616, section 8.1.2.1]
697  * "Connection: close" is HTTP/1.1 language and means that
698  * the connection will close when this request has been
699  * served.
700  */
701  conn->bits.close = TRUE; /* close when done */
702  }
703  else if (Curl_compareheader(k->p,
704  "Transfer-Encoding:", "chunked")) {
705  /*
706  * [RFC 2616, section 3.6.1] A 'chunked' transfer encoding
707  * means that the server will send a series of "chunks". Each
708  * chunk starts with line with info (including size of the
709  * coming block) (terminated with CRLF), then a block of data
710  * with the previously mentioned size. There can be any amount
711  * of chunks, and a chunk-data set to zero signals the
712  * end-of-chunks. */
713  conn->bits.chunk = TRUE; /* chunks coming our way */
714 
715  /* init our chunky engine */
716  Curl_httpchunk_init(conn);
717  }
718  else if (checkprefix("Content-Encoding:", k->p) &&
719  data->set.encoding) {
720  /*
721  * Process Content-Encoding. Look for the values: identity,
722  * gzip, deflate, compress, x-gzip and x-compress. x-gzip and
723  * x-compress are the same as gzip and compress. (Sec 3.5 RFC
724  * 2616). zlib cannot handle compress. However, errors are
725  * handled further down when the response body is processed
726  */
727  char *start;
728 
729  /* Find the first non-space letter */
730  for(start=k->p+17;
731  *start && isspace((int)*start);
732  start++);
733 
734  /* Record the content-encoding for later use */
735  if (checkprefix("identity", start))
737  else if (checkprefix("deflate", start))
739  else if (checkprefix("gzip", start)
740  || checkprefix("x-gzip", start))
741  k->content_encoding = GZIP;
742  else if (checkprefix("compress", start)
743  || checkprefix("x-compress", start))
745  }
746  else if (Curl_compareheader(k->p, "Content-Range:", "bytes")) {
747  /* Content-Range: bytes [num]-
748  Content-Range: bytes: [num]-
749 
750  The second format was added August 1st 2000 by Igor
751  Khristophorov since Sun's webserver JavaWebServer/1.1.1
752  obviously sends the header this way! :-( */
753 
754  char *ptr = strstr(k->p, "bytes");
755  ptr+=5;
756 
757  if(*ptr == ':')
758  /* stupid colon skip */
759  ptr++;
760 
761  k->offset = strtoofft(ptr, NULL, 10);
762 
763  if (conn->resume_from == k->offset)
764  /* we asked for a resume and we got it */
765  k->content_range = TRUE;
766  }
767  else if(data->cookies &&
768  checkprefix("Set-Cookie:", k->p)) {
771  Curl_cookie_add(data,
772  data->cookies, TRUE, k->p+11,
773  /* If there is a custom-set Host: name, use it
774  here, or else use real peer host name. */
775  conn->allocptr.cookiehost?
776  conn->allocptr.cookiehost:conn->name,
777  conn->ppath);
779  }
780  else if(checkprefix("Last-Modified:", k->p) &&
781  (data->set.timecondition || data->set.get_filetime) ) {
782  time_t secs=time(NULL);
783  k->timeofdoc = curl_getdate(k->p+strlen("Last-Modified:"),
784  &secs);
785  if(data->set.get_filetime)
786  data->info.filetime = k->timeofdoc;
787  }
788  else if((checkprefix("WWW-Authenticate:", k->p) &&
789  (401 == k->httpcode)) ||
790  (checkprefix("Proxy-authenticate:", k->p) &&
791  (407 == k->httpcode))) {
792  result = Curl_http_auth(conn, k->httpcode, k->p);
793  if(result)
794  return result;
795  }
796  else if ((k->httpcode >= 300 && k->httpcode < 400) &&
797  checkprefix("Location:", k->p)) {
798  if(data->set.http_follow_location) {
799  /* this is the URL that the server advices us to get instead */
800  char *ptr;
801  char *start=k->p;
802  char backup;
803 
804  start += 9; /* pass "Location:" */
805 
806  /* Skip spaces and tabs. We do this to support multiple
807  white spaces after the "Location:" keyword. */
808  while(*start && isspace((int)*start ))
809  start++;
810 
811  /* Scan through the string from the end to find the last
812  non-space. k->end_ptr points to the actual terminating zero
813  letter, move pointer one letter back and start from
814  there. This logic strips off trailing whitespace, but keeps
815  any embedded whitespace. */
816  ptr = k->end_ptr-1;
817  while((ptr>=start) && isspace((int)*ptr))
818  ptr--;
819  ptr++;
820 
821  backup = *ptr; /* store the ending letter */
822  if(ptr != start) {
823  *ptr = '\0'; /* zero terminate */
824  conn->newurl = strdup(start); /* clone string */
825  *ptr = backup; /* restore ending letter */
826  }
827  }
828 #if 0 /* for consideration */
829  else {
830  /* This is a Location: but we have not been instructed to
831  follow it */
832  infof(data, "We ignore this location header as instructed\n");
833  }
834 #endif
835  }
836 
837  /*
838  * End of header-checks. Write them to the client.
839  */
840 
841  writetype = CLIENTWRITE_HEADER;
842  if (data->set.http_include_header)
843  writetype |= CLIENTWRITE_BODY;
844 
845  if(data->set.verbose)
847  k->p, k->hbuflen);
848 
849  result = Curl_client_write(data, writetype, k->p, k->hbuflen);
850  if(result)
851  return result;
852 
853  data->info.header_size += k->hbuflen;
854  conn->headerbytecount += k->hbuflen;
855 
856  /* reset hbufp pointer && hbuflen */
857  k->hbufp = data->state.headerbuff;
858  k->hbuflen = 0;
859  }
860  while (!stop_reading && *k->str); /* header line within buffer */
861 
862  if(stop_reading)
863  /* We've stopped dealing with input, get out of the do-while loop */
864  break;
865 
866  /* We might have reached the end of the header part here, but
867  there might be a non-header part left in the end of the read
868  buffer. */
869 
870  } /* end if header mode */
871 
872  /* This is not an 'else if' since it may be a rest from the header
873  parsing, where the beginning of the buffer is headers and the end
874  is non-headers. */
875  if (k->str && !k->header && (nread > 0)) {
876 
877  if(0 == k->bodywrites) {
878  /* These checks are only made the first time we are about to
879  write a piece of the body */
880  if(conn->protocol&PROT_HTTP) {
881  /* HTTP-only checks */
882 
883  if (conn->newurl) {
884  if(conn->bits.close) {
885  /* Abort after the headers if "follow Location" is set
886  and we're set to close anyway. */
887  k->keepon &= ~KEEP_READ;
888  FD_ZERO(&k->rkeepfd);
889  *done = TRUE;
890  return CURLE_OK;
891  }
892  /* We have a new url to load, but since we want to be able
893  to re-use this connection properly, we read the full
894  response in "ignore more" */
895  k->ignorebody = TRUE;
896  infof(data, "Ignoring the response-body\n");
897  }
898  if(data->set.timecondition && !conn->range) {
899  /* A time condition has been set AND no ranges have been
900  requested. This seems to be what chapter 13.3.4 of
901  RFC 2616 defines to be the correct action for a
902  HTTP/1.1 client */
903  if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
904  switch(data->set.timecondition) {
906  default:
907  if(k->timeofdoc < data->set.timevalue) {
908  infof(data,
909  "The requested document is not new enough\n");
910  *done = TRUE;
911  return CURLE_OK;
912  }
913  break;
915  if(k->timeofdoc > data->set.timevalue) {
916  infof(data,
917  "The requested document is not old enough\n");
918  *done = TRUE;
919  return CURLE_OK;
920  }
921  break;
922  } /* switch */
923  } /* two valid time strings */
924  } /* we have a time condition */
925 
926  } /* this is HTTP */
927  } /* this is the first time we write a body part */
928  k->bodywrites++;
929 
930  /* pass data to the debug function before it gets "dechunked" */
931  if(data->set.verbose) {
932  if(k->badheader) {
934  k->hbuflen);
935  if(k->badheader == HEADER_PARTHEADER)
936  Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
937  }
938  else
939  Curl_debug(data, CURLINFO_DATA_IN, k->str, nread);
940  }
941 
942  if(conn->bits.chunk) {
943  /*
944  * Bless me father for I have sinned. Here comes a chunked
945  * transfer flying and we need to decode this properly. While
946  * the name says read, this function both reads and writes away
947  * the data. The returned 'nread' holds the number of actual
948  * data it wrote to the client. */
949  CHUNKcode res =
950  Curl_httpchunk_read(conn, k->str, nread, &nread);
951 
952  if(CHUNKE_OK < res) {
953  if(CHUNKE_WRITE_ERROR == res) {
954  failf(data, "Failed writing data");
955  return CURLE_WRITE_ERROR;
956  }
957  failf(data, "Received problem %d in the chunky parser", res);
958  return CURLE_RECV_ERROR;
959  }
960  else if(CHUNKE_STOP == res) {
961  /* we're done reading chunks! */
962  k->keepon &= ~KEEP_READ; /* read no more */
963  FD_ZERO(&k->rkeepfd);
964 
965  /* There are now possibly N number of bytes at the end of the
966  str buffer that weren't written to the client, but we don't
967  care about them right now. */
968  }
969  /* If it returned OK, we just keep going */
970  }
971 
972  if((-1 != conn->maxdownload) &&
973  (k->bytecount + nread >= conn->maxdownload)) {
974  nread = (ssize_t) (conn->maxdownload - k->bytecount);
975  if(nread < 0 ) /* this should be unusual */
976  nread = 0;
977 
978  k->keepon &= ~KEEP_READ; /* we're done reading */
979  FD_ZERO(&k->rkeepfd);
980  }
981 
982  k->bytecount += nread;
983 
985 
986  if(!conn->bits.chunk && (nread || k->badheader)) {
987  /* If this is chunky transfer, it was already written */
988 
989  if(k->badheader && !k->ignorebody) {
990  /* we parsed a piece of data wrongly assuming it was a header
991  and now we output it as body instead */
992  result = Curl_client_write(data, CLIENTWRITE_BODY,
993  data->state.headerbuff,
994  k->hbuflen);
995  }
996  if(k->badheader < HEADER_ALLBAD) {
997  /* This switch handles various content encodings. If there's an
998  error here, be sure to check over the almost identical code
999  in http_chunks.c.
1000  Make sure that ALL_CONTENT_ENCODINGS contains all the
1001  encodings handled here. */
1002 #ifdef HAVE_LIBZ
1003  switch (k->content_encoding) {
1004  case IDENTITY:
1005 #endif
1006  /* This is the default when the server sends no
1007  Content-Encoding header. See Curl_readwrite_init; the
1008  memset() call initializes k->content_encoding to zero. */
1009  if(!k->ignorebody)
1010  result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
1011  nread);
1012 #ifdef HAVE_LIBZ
1013  break;
1014 
1015  case DEFLATE:
1016  /* Assume CLIENTWRITE_BODY; headers are not encoded. */
1017  result = Curl_unencode_deflate_write(data, k, nread);
1018  break;
1019 
1020  case GZIP:
1021  /* Assume CLIENTWRITE_BODY; headers are not encoded. */
1022  result = Curl_unencode_gzip_write(data, k, nread);
1023  break;
1024 
1025  case COMPRESS:
1026  default:
1027  failf (data, "Unrecognized content encoding type. "
1028  "libcurl understands `identity', `deflate' and `gzip' "
1029  "content encodings.");
1030  result = CURLE_BAD_CONTENT_ENCODING;
1031  break;
1032  }
1033 #endif
1034  }
1035  k->badheader = HEADER_NORMAL; /* taken care of now */
1036 
1037  if(result)
1038  return result;
1039  }
1040 
1041  } /* if (! header and data to read ) */
1042 
1043  } while(!readdone);
1044 
1045  } /* if( read from socket ) */
1046 
1047  /* If we still have writing to do, we check if we have a writable
1048  socket. Sometimes the writefdp is NULL, if no fd_set was done using
1049  the multi interface and then we can do nothing but to attempt a
1050  write to be sure. */
1051  if((k->keepon & KEEP_WRITE) &&
1052  (!writefdp || FD_ISSET(conn->writesockfd, writefdp)) ) {
1053  /* write */
1054 
1055  int i, si;
1056  ssize_t bytes_written;
1057  bool writedone=TRUE;
1058 
1059  if ((k->bytecount == 0) && (k->writebytecount == 0))
1061 
1062  didwhat |= KEEP_WRITE;
1063 
1064  /*
1065  * We loop here to do the READ and SEND loop until we run out of
1066  * data to send or until we get EWOULDBLOCK back
1067  */
1068  do {
1069 
1070  /* only read more data if there's no upload data already
1071  present in the upload buffer */
1072  if(0 == conn->upload_present) {
1073  /* init the "upload from here" pointer */
1074  conn->upload_fromhere = k->uploadbuf;
1075 
1076  if(!k->upload_done) {
1077  /* HTTP pollution, this should be written nicer to become more
1078  protocol agnostic. */
1079 
1080  if(k->wait100_after_headers &&
1081  (conn->proto.http->sending == HTTPSEND_BODY)) {
1082  /* If this call is to send body data, we must take some action:
1083  We have sent off the full HTTP 1.1 request, and we shall now
1084  go into the Expect: 100 state and await such a header */
1085  k->wait100_after_headers = FALSE; /* headers sent */
1086  k->write_after_100_header = TRUE; /* wait for the header */
1087  FD_ZERO (&k->writefd); /* clear it */
1088  k->wkeepfd = k->writefd; /* set the keeper variable */
1089  k->keepon &= ~KEEP_WRITE; /* disable writing */
1090  k->start100 = Curl_tvnow(); /* timeout count starts now */
1091  didwhat &= ~KEEP_WRITE; /* we didn't write anything actually */
1092  break;
1093  }
1094 
1095  nread = fillbuffer(conn, BUFSIZE);
1096  }
1097  else
1098  nread = 0; /* we're done uploading/reading */
1099 
1100  /* the signed int typecase of nread of for systems that has
1101  unsigned size_t */
1102  if (nread<=0) {
1103  /* done */
1104  k->keepon &= ~KEEP_WRITE; /* we're done writing */
1105  FD_ZERO(&k->wkeepfd);
1106  writedone = TRUE;
1107  break;
1108  }
1109 
1110  /* store number of bytes available for upload */
1111  conn->upload_present = nread;
1112 
1113  /* convert LF to CRLF if so asked */
1114  if (data->set.crlf) {
1115  if(data->state.scratch == NULL)
1116  data->state.scratch = malloc(2*BUFSIZE);
1117  if(data->state.scratch == NULL) {
1118  failf (data, "Failed to alloc scratch buffer!");
1119  return CURLE_OUT_OF_MEMORY;
1120  }
1121  for(i = 0, si = 0; i < nread; i++, si++) {
1122  if (conn->upload_fromhere[i] == 0x0a) {
1123  data->state.scratch[si++] = 0x0d;
1124  data->state.scratch[si] = 0x0a;
1125  }
1126  else
1127  data->state.scratch[si] = conn->upload_fromhere[i];
1128  }
1129  if(si != nread) {
1130  /* only perform the special operation if we really did replace
1131  anything */
1132  nread = si;
1133 
1134  /* upload from the new (replaced) buffer instead */
1135  conn->upload_fromhere = data->state.scratch;
1136 
1137  /* set the new amount too */
1138  conn->upload_present = nread;
1139  }
1140  }
1141  }
1142  else {
1143  /* We have a partial buffer left from a previous "round". Use
1144  that instead of reading more data */
1145  }
1146 
1147  /* write to socket (send away data) */
1148  result = Curl_write(conn,
1149  conn->writesockfd, /* socket to send to */
1150  conn->upload_fromhere, /* buffer pointer */
1151  conn->upload_present, /* buffer size */
1152  &bytes_written); /* actually send away */
1153  if(result)
1154  return result;
1155 
1156  if(data->set.verbose)
1157  /* show the data before we change the pointer upload_fromhere */
1159  bytes_written);
1160 
1161  if(conn->upload_present != bytes_written) {
1162  /* we only wrote a part of the buffer (if anything), deal with it! */
1163 
1164  /* store the amount of bytes left in the buffer to write */
1165  conn->upload_present -= bytes_written;
1166 
1167  /* advance the pointer where to find the buffer when the next send
1168  is to happen */
1169  conn->upload_fromhere += bytes_written;
1170 
1171  writedone = TRUE; /* we are done, stop the loop */
1172  }
1173  else {
1174  /* we've uploaded that buffer now */
1175  conn->upload_fromhere = k->uploadbuf;
1176  conn->upload_present = 0; /* no more bytes left */
1177 
1178  if(k->upload_done) {
1179  /* switch off writing, we're done! */
1180  k->keepon &= ~KEEP_WRITE; /* we're done writing */
1181  FD_ZERO(&k->wkeepfd);
1182  writedone = TRUE;
1183  }
1184  }
1185 
1186  k->writebytecount += bytes_written;
1188 
1189  } while(!writedone); /* loop until we're done writing! */
1190 
1191  }
1192 
1193  } while(0); /* just to break out from! */
1194 
1195  k->now = Curl_tvnow();
1196  if(didwhat) {
1197  /* Update read/write counters */
1198  if(conn->bytecountp)
1199  *conn->bytecountp = k->bytecount; /* read count */
1200  if(conn->writebytecountp)
1201  *conn->writebytecountp = k->writebytecount; /* write count */
1202  }
1203  else {
1204  /* no read no write, this is a timeout? */
1205  if (k->write_after_100_header) {
1206  /* This should allow some time for the header to arrive, but only a
1207  very short time as otherwise it'll be too much wasted times too
1208  often. */
1209 
1210  /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1211 
1212  Therefore, when a client sends this header field to an origin server
1213  (possibly via a proxy) from which it has never seen a 100 (Continue)
1214  status, the client SHOULD NOT wait for an indefinite period before
1215  sending the request body.
1216 
1217  */
1218 
1219  int ms = Curl_tvdiff(k->now, k->start100);
1220  if(ms > CURL_TIMEOUT_EXPECT_100) {
1221  /* we've waited long enough, continue anyway */
1223  FD_SET (conn->writesockfd, &k->writefd); /* write socket */
1224  k->keepon |= KEEP_WRITE;
1225  k->wkeepfd = k->writefd;
1226  }
1227  }
1228  }
1229 
1230  if(Curl_pgrsUpdate(conn))
1231  result = CURLE_ABORTED_BY_CALLBACK;
1232  else
1233  result = Curl_speedcheck (data, k->now);
1234  if (result)
1235  return result;
1236 
1237  if (data->set.timeout &&
1238  ((Curl_tvdiff(k->now, k->start)/1000) >= data->set.timeout)) {
1239  failf (data, "Operation timed out with %" FORMAT_OFF_T
1240  " out of %" FORMAT_OFF_T " bytes received",
1241  k->bytecount, conn->size);
1243  }
1244 
1245  if(!k->keepon) {
1246  /*
1247  * The transfer has been performed. Just make some general checks before
1248  * returning.
1249  */
1250 
1251  if(!(data->set.no_body) && (conn->size != -1) &&
1252  (k->bytecount != conn->size) &&
1253  !conn->newurl) {
1254  failf(data, "transfer closed with %" FORMAT_OFF_T
1255  " bytes remaining to read",
1256  conn->size - k->bytecount);
1257  return CURLE_PARTIAL_FILE;
1258  }
1259  else if(conn->bits.chunk && conn->proto.http->chunk.datasize) {
1260  failf(data, "transfer closed with at least %d bytes remaining",
1261  conn->proto.http->chunk.datasize);
1262  return CURLE_PARTIAL_FILE;
1263  }
1264  if(Curl_pgrsUpdate(conn))
1266  }
1267 
1268  /* Now update the "done" boolean we return */
1269  *done = !k->keepon;
1270 
1271  return CURLE_OK;
1272 }
1273 
1275 {
1276  struct SessionHandle *data = conn->data;
1277  struct Curl_transfer_keeper *k = &conn->keep;
1278 
1279  /* NB: the content encoding software depends on this initialization of
1280  Curl_transfer_keeper. */
1281  memset(k, 0, sizeof(struct Curl_transfer_keeper));
1282 
1283  k->start = Curl_tvnow(); /* start time */
1284  k->now = k->start; /* current time is now */
1285  k->header = TRUE; /* assume header */
1286  k->httpversion = -1; /* unknown at this point */
1287 
1288  data = conn->data; /* there's the root struct */
1289  k->buf = data->state.buffer;
1290  k->uploadbuf = data->state.uploadbuffer;
1291  k->maxfd = (conn->sockfd>conn->writesockfd?
1292  conn->sockfd:conn->writesockfd)+1;
1293  k->hbufp = data->state.headerbuff;
1294  k->ignorebody=FALSE;
1295 
1297  Curl_speedinit(data);
1298 
1299  Curl_pgrsSetUploadCounter(data, 0);
1300  Curl_pgrsSetDownloadCounter(data, 0);
1301 
1302  if (!conn->bits.getheader) {
1303  k->header = FALSE;
1304  if(conn->size > 0)
1305  Curl_pgrsSetDownloadSize(data, conn->size);
1306  }
1307  /* we want header and/or body, if neither then don't do this! */
1308  if(conn->bits.getheader || !data->set.no_body) {
1309 
1310  FD_ZERO (&k->readfd); /* clear it */
1311  if(conn->sockfd != CURL_SOCKET_BAD) {
1312  FD_SET (conn->sockfd, &k->readfd); /* read socket */
1313  k->keepon |= KEEP_READ;
1314  }
1315 
1316  FD_ZERO (&k->writefd); /* clear it */
1317  if(conn->writesockfd != CURL_SOCKET_BAD) {
1318  /* HTTP 1.1 magic:
1319 
1320  Even if we require a 100-return code before uploading data, we might
1321  need to write data before that since the REQUEST may not have been
1322  finished sent off just yet.
1323 
1324  Thus, we must check if the request has been sent before we set the
1325  state info where we wait for the 100-return code
1326  */
1327  if (data->set.expect100header &&
1328  (conn->proto.http->sending == HTTPSEND_BODY)) {
1329  /* wait with write until we either got 100-continue or a timeout */
1331  k->start100 = k->start;
1332  }
1333  else {
1334  if(data->set.expect100header)
1335  /* when we've sent off the rest of the headers, we must await a
1336  100-continue */
1338  FD_SET (conn->writesockfd, &k->writefd); /* write socket */
1339  k->keepon |= KEEP_WRITE;
1340  }
1341  }
1342 
1343  /* get these in backup variables to be able to restore them on each lap in
1344  the select() loop */
1345  k->rkeepfd = k->readfd;
1346  k->wkeepfd = k->writefd;
1347 
1348  }
1349 
1350  return CURLE_OK;
1351 }
1352 
1353 void Curl_single_fdset(struct connectdata *conn,
1354  fd_set *read_fd_set,
1355  fd_set *write_fd_set,
1356  fd_set *exc_fd_set,
1357  int *max_fd)
1358 {
1359  *max_fd = -1; /* init */
1360  if(conn->keep.keepon & KEEP_READ) {
1361  FD_SET(conn->sockfd, read_fd_set);
1362  *max_fd = conn->sockfd;
1363  conn->keep.readfdp = read_fd_set; /* store the address of the set */
1364  }
1365  if(conn->keep.keepon & KEEP_WRITE) {
1366  FD_SET(conn->writesockfd, write_fd_set);
1367 
1368  /* since sockets are curl_socket_t nowadays, we typecast it to int here
1369  to compare it nicely */
1370  if((int)conn->writesockfd > *max_fd)
1371  *max_fd = conn->writesockfd;
1372  conn->keep.writefdp = write_fd_set; /* store the address of the set */
1373  }
1374  /* we don't use exceptions, only touch that one to prevent compiler
1375  warnings! */
1376  *exc_fd_set = *exc_fd_set;
1377 }
1378 
1379 
1380 /*
1381  * Transfer()
1382  *
1383  * This function is what performs the actual transfer. It is capable of
1384  * doing both ways simultaneously.
1385  * The transfer must already have been setup by a call to Curl_Transfer().
1386  *
1387  * Note that headers are created in a preallocated buffer of a default size.
1388  * That buffer can be enlarged on demand, but it is never shrunken again.
1389  *
1390  * Parts of this function was once written by the friendly Mark Butler
1391  * <butlerm@xmission.com>.
1392  */
1393 
1394 static CURLcode
1395 Transfer(struct connectdata *conn)
1396 {
1397  struct SessionHandle *data = conn->data;
1398  CURLcode result;
1399  struct Curl_transfer_keeper *k = &conn->keep;
1400  bool done=FALSE;
1401 
1402  if(!(conn->protocol & PROT_FILE))
1403  /* Only do this if we are not transferring FILE:, since the file: treatment
1404  is different*/
1405  Curl_readwrite_init(conn);
1406 
1407  if((conn->sockfd == CURL_SOCKET_BAD) && (conn->writesockfd == CURL_SOCKET_BAD))
1408  /* nothing to read, nothing to write, we're already OK! */
1409  return CURLE_OK;
1410 
1411  /* we want header and/or body, if neither then don't do this! */
1412  if(!conn->bits.getheader && data->set.no_body)
1413  return CURLE_OK;
1414 
1415  k->writefdp = &k->writefd; /* store the address of the set */
1416  k->readfdp = &k->readfd; /* store the address of the set */
1417 
1418  while (!done) {
1419  struct timeval interval;
1420  k->readfd = k->rkeepfd; /* set these every lap in the loop */
1421  k->writefd = k->wkeepfd;
1422  interval.tv_sec = 1;
1423  interval.tv_usec = 0;
1424 
1425  switch (select (k->maxfd, k->readfdp, k->writefdp, NULL, &interval)) {
1426  case -1: /* select() error, stop reading */
1427 #ifdef EINTR
1428  /* The EINTR is not serious, and it seems you might get this more
1429  ofen when using the lib in a multi-threaded environment! */
1430  if(errno == EINTR)
1431  ;
1432  else
1433 #endif
1434  done = TRUE; /* no more read or write */
1435  continue;
1436  case 0: /* timeout */
1437  default: /* readable descriptors */
1438  result = Curl_readwrite(conn, &done);
1439  break;
1440  }
1441  if(result)
1442  return result;
1443 
1444  /* "done" signals to us if the transfer(s) are ready */
1445  }
1446 
1447  return CURLE_OK;
1448 }
1449 
1451 {
1452  if(!data->change.url)
1453  /* we can't do anything wihout URL */
1454  return CURLE_URL_MALFORMAT;
1455 
1456 #ifdef USE_SSLEAY
1457  {
1458  /* Init the SSL session ID cache here. We do it here since we want to do
1459  it after the *_setopt() calls (that could change the size of the cache)
1460  but before any transfer takes place. */
1462  if(res)
1463  return res;
1464  }
1465 #endif
1466 
1467  data->set.followlocation=0; /* reset the location-follow counter */
1468  data->state.this_is_a_follow = FALSE; /* reset this */
1469  data->state.errorbuf = FALSE; /* no error has occurred */
1470 
1471  /* set preferred authentication, default to basic */
1472 
1473  data->state.authstage = 0; /* initialize authentication later */
1474 
1475  /* If there was a list of cookie files to read and we haven't done it before,
1476  do it now! */
1477  if(data->change.cookielist) {
1478  struct curl_slist *list = data->change.cookielist;
1480  while(list) {
1481  data->cookies = Curl_cookie_init(data,
1482  list->data,
1483  data->cookies,
1484  data->set.cookiesession);
1485  list = list->next;
1486  }
1488  curl_slist_free_all(data->change.cookielist); /* clean up list */
1489  data->change.cookielist = NULL; /* don't do this again! */
1490  }
1491 
1492 
1493 
1494  /* Allow data->set.use_port to set which port to use. This needs to be
1495  * disabled for example when we follow Location: headers to URLs using
1496  * different ports! */
1497  data->state.allow_port = TRUE;
1498 
1499 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1500  /*************************************************************
1501  * Tell signal handler to ignore SIGPIPE
1502  *************************************************************/
1503  if(!data->set.no_signal)
1504  data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1505 #endif
1506 
1507  Curl_initinfo(data); /* reset session-specific information "variables" */
1508  Curl_pgrsStartNow(data);
1509 
1510  return CURLE_OK;
1511 }
1512 
1514 {
1515 #if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1516  /* restore the signal handler for SIGPIPE before we get back */
1517  if(!data->set.no_signal)
1518  signal(SIGPIPE, data->state.prev_signal);
1519 #else
1520  (void)data; /* unused parameter */
1521 #endif
1522 
1523  return CURLE_OK;
1524 }
1525 
/*
 * strlen_url() returns the length the given URL will occupy once its spaces
 * are encoded by strcpy_url(): each space counts as three bytes ("%20") to
 * the left of the first '?', and as one byte ('+') to the right of it.
 * Every other byte counts as itself. The terminating zero is not counted.
 *
 * The parameter is now const-qualified (read-only input) and the left/right
 * flag is a plain int so the helper has no project-header dependencies.
 */
static int strlen_url(const char *url)
{
  const char *ptr;
  int newlen = 0;
  int left = 1; /* nonzero while still on the left side of the '?' */

  for(ptr = url; *ptr; ptr++) {
    switch(*ptr) {
    case '?':
      left = 0;
      /* FALLTHROUGH: the '?' itself still counts as one byte */
    default:
      newlen++;
      break;
    case ' ':
      if(left)
        newlen += 3; /* becomes "%20" */
      else
        newlen++;    /* becomes "+" */
      break;
    }
  }
  return newlen;
}
1549 
/*
 * strcpy_url() copies 'url' into 'output' while encoding spaces: "%20" to
 * the left of the first '?', "+" to the right of it. All other bytes are
 * copied verbatim and the result is zero-terminated. 'output' must hold at
 * least strlen_url(url)+1 bytes.
 *
 * The source parameter is now const-qualified (read-only input) and the
 * left/right flag is a plain int so the helper has no project-header
 * dependencies.
 */
static void strcpy_url(char *output, const char *url)
{
  /* we must add this with whitespace-replacing */
  int left = 1; /* nonzero while still on the left side of the '?' */
  const char *iptr;
  char *optr = output;

  for(iptr = url; /* read from here */
      *iptr;      /* until zero byte */
      iptr++) {
    switch(*iptr) {
    case '?':
      left = 0;
      /* FALLTHROUGH: copy the '?' itself */
    default:
      *optr++ = *iptr;
      break;
    case ' ':
      if(left) {
        *optr++ = '%'; /* add a '%' */
        *optr++ = '2'; /* add a '2' */
        *optr++ = '0'; /* add a '0' */
      }
      else
        *optr++ = '+'; /* add a '+' here */
      break;
    }
  }
  *optr = 0; /* zero terminate output buffer */
}
1579 
1581  char *newurl) /* this 'newurl' is the Location: string,
1582  and it must be malloc()ed before passed
1583  here */
1584 {
1585  /* Location: redirect */
1586  char prot[16]; /* URL protocol string storage */
1587  char letter; /* used for a silly sscanf */
1588  int newlen;
1589  char *newest;
1590 
1591  if (data->set.maxredirs &&
1592  (data->set.followlocation >= data->set.maxredirs)) {
1593  failf(data,"Maximum (%d) redirects followed", data->set.maxredirs);
1594  return CURLE_TOO_MANY_REDIRECTS;
1595  }
1596 
1597  /* mark the next request as a followed location: */
1598  data->state.this_is_a_follow = TRUE;
1599 
1600  data->set.followlocation++; /* count location-followers */
1601 
1602  if(data->set.http_auto_referer) {
1603  /* We are asked to automatically set the previous URL as the
1604  referer when we get the next URL. We pick the ->url field,
1605  which may or may not be 100% correct */
1606 
1607  if(data->change.referer_alloc)
1608  /* If we already have an allocated referer, free this first */
1609  free(data->change.referer);
1610 
1611  data->change.referer = strdup(data->change.url);
1612  data->change.referer_alloc = TRUE; /* yes, free this later */
1613  }
1614 
1615  if(2 != sscanf(newurl, "%15[^?&/:]://%c", prot, &letter)) {
1616  /***
1617  *DANG* this is an RFC 2068 violation. The URL is supposed
1618  to be absolute and this doesn't seem to be that!
1619  ***
1620  Instead, we have to TRY to append this new path to the old URL
1621  to the right of the host part. Oh crap, this is doomed to cause
1622  problems in the future...
1623  */
1624  char *protsep;
1625  char *pathsep;
1626 
1627  char *useurl = newurl;
1628  int urllen;
1629 
1630  /* we must make our own copy of the URL to play with, as it may
1631  point to read-only data */
1632  char *url_clone=strdup(data->change.url);
1633 
1634  if(!url_clone)
1635  return CURLE_OUT_OF_MEMORY; /* skip out of this NOW */
1636 
1637  /* protsep points to the start of the host name */
1638  protsep=strstr(url_clone, "//");
1639  if(!protsep)
1640  protsep=url_clone;
1641  else
1642  protsep+=2; /* pass the slashes */
1643 
1644  if('/' != newurl[0]) {
1645  int level=0;
1646 
1647  /* First we need to find out if there's a ?-letter in the URL,
1648  and cut it and the right-side of that off */
1649  pathsep = strrchr(protsep, '?');
1650  if(pathsep)
1651  *pathsep=0;
1652 
1653  /* we have a relative path to append to the last slash if
1654  there's one available */
1655  pathsep = strrchr(protsep, '/');
1656  if(pathsep)
1657  *pathsep=0;
1658 
1659  /* Check if there's any slash after the host name, and if so,
1660  remember that position instead */
1661  pathsep = strchr(protsep, '/');
1662  if(pathsep)
1663  protsep = pathsep+1;
1664  else
1665  protsep = NULL;
1666 
1667  /* now deal with one "./" or any amount of "../" in the newurl
1668  and act accordingly */
1669 
1670  if((useurl[0] == '.') && (useurl[1] == '/'))
1671  useurl+=2; /* just skip the "./" */
1672 
1673  while((useurl[0] == '.') &&
1674  (useurl[1] == '.') &&
1675  (useurl[2] == '/')) {
1676  level++;
1677  useurl+=3; /* pass the "../" */
1678  }
1679 
1680  if(protsep) {
1681  while(level--) {
1682  /* cut off one more level from the right of the original URL */
1683  pathsep = strrchr(protsep, '/');
1684  if(pathsep)
1685  *pathsep=0;
1686  else {
1687  *protsep=0;
1688  break;
1689  }
1690  }
1691  }
1692  }
1693  else {
1694  /* We got a new absolute path for this server, cut off from the
1695  first slash */
1696  pathsep = strchr(protsep, '/');
1697  if(pathsep)
1698  *pathsep=0;
1699  else {
1700  /* There was no slash. Now, since we might be operating on a badly
1701  formatted URL, such as "http://www.url.com?id=2380" which doesn't
1702  use a slash separator as it is supposed to, we need to check for a
1703  ?-letter as well! */
1704  pathsep = strchr(protsep, '?');
1705  if(pathsep)
1706  *pathsep=0;
1707  }
1708  }
1709 
1710  /* If the new part contains a space, this is a mighty stupid redirect
1711  but we still make an effort to do "right". To the left of a '?'
1712  letter we replace each space with %20 while it is replaced with '+'
1713  on the right side of the '?' letter.
1714  */
1715  newlen = strlen_url(useurl);
1716 
1717  urllen = strlen(url_clone);
1718 
1719  newest=(char *)malloc( urllen + 1 + /* possible slash */
1720  newlen + 1 /* zero byte */);
1721 
1722  if(!newest)
1723  return CURLE_OUT_OF_MEMORY; /* go out from this */
1724 
1725  /* copy over the root url part */
1726  memcpy(newest, url_clone, urllen);
1727 
1728  /* check if we need to append a slash */
1729  if(('/' == useurl[0]) || (protsep && !*protsep))
1730  ;
1731  else
1732  newest[urllen++]='/';
1733 
1734  /* then append the new piece on the right side */
1735  strcpy_url(&newest[urllen], useurl);
1736 
1737  free(newurl); /* newurl is the allocated pointer */
1738  free(url_clone);
1739  newurl = newest;
1740  }
1741  else {
1742  /* This is an absolute URL, don't allow the custom port number */
1743  data->state.allow_port = FALSE;
1744 
1745  if(strchr(newurl, ' ')) {
1746  /* This new URL contains at least one space, this is a mighty stupid
1747  redirect but we still make an effort to do "right". */
1748  newlen = strlen_url(newurl);
1749 
1750  newest = malloc(newlen+1); /* get memory for this */
1751  if(newest) {
1752  strcpy_url(newest, newurl); /* create a space-free URL */
1753 
1754  free(newurl); /* that was no good */
1755  newurl = newest; /* use this instead now */
1756  }
1757  }
1758 
1759  }
1760 
1761  if(data->change.url_alloc)
1762  free(data->change.url);
1763  else
1764  data->change.url_alloc = TRUE; /* the URL is allocated */
1765 
1766  data->change.url = newurl;
1767  newurl = NULL; /* don't free! */
1768 
1769  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);
1770 
1771  /*
1772  * We get here when the HTTP code is 300-399 (and 401). We need to perform
1773  * differently based on exactly what return code there was.
1774  *
1775  * News from 7.10.6: we can also get here on a 401, in case we act on a
1776  * HTTP authentication scheme other than Basic.
1777  */
1778  switch(data->info.httpcode) {
1779  case 401:
1780  /* Act on an authentication, we keep on moving and do the Authorization:
1781  XXXX header in the HTTP request code snippet */
1782  break;
1783  case 300: /* Multiple Choices */
1784  case 306: /* Not used */
1785  case 307: /* Temporary Redirect */
1786  default: /* for all unknown ones */
1787  /* These are explicitly mention since I've checked RFC2616 and they
1788  * seem to be OK to POST to.
1789  */
1790  break;
1791  case 301: /* Moved Permanently */
1792  /* (quote from RFC2616, section 10.3.2):
1793  *
1794  * Note: When automatically redirecting a POST request after
1795  * receiving a 301 status code, some existing HTTP/1.0 user agents
1796  * will erroneously change it into a GET request.
1797  *
1798  * ----
1799  * Warning: Because most of importants user agents do this clear
1800  * RFC2616 violation, many webservers expect this misbehavior. So
1801  * these servers often answers to a POST request with an error page.
1802  * To be sure that libcurl gets the page that most user agents
1803  * would get, libcurl has to force GET:
1804  */
1805  if( data->set.httpreq == HTTPREQ_POST
1806  || data->set.httpreq == HTTPREQ_POST_FORM) {
1807  infof(data,
1808  "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
1809  data->set.httpreq = HTTPREQ_GET;
1810  }
1811  break;
1812  case 302: /* Found */
1813  /* (From 10.3.3)
1814 
1815  Note: RFC 1945 and RFC 2068 specify that the client is not allowed
1816  to change the method on the redirected request. However, most
1817  existing user agent implementations treat 302 as if it were a 303
1818  response, performing a GET on the Location field-value regardless
1819  of the original request method. The status codes 303 and 307 have
1820  been added for servers that wish to make unambiguously clear which
1821  kind of reaction is expected of the client.
1822 
1823  (From 10.3.4)
1824 
1825  Note: Many pre-HTTP/1.1 user agents do not understand the 303
1826  status. When interoperability with such clients is a concern, the
1827  302 status code may be used instead, since most user agents react
1828  to a 302 response as described here for 303.
1829  */
1830  case 303: /* See Other */
1831  /* Disable both types of POSTs, since doing a second POST when
1832  * following isn't what anyone would want! */
1833  if(data->set.httpreq != HTTPREQ_GET) {
1834  data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
1835  infof(data, "Disables POST, goes with %s\n",
1836  data->set.no_body?"HEAD":"GET");
1837  }
1838  break;
1839  case 304: /* Not Modified */
1840  /* 304 means we did a conditional request and it was "Not modified".
1841  * We shouldn't get any Location: header in this response!
1842  */
1843  break;
1844  case 305: /* Use Proxy */
1845  /* (quote from RFC2616, section 10.3.6):
1846  * "The requested resource MUST be accessed through the proxy given
1847  * by the Location field. The Location field gives the URI of the
1848  * proxy. The recipient is expected to repeat this single request
1849  * via the proxy. 305 responses MUST only be generated by origin
1850  * servers."
1851  */
1852  break;
1853  }
1855  Curl_pgrsResetTimes(data);
1856 
1857  return CURLE_OK;
1858 }
1859 
1861 {
1862  CURLcode res;
1863  CURLcode res2;
1864  struct connectdata *conn=NULL;
1865  char *newurl = NULL; /* possibly a new URL to follow to! */
1866 
1867  data->state.used_interface = Curl_if_easy;
1868 
1869  res = Curl_pretransfer(data);
1870  if(res)
1871  return res;
1872 
1873  /*
1874  * It is important that there is NO 'return' from this function at any other
1875  * place than falling down to the end of the function! This is because we
1876  * have cleanup stuff that must be done before we get back, and that is only
1877  * performed after this do-while loop.
1878  */
1879 
1880  do {
1881  int urlchanged = FALSE;
1882  do {
1883  bool async;
1885  data->change.url_changed = FALSE;
1886  res = Curl_connect(data, &conn, &async);
1887 
1888  if((CURLE_OK == res) && async) {
1889  /* Now, if async is TRUE here, we need to wait for the name
1890  to resolve */
1891  res = Curl_wait_for_resolv(conn, NULL);
1892  if(CURLE_OK == res)
1893  /* Resolved, continue with the connection */
1894  res = Curl_async_resolved(conn);
1895  }
1896  if(res)
1897  break;
1898 
1899  /* If a callback (or something) has altered the URL we should use within
1900  the Curl_connect(), we detect it here and act as if we are redirected
1901  to the new URL */
1902  urlchanged = data->change.url_changed;
1903  if ((CURLE_OK == res) && urlchanged) {
1904  res = Curl_done(conn);
1905  if(CURLE_OK == res) {
1906  char *gotourl = strdup(data->change.url);
1907  res = Curl_follow(data, gotourl);
1908  if(res)
1909  free(gotourl);
1910  }
1911  }
1912  } while (urlchanged && res == CURLE_OK) ;
1913 
1914  if(res == CURLE_OK) {
1915  res = Curl_do(&conn);
1916 
1917  if(res == CURLE_OK) {
1918  res = Transfer(conn); /* now fetch that URL please */
1919  if(res == CURLE_OK) {
1920 
1921  if((conn->keep.bytecount == 0) &&
1922  (conn->sockerror == ECONNRESET) &&
1923  conn->bits.reuse) {
1924  /* We got no data, the connection was reset and we did attempt
1925  to re-use a connection. This smells like we were too fast to
1926  re-use a connection that was closed when we wanted to read
1927  from it. Bad luck. Let's simulate a redirect to the same URL
1928  to retry! */
1929  infof(data, "Connection reset, retrying a fresh connect\n");
1930  newurl = strdup(conn->data->change.url);
1931 
1932  conn->bits.close = TRUE; /* close this connection */
1933  conn->bits.retry = TRUE; /* mark this as a connection we're about
1934  to retry. Marking it this way should
1935  prevent i.e HTTP transfers to return
1936  error just because nothing has been
1937  transfered! */
1938  }
1939  else
1940  /*
1941  * We must duplicate the new URL here as the connection data
1942  * may be free()ed in the Curl_done() function.
1943  */
1944  newurl = conn->newurl?strdup(conn->newurl):NULL;
1945  }
1946  else {
1947  /* The transfer phase returned error, we mark the connection to get
1948  * closed to prevent being re-used. This is becasue we can't
1949  * possibly know if the connection is in a good shape or not now. */
1950  conn->bits.close = TRUE;
1951 
1952  if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET]) {
1953  /* if we failed anywhere, we must clean up the secondary socket if
1954  it was used */
1955  sclose(conn->sock[SECONDARYSOCKET]);
1957  }
1958  }
1959 
1960  /* Always run Curl_done(), even if some of the previous calls
1961  failed, but return the previous (original) error code */
1962  res2 = Curl_done(conn);
1963 
1964  if(CURLE_OK == res)
1965  res = res2;
1966  }
1967 
1968  /*
1969  * Important: 'conn' cannot be used here, since it may have been closed
1970  * in 'Curl_done' or other functions.
1971  */
1972 
1973  if((res == CURLE_OK) && newurl) {
1974  res = Curl_follow(data, newurl);
1975  if(CURLE_OK == res) {
1976  newurl = NULL;
1977  continue;
1978  }
1979  }
1980  }
1981  break; /* it only reaches here when this shouldn't loop */
1982 
1983  } while(1); /* loop if Location: */
1984 
1985  if(newurl)
1986  free(newurl);
1987 
1988  /* run post-transfer uncondionally, but don't clobber the return code if
1989  we already have an error code recorder */
1990  res2 = Curl_posttransfer(data);
1991  if(!res && res2)
1992  res = res2;
1993 
1994  return res;
1995 }
1996 
1997 CURLcode
1998 Curl_Transfer(struct connectdata *c_conn, /* connection data */
1999  int sockindex, /* socket index to read from or -1 */
2000  curl_off_t size, /* -1 if unknown at this point */
2001  bool getheader, /* TRUE if header parsing is wanted */
2002  curl_off_t *bytecountp, /* return number of bytes read or NULL */
2003  int writesockindex, /* socket index to write to, it may very
2004  well be the same we read from. -1
2005  disables */
2006  curl_off_t *writecountp /* return number of bytes written or
2007  NULL */
2008  )
2009 {
2010  struct connectdata *conn = (struct connectdata *)c_conn;
2011  if(!conn)
2013 
2014  curlassert((sockindex <= 1) && (sockindex >= -1));
2015 
2016  /* now copy all input parameters */
2017  conn->sockfd = sockindex==-1?
2018  CURL_SOCKET_BAD:conn->sock[sockindex];
2019  conn->size = size;
2020  conn->bits.getheader = getheader;
2021  conn->bytecountp = bytecountp;
2022  conn->writesockfd = writesockindex==-1?
2023  CURL_SOCKET_BAD:conn->sock[writesockindex];
2024  conn->writebytecountp = writecountp;
2025 
2026  return CURLE_OK;
2027 
2028 }
CURLcode Curl_pretransfer(struct SessionHandle *data)
Definition: transfer.c:1450
CHUNKcode Curl_httpchunk_read(struct connectdata *conn, char *datap, ssize_t length, ssize_t *wrote)
Definition: http_chunks.c:101
CURLcode Curl_unencode_gzip_write(struct SessionHandle *data, struct Curl_transfer_keeper *k, ssize_t nread)
void Curl_pgrsSetDownloadSize(struct SessionHandle *data, curl_off_t size)
Definition: progress.c:180
bool no_body
Definition: urldata.h:864
#define DEFLATE
Definition: urldata.h:348
#define CLIENTWRITE_BODY
Definition: sendf.h:34
struct CookieInfo * cookies
Definition: urldata.h:901
bool httpproxy
Definition: urldata.h:281
enum UrlState::@12 used_interface
struct ConnectBits bits
Definition: urldata.h:462
struct timeval now
Definition: urldata.h:316
char buffer[BUFSIZE+1]
Definition: urldata.h:679
long numsessions
Definition: urldata.h:156
CURLcode Curl_initinfo(struct SessionHandle *data)
Definition: getinfo.c:50
struct Curl_transfer_keeper keep
Definition: urldata.h:553
union connectdata::@11 proto
CURLcode Curl_unencode_deflate_write(struct SessionHandle *data, struct Curl_transfer_keeper *k, ssize_t nread)
void Curl_speedinit(struct SessionHandle *data)
Definition: speedcheck.c:34
bool forbidchunk
Definition: urldata.h:296
struct timeval start100
Definition: urldata.h:338
void Curl_pgrsTime(struct SessionHandle *data, timerid timer)
Definition: progress.c:126
CURLcode Curl_posttransfer(struct SessionHandle *data)
Definition: transfer.c:1513
#define COMPRESS
Definition: urldata.h:350
struct ssl_config_data ssl
Definition: urldata.h:829
void Curl_httpchunk_init(struct connectdata *conn)
Definition: http_chunks.c:85
#define GZIP
Definition: urldata.h:349
bool url_alloc
Definition: urldata.h:735
struct DynamicStatic change
Definition: urldata.h:899
#define curlassert(x)
Definition: setup.h:149
#define failf
Definition: sendf.h:32
char * data
Definition: curl.h:1062
struct curl_slist * http200aliases
Definition: urldata.h:838
CURLcode Curl_client_write(struct SessionHandle *data, int type, char *ptr, size_t len)
Definition: sendf.c:319
time_t curl_getdate(const char *p, const time_t *now)
Definition: getdate.c:1991
CURLcode Curl_connect(struct SessionHandle *data, struct connectdata **in_connect, bool *asyncp)
Definition: url.c:3217
bool no_signal
Definition: urldata.h:879
curl_socket_t sockfd
Definition: urldata.h:493
CURLcode
Definition: curl.h:209
char * contenttype
Definition: urldata.h:600
#define PROT_FILE
Definition: urldata.h:416
struct timeval start
Definition: urldata.h:315
bool http_auto_referer
Definition: urldata.h:863
#define SECONDARYSOCKET
Definition: urldata.h:395
GLdouble s
Definition: glext.h:2935
GLenum GLsizei len
Definition: glext.h:3472
struct UrlState state
Definition: urldata.h:903
CURLcode Curl_do(struct connectdata **connp)
Definition: url.c:3330
off_t curl_off_t
Definition: curl.h:96
int Curl_read(struct connectdata *conn, curl_socket_t sockfd, char *buf, size_t buffersize, ssize_t *n)
Definition: sendf.c:362
bool referer_alloc
Definition: urldata.h:743
int i
Definition: process.py:33
void Curl_single_fdset(struct connectdata *conn, fd_set *read_fd_set, fd_set *write_fd_set, fd_set *exc_fd_set, int *max_fd)
Definition: transfer.c:1353
Boolean result
#define ssize_t
Definition: config-win32.h:27
enum HTTP::@9 sending
enum Curl_transfer_keeper::@10 badheader
char * upload_fromhere
Definition: urldata.h:563
bool reuse
Definition: urldata.h:279
CURLcode Curl_write(struct connectdata *conn, curl_socket_t sockfd, void *mem, size_t len, ssize_t *written)
Definition: sendf.c:221
bool get_filetime
Definition: urldata.h:848
bool allow_port
Definition: urldata.h:703
curl_off_t resume_from
Definition: urldata.h:442
bool write_after_100_header
Definition: urldata.h:339
bool errorbuf
Definition: urldata.h:695
long timeout
Definition: urldata.h:794
int upload_present
Definition: urldata.h:557
fd_set * writefdp
Definition: urldata.h:368
long header_size
Definition: urldata.h:594
struct HTTP * http
Definition: urldata.h:539
bool crlf
Definition: urldata.h:812
#define PROT_HTTP
Definition: urldata.h:410
int Curl_pgrsUpdate(struct connectdata *conn)
Definition: progress.c:206
void Curl_pgrsSetDownloadCounter(struct SessionHandle *data, curl_off_t size)
Definition: progress.c:170
long followlocation
Definition: urldata.h:775
CURLcode Curl_http_auth(struct connectdata *conn, int httpcode, char *header)
Definition: http.c:319
char uploadbuffer[BUFSIZE+1]
Definition: urldata.h:680
void Curl_pgrsSetUploadCounter(struct SessionHandle *data, curl_off_t size)
Definition: progress.c:175
void Curl_pgrsResetTimes(struct SessionHandle *data)
Definition: progress.c:118
struct PureInfo info
Definition: urldata.h:905
bool expect100header
Definition: urldata.h:875
curl_off_t offset
Definition: urldata.h:334
CURLcode Curl_SSL_InitSessions(struct SessionHandle *, long)
curl_off_t size
Definition: urldata.h:494
long buffer_size
Definition: urldata.h:834
GLuint GLuint end
Definition: glext.h:2845
#define FORMAT_OFF_T
Definition: setup.h:99
char * url
Definition: urldata.h:734
#define NULL
Definition: Lib.h:88
#define select(args...)
Definition: amigaos.h:39
struct curl_slist * next
Definition: curl.h:1063
GLsizei GLsizei GLenum GLenum const GLvoid * data
Definition: glext.h:2853
CURLcode Curl_follow(struct SessionHandle *data, char *newurl)
Definition: transfer.c:1580
char * ppath
Definition: urldata.h:436
char * referer
Definition: urldata.h:742
long httpversion
Definition: urldata.h:822
int httpversion
Definition: urldata.h:589
CHUNKcode
Definition: http_chunks.h:68
long maxredirs
Definition: urldata.h:776
char * scratch
Definition: urldata.h:694
CURLcode Curl_async_resolved(struct connectdata *conn)
Definition: url.c:3254
#define CLIENTWRITE_HEADER
Definition: sendf.h:35
bool this_is_a_follow
Definition: urldata.h:683
CURLcode Curl_Transfer(struct connectdata *c_conn, int sockindex, curl_off_t size, bool getheader, curl_off_t *bytecountp, int writesockindex, curl_off_t *writecountp)
Definition: transfer.c:1998
void Curl_http_auth_act(struct connectdata *conn)
Definition: http.c:167
char * range
Definition: urldata.h:440
long Curl_tvdiff(struct timeval newer, struct timeval older)
Definition: timeval.c:92
CURLcode Curl_speedcheck(struct SessionHandle *data, struct timeval now)
Definition: speedcheck.c:39
long authstage
Definition: urldata.h:712
Definition: curl.h:210
struct SessionHandle * data
Definition: urldata.h:403
bool getheader
Definition: urldata.h:294
curl_read_callback fread
Definition: urldata.h:565
#define checkprefix(a, b)
Definition: strequal.h:37
char * newurl
Definition: urldata.h:517
struct curl_slist * cookielist
Definition: urldata.h:744
bool close
Definition: urldata.h:278
char * strdup(char *s1)
Definition: main.c:183
curl_socket_t writesockfd
Definition: urldata.h:498
#define sclose(x)
Definition: setup.h:220
curl_socket_t maxfd
Definition: urldata.h:364
curl_off_t * bytecountp
Definition: urldata.h:495
curl_off_t bytecount
Definition: urldata.h:313
bool chunk
Definition: urldata.h:280
char * name
Definition: urldata.h:430
int sockerror
Definition: urldata.h:573
int Curl_debug(struct SessionHandle *data, curl_infotype type, char *ptr, size_t size)
Definition: sendf.c:439
int httpcode
Definition: urldata.h:587
bool wait100_after_headers
Definition: urldata.h:342
curl_off_t * writebytecountp
Definition: urldata.h:501
curl_off_t max_filesize
Definition: urldata.h:842
CURLcode Curl_readwrite(struct connectdata *conn, bool *done)
Definition: transfer.c:193
char * headerbuff
Definition: urldata.h:676
curl_socket_t sock[2]
Definition: urldata.h:454
bool verbose
Definition: urldata.h:871
#define snprintf
Definition: Str.h:70
GLsizeiptr size
Definition: glext.h:3112
time_t timevalue
Definition: urldata.h:818
CURLcode Curl_wait_for_resolv(struct connectdata *conn, struct Curl_dns_entry **entry)
Definition: hostip.c:676
#define CURL_SOCKET_BAD
Definition: setup.h:255
bool url_changed
Definition: urldata.h:736
char * encoding
Definition: urldata.h:781
int headersize
Definition: urldata.h:677
#define CURL_TIMEOUT_EXPECT_100
Definition: transfer.c:114
#define infof
Definition: sendf.h:31
void Curl_safefree(void *ptr)
Definition: url.c:172
typedef void(APIENTRYP PFNGLBLENDCOLORPROC)(GLclampf red
void * fread_in
Definition: urldata.h:566
#define FALSE
Definition: mprintf.c:70
struct timeval Curl_tvnow(void)
Definition: timeval.c:81
CURLSHcode Curl_share_lock(struct SessionHandle *data, curl_lock_data type, curl_lock_access accesstype)
Definition: share.c:180
size_t datasize
Definition: http_chunks.h:84
bool upload_chunky
Definition: urldata.h:292
bool Curl_compareheader(char *headerline, const char *header, const char *content)
Definition: http.c:677
struct UserDefined set
Definition: urldata.h:898
struct Curl_chunker chunk
Definition: urldata.h:218
void curl_slist_free_all(struct curl_slist *)
Definition: sendf.c:108
long headerbytecount
Definition: urldata.h:438
#define strtoofft
Definition: strtoofft.h:58
CURLcode Curl_perform(struct SessionHandle *data)
Definition: transfer.c:1860
GLuint res
Definition: glext.h:5385
#define TRUE
Definition: mprintf.c:69
CURLcode Curl_readwrite_init(struct connectdata *conn)
Definition: transfer.c:1274
long protocol
Definition: urldata.h:407
void Curl_pgrsStartNow(struct SessionHandle *data)
Definition: progress.c:164
bool http_follow_location
Definition: urldata.h:857
#define IDENTITY
Definition: urldata.h:347
GLint level
Definition: glext.h:2878
bool retry
Definition: urldata.h:302
#define CURLMAX(x, y)
Definition: urldata.h:115
struct connectdata::dynamically_allocated_data allocptr
curl_off_t maxdownload
Definition: urldata.h:456
Curl_HttpReq httpreq
Definition: urldata.h:820
curl_TimeCond timecondition
Definition: urldata.h:817
CURLSHcode Curl_share_unlock(struct SessionHandle *data, curl_lock_data type)
Definition: share.c:198
long filetime
Definition: urldata.h:590
#define BUFSIZE
Definition: urldata.h:104
GLuint start
Definition: glext.h:2845
bool cookiesession
Definition: urldata.h:811
fd_set * readfdp
Definition: urldata.h:367
bool http_fail_on_error
Definition: urldata.h:856
CURLcode Curl_done(struct connectdata *conn)
Definition: url.c:3272
#define ECONNRESET
Definition: setup.h:293