author     Dan McGee <dan@archlinux.org>  2009-11-11 23:39:26 -0600
committer  Dan McGee <dan@archlinux.org>  2009-11-15 19:47:30 -0600
commit     d2dbb04a9af7a18da217fb21b9152626c43461da (patch)
tree       d0d3f7f1d19ce5e14031ecfcd32924036215224a /lib/libalpm/dload.c
parent     be266b43647ef57632d7bcfd07a4441f737b5aed (diff)
download: major refactor to address lingering issues
Sorry for this being such a huge patch, but I believe it is necessary for
quite a few reasons, which I will attempt to explain herein. I've been
mulling this over for a while, but wasn't super happy with making the
download interface more complex. Instead, if we carefully order things in
the internal download code, we can actually make the interface simpler.

1. FS#15657 - This involves `name.db.tar.gz.part` files being left around
   the filesystem, and then causing all sorts of issues when someone
   attempts to rerun the operation they canceled. We need to ensure that if
   we resume a download, we are resuming it on exactly the same file; if we
   cannot be almost positive of that, then we need to start over.

2. http://www.mail-archive.com/pacman-dev@archlinux.org/msg03536.html -
   Here we have a lighttpd bug to ruin the day. If we send both a Range:
   header and an If-Modified-Since: header across the wire in a GET
   request, lighttpd doesn't do what we want in several cases. If the file
   hadn't been modified, it returns a '304 Not Modified' instead of a
   '206 Partial Content'. We need to do a stat (e.g. HEAD in HTTP terms)
   operation here, and then proceed accordingly based on the values we get
   back from it.

3. The mtime handling was rather ugly, and relied on the called function
   writing back to a passed-in reference, which isn't the greatest.
   Instead, use the power of the filesystem to contain this info. Every
   file downloaded internally is now carefully timestamped with the remote
   file time. This should allow the resume logic to work. In order to
   guarantee this, we need to implement a signal handler that catches
   interrupts, notifies the running code, and causes it to set the mtimes
   on the file. It then rethrows the signal so the pacman signal handler
   (or any frontend) works as expected.

4. We did a lot of funky stuff in trying to track the DB last modified
   time. It is a lot easier to just keep the downloaded DB file around and
   track the time on that rather than in a funky dot file. It also kills a
   lot of code.

5. For GPG verification of the databases down the road, we are going to
   need the DB file around for at least a short bit of time anyway, so this
   gets us closer to that.

Signed-off-by: Dan McGee <dan@archlinux.org>
[Xav: fixed printf with off_t]
Signed-off-by: Xavier Chantry <shiningxc@gmail.com>
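To make point 2 concrete, here is a minimal, self-contained sketch of the
stat-before-get flow the patch adopts, using the same libfetch calls that
appear in the diff below (fetchStat(), fetchGet(), fetchIO_read()). The
fetch_with_stat() name and its tempfile argument are illustrative only,
error handling is trimmed, and you would link with -lfetch:

#include <sys/types.h>
#include <sys/stat.h>
#include <fetch.h>

int fetch_with_stat(const char *url, const char *tempfile)
{
	struct url *fileurl;
	struct url_stat ust;
	struct stat st;
	off_t local_size = 0;
	time_t local_time = 0;
	fetchIO *dlf;

	if((fileurl = fetchParseURL(url)) == NULL) {
		return(-1);
	}

	/* seed resume information from any partial file we already have */
	if(stat(tempfile, &st) == 0 && st.st_size > 0) {
		local_time = fileurl->last_modified = st.st_mtime;
		local_size = fileurl->offset = (off_t)st.st_size;
	}

	/* one HEAD-style request gets both remote size and mtime, so we never
	 * need to combine Range: and If-Modified-Since: in a single GET */
	fetchLastErrCode = 0;
	if(fetchStat(fileurl, &ust, "") == -1) {
		fetchFreeURL(fileurl);
		return(-1);
	}

	if(ust.mtime && ust.mtime == local_time
			&& ust.size && ust.size == local_size) {
		/* remote copy matches what we already have; nothing to do */
		fetchFreeURL(fileurl);
		return(1);
	}
	if(!ust.mtime || ust.mtime != local_time) {
		/* the partial file cannot be trusted; restart from offset 0 */
		fileurl->offset = 0;
	}

	fetchLastErrCode = 0;
	if((dlf = fetchGet(fileurl, "")) == NULL) {
		fetchFreeURL(fileurl);
		return(-1);
	}
	/* ... read with fetchIO_read() and append to tempfile here ... */
	fetchIO_close(dlf);
	fetchFreeURL(fileurl);
	return(0);
}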
Diffstat (limited to 'lib/libalpm/dload.c')
-rw-r--r--  lib/libalpm/dload.c  177
1 file changed, 121 insertions(+), 56 deletions(-)
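Point 3's interrupt handling can also be read in isolation. The following
hedged sketch shows the pattern: install a SIGINT handler that only sets a
flag, stamp the (possibly partial) file with the remote mtime on the way
out, restore the previous handler, and re-raise the signal so the
frontend's handler still fires. The remote_mtime value and the foo.part
path are placeholders, not values from the patch; futimes() is the
BSD/glibc call the patch itself uses in its cleanup path.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>

static volatile sig_atomic_t dload_interrupted;

static void inthandler(int signum)
{
	(void)signum;
	dload_interrupted = 1;
}

int main(void)
{
	struct sigaction sig_int[2];
	time_t remote_mtime = 1258000000;	/* assumed remote mtime for the example */
	FILE *localf = fopen("foo.part", "wb");

	/* install our handler, keeping the old one around */
	dload_interrupted = 0;
	sig_int[1].sa_handler = inthandler;
	sigemptyset(&sig_int[1].sa_mask);
	sig_int[1].sa_flags = 0;
	sigaction(SIGINT, NULL, &sig_int[0]);
	sigaction(SIGINT, &sig_int[1], NULL);

	/* ... download loop goes here, checking dload_interrupted ... */

	if(localf != NULL) {
		/* stamp the file with the remote mtime so a later run can decide
		 * whether resuming it is safe */
		struct timeval tv[2];
		memset(&tv, 0, sizeof(tv));
		tv[0].tv_sec = remote_mtime;	/* atime */
		tv[1].tv_sec = remote_mtime;	/* mtime */
		fflush(localf);
		futimes(fileno(localf), tv);
		fclose(localf);
	}

	/* restore the previous handler and, if we were interrupted, trip it */
	sigaction(SIGINT, &sig_int[0], NULL);
	if(dload_interrupted) {
		raise(SIGINT);
	}
	return(0);
}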
diff --git a/lib/libalpm/dload.c b/lib/libalpm/dload.c
index a4c9f1f4..05555f2e 100644
--- a/lib/libalpm/dload.c
+++ b/lib/libalpm/dload.c
@@ -25,6 +25,9 @@
#include <errno.h>
#include <string.h>
#include <unistd.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
#include <signal.h>
#include <limits.h>
/* the following two are needed on BSD for libfetch */
@@ -85,18 +88,32 @@ static const char *gethost(struct url *fileurl)
return(host);
}
+int dload_interrupted;
+static RETSIGTYPE inthandler(int signum)
+{
+ dload_interrupted = 1;
+}
+
+#define check_stop() if(dload_interrupted) { ret = -1; goto cleanup; }
+enum sighandlers { OLD = 0, NEW = 1 };
+
static int download_internal(const char *url, const char *localpath,
- time_t mtimeold, time_t *mtimenew) {
- fetchIO *dlf = NULL;
+ int force) {
FILE *localf = NULL;
- struct url_stat ust;
struct stat st;
- int chk_resume = 0, ret = 0;
+ int ret = 0;
off_t dl_thisfile = 0;
ssize_t nread = 0;
char *tempfile, *destfile, *filename;
- struct sigaction new_action, old_action;
+ struct sigaction sig_pipe[2], sig_int[2];
+
+ off_t local_size = 0;
+ time_t local_time = 0;
+
struct url *fileurl;
+ struct url_stat ust;
+ fetchIO *dlf = NULL;
+
char buffer[PM_DLBUF_LEN];
filename = get_filename(url);
@@ -113,51 +130,80 @@ static int download_internal(const char *url, const char *localpath,
destfile = get_destfile(localpath, filename);
tempfile = get_tempfile(localpath, filename);
- if(mtimeold) {
- fileurl->last_modified = mtimeold;
- }
-
- /* pass the raw filename for passing to the callback function */
- _alpm_log(PM_LOG_DEBUG, "using '%s' for download progress\n", filename);
-
if(stat(tempfile, &st) == 0 && st.st_size > 0) {
- _alpm_log(PM_LOG_DEBUG, "existing file found, using it\n");
- fileurl->offset = (off_t)st.st_size;
+ _alpm_log(PM_LOG_DEBUG, "tempfile found, attempting continuation\n");
+ local_time = fileurl->last_modified = st.st_mtime;
+ local_size = fileurl->offset = (off_t)st.st_size;
dl_thisfile = st.st_size;
localf = fopen(tempfile, "ab");
- chk_resume = 1;
+ } else if(!force && stat(destfile, &st) == 0 && st.st_size > 0) {
+ _alpm_log(PM_LOG_DEBUG, "destfile found, using mtime only\n");
+ local_time = fileurl->last_modified = st.st_mtime;
+ local_size = /* no fu->off here */ (off_t)st.st_size;
} else {
- fileurl->offset = (off_t)0;
- dl_thisfile = 0;
+ _alpm_log(PM_LOG_DEBUG, "no file found matching criteria, starting from scratch\n");
}
+ /* pass the raw filename for passing to the callback function */
+ _alpm_log(PM_LOG_DEBUG, "using '%s' for download progress\n", filename);
+
/* print proxy info for debug purposes */
_alpm_log(PM_LOG_DEBUG, "HTTP_PROXY: %s\n", getenv("HTTP_PROXY"));
_alpm_log(PM_LOG_DEBUG, "http_proxy: %s\n", getenv("http_proxy"));
_alpm_log(PM_LOG_DEBUG, "FTP_PROXY: %s\n", getenv("FTP_PROXY"));
_alpm_log(PM_LOG_DEBUG, "ftp_proxy: %s\n", getenv("ftp_proxy"));
- /* libfetch does not reset the error code */
- fetchLastErrCode = 0;
-
/* 10s timeout */
fetchTimeout = 10;
/* ignore any SIGPIPE signals- these may occur if our FTP socket dies or
* something along those lines. Store the old signal handler first. */
- new_action.sa_handler = SIG_IGN;
- sigemptyset(&new_action.sa_mask);
- new_action.sa_flags = 0;
- sigaction(SIGPIPE, NULL, &old_action);
- sigaction(SIGPIPE, &new_action, NULL);
-
- dlf = fetchXGet(fileurl, &ust, "i");
-
- if(fetchLastErrCode == FETCH_UNCHANGED) {
- _alpm_log(PM_LOG_DEBUG, "mtimes are identical, skipping %s\n", filename);
+ sig_pipe[NEW].sa_handler = SIG_IGN;
+ sigemptyset(&sig_pipe[NEW].sa_mask);
+ sig_pipe[NEW].sa_flags = 0;
+ sigaction(SIGPIPE, NULL, &sig_pipe[OLD]);
+ sigaction(SIGPIPE, &sig_pipe[NEW], NULL);
+
+ dload_interrupted = 0;
+ sig_int[NEW].sa_handler = &inthandler;
+ sigemptyset(&sig_int[NEW].sa_mask);
+ sig_int[NEW].sa_flags = 0;
+ sigaction(SIGINT, NULL, &sig_int[OLD]);
+ sigaction(SIGINT, &sig_int[NEW], NULL);
+
+ /* NOTE: libfetch does not reset the error code, be sure to do it before
+ * calls into the library */
+
+ /* find out the remote size *and* mtime in one go. there is a lot of
+ * trouble in trying to do both size and "if-modified-since" logic in a
+ * non-stat request, so avoid it. */
+ fetchLastErrCode = 0;
+ if(fetchStat(fileurl, &ust, "") == -1) {
+ ret = -1;
+ goto cleanup;
+ }
+ check_stop();
+
+ _alpm_log(PM_LOG_DEBUG, "ust.mtime: %ld local_time: %ld compare: %ld\n",
+ ust.mtime, local_time, local_time - ust.mtime);
+ _alpm_log(PM_LOG_DEBUG, "ust.size: %"PRId64" local_size: %"PRId64" compare: %"PRId64"\n",
+ ust.size, local_size, local_size - ust.size);
+ if(!force && ust.mtime && ust.mtime == local_time
+ && ust.size && ust.size == local_size) {
+ /* the remote time and size values agreed with what we have, so move on
+ * because there is nothing more to do. */
+ _alpm_log(PM_LOG_DEBUG, "files are identical, skipping %s\n", filename);
ret = 1;
goto cleanup;
}
+ if(!ust.mtime || ust.mtime != local_time) {
+ _alpm_log(PM_LOG_DEBUG, "mtimes were different or unavailable, downloading %s from beginning\n", filename);
+ fileurl->offset = 0;
+ }
+
+ fetchLastErrCode = 0;
+ dlf = fetchGet(fileurl, "");
+ check_stop();
if(fetchLastErrCode != 0 || dlf == NULL) {
pm_errno = PM_ERR_LIBFETCH;
@@ -169,17 +215,14 @@ static int download_internal(const char *url, const char *localpath,
_alpm_log(PM_LOG_DEBUG, "connected to %s successfully\n", fileurl->host);
}
- if(ust.mtime && mtimenew) {
- *mtimenew = ust.mtime;
+ if(localf && fileurl->offset == 0) {
+ _alpm_log(PM_LOG_WARNING, _("resuming download of %s not possible; starting over\n"), filename);
+ fclose(localf);
+ localf = NULL;
+ } else if(fileurl->offset) {
+ _alpm_log(PM_LOG_DEBUG, "resuming download at position %"PRId64"\n", fileurl->offset);
}
- if(chk_resume && fileurl->offset == 0) {
- _alpm_log(PM_LOG_WARNING, _("cannot resume download, starting over\n"));
- if(localf != NULL) {
- fclose(localf);
- localf = NULL;
- }
- }
if(localf == NULL) {
_alpm_rmrf(tempfile);
@@ -187,7 +230,8 @@ static int download_internal(const char *url, const char *localpath,
dl_thisfile = 0;
localf = fopen(tempfile, "wb");
if(localf == NULL) { /* still null? */
- _alpm_log(PM_LOG_ERROR, _("cannot write to file '%s'\n"), tempfile);
+ _alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"),
+ tempfile, strerror(errno));
ret = -1;
goto cleanup;
}
@@ -199,11 +243,12 @@ static int download_internal(const char *url, const char *localpath,
}
while((nread = fetchIO_read(dlf, buffer, PM_DLBUF_LEN)) > 0) {
+ check_stop();
size_t nwritten = 0;
nwritten = fwrite(buffer, 1, nread, localf);
if((nwritten != nread) || ferror(localf)) {
_alpm_log(PM_LOG_ERROR, _("error writing to file '%s': %s\n"),
- destfile, strerror(errno));
+ tempfile, strerror(errno));
ret = -1;
goto cleanup;
}
@@ -240,36 +285,60 @@ static int download_internal(const char *url, const char *localpath,
fetchIO_close(dlf);
dlf = NULL;
+ /* set the times on the file to the same as that of the remote file */
+ if(ust.mtime) {
+ struct timeval tv[2];
+ memset(&tv, 0, sizeof(tv));
+ tv[0].tv_sec = ust.atime;
+ tv[1].tv_sec = ust.mtime;
+ utimes(tempfile, tv);
+ }
rename(tempfile, destfile);
ret = 0;
cleanup:
- /* restore any existing SIGPIPE signal handler */
- sigaction(SIGPIPE, &old_action, NULL);
-
FREE(tempfile);
FREE(destfile);
if(localf != NULL) {
+ /* if we still had a local file open, we got interrupted. set the mtimes on
+ * the file accordingly. */
+ fflush(localf);
+ if(ust.mtime) {
+ struct timeval tv[2];
+ memset(&tv, 0, sizeof(tv));
+ tv[0].tv_sec = ust.atime;
+ tv[1].tv_sec = ust.mtime;
+ futimes(fileno(localf), tv);
+ }
fclose(localf);
}
if(dlf != NULL) {
fetchIO_close(dlf);
}
fetchFreeURL(fileurl);
+
+ /* restore the old signal handlers */
+ sigaction(SIGINT, &sig_int[OLD], NULL);
+ sigaction(SIGPIPE, &sig_pipe[OLD], NULL);
+ /* if we were interrupted, trip the old handler */
+ if(dload_interrupted) {
+ raise(SIGINT);
+ }
+
return(ret);
}
#endif
static int download(const char *url, const char *localpath,
- time_t mtimeold, time_t *mtimenew) {
+ int force) {
if(handle->fetchcb == NULL) {
#if defined(INTERNAL_DOWNLOAD)
- return(download_internal(url, localpath, mtimeold, mtimenew));
+ return(download_internal(url, localpath, force));
#else
RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD, -1);
#endif
} else {
- int ret = handle->fetchcb(url, localpath, mtimeold, mtimenew);
+ int ret = handle->fetchcb(url, localpath, force);
if(ret == -1) {
RET_ERR(PM_ERR_EXTERNAL_DOWNLOAD, -1);
}
@@ -279,19 +348,15 @@ static int download(const char *url, const char *localpath,
/*
* Download a single file
- * - if mtimeold is non-NULL, then only download the file if it's different
- * than mtimeold.
- * - if *mtimenew is non-NULL, it will be filled with the mtime of the remote
- * file.
* - servers must be a list of urls WITHOUT trailing slashes.
*
* RETURN: 0 for successful download
- * 1 if the mtimes are identical
+ * 1 if the files are identical
* -1 on error
*/
int _alpm_download_single_file(const char *filename,
alpm_list_t *servers, const char *localpath,
- time_t mtimeold, time_t *mtimenew)
+ int force)
{
alpm_list_t *i;
int ret = -1;
@@ -308,7 +373,7 @@ int _alpm_download_single_file(const char *filename,
CALLOC(fileurl, len, sizeof(char), RET_ERR(PM_ERR_MEMORY, -1));
snprintf(fileurl, len, "%s/%s", server, filename);
- ret = download(fileurl, localpath, mtimeold, mtimenew);
+ ret = download(fileurl, localpath, force);
FREE(fileurl);
if(ret != -1) {
break;
@@ -327,7 +392,7 @@ int _alpm_download_files(alpm_list_t *files,
for(lp = files; lp; lp = lp->next) {
char *filename = lp->data;
if(_alpm_download_single_file(filename, servers,
- localpath, 0, NULL) == -1) {
+ localpath, 0) == -1) {
ret++;
}
}
@@ -354,7 +419,7 @@ char SYMEXPORT *alpm_fetch_pkgurl(const char *url)
cachedir = _alpm_filecache_setup();
/* download the file */
- ret = download(url, cachedir, 0, NULL);
+ ret = download(url, cachedir, 0);
if(ret == -1) {
_alpm_log(PM_LOG_WARNING, _("failed to download %s\n"), url);
return(NULL);
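As a usage note on the interface change above: a frontend that supplies
handle->fetchcb now receives a plain force flag instead of the mtime in/out
parameters. A hedged sketch of a callback matching the new
(url, localpath, force) shape; the example_fetch_cb name and its body are
placeholders, not libalpm code:

#include <stdio.h>

static int example_fetch_cb(const char *url, const char *localpath, int force)
{
	/* a real callback would invoke an external downloader here and return
	 * 0 on success, 1 if the file was already up to date, or -1 on error */
	printf("fetch %s into %s (force=%d)\n", url, localpath, force);
	return(0);
}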