author     Judd Vinet <judd@archlinux.org>    2004-04-15 07:05:54 +0200
committer  Judd Vinet <judd@archlinux.org>    2004-04-15 07:05:54 +0200
commit     4cb6a179b3b23fff7208628438d59783e9e65dfc (patch)
tree       fd3094ad328f188085c9e74e5a351ddfc82b621e
parent     d7c17ff1ccf5ff1c5297545b0fad858e6a116492 (diff)
download   pacman-4cb6a179b3b23fff7208628438d59783e9e65dfc.tar.gz
           pacman-4cb6a179b3b23fff7208628438d59783e9e65dfc.tar.xz
Imported from pacman-2.7.7.tar.gz
-rw-r--r--  ChangeLog            6
-rw-r--r--  Makefile.in          2
-rw-r--r--  README               2
-rw-r--r--  doc/pacman.8.in     12
-rw-r--r--  etc/pacman.conf      1
-rwxr-xr-x  scripts/gensync      9
-rwxr-xr-x  scripts/makepkg      7
-rwxr-xr-x  scripts/makeworld    4
-rw-r--r--  src/db.c             8
-rw-r--r--  src/package.c        6
-rw-r--r--  src/package.h        3
-rw-r--r--  src/pacman.c       173
-rw-r--r--  src/pacman.h         2
-rw-r--r--  src/pacsync.c      372
-rw-r--r--  src/pacsync.h        2
15 files changed, 378 insertions, 231 deletions
diff --git a/ChangeLog b/ChangeLog
index 5e6a8dc5..e82b3e16 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,11 @@
VERSION DESCRIPTION
-----------------------------------------------------------------------------
+2.7.7 - added an XferCommand directive that will make pacman use an
+ external download utility like wget
+ - added a license field to package meta-data
+ - add url support to -A and -U operations (download packages)
+ - -Ss now searches thru provides fields
+ - added --dbonly option to -R
2.7.6 - added --print-uris option
- fixed an http download bug (bug #667)
- fixed a segfault related to replaces/conflicts handling
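For illustration, the features added in 2.7.7 correspond to invocations along these lines (the package name and URL below are hypothetical, not taken from this commit):

    pacman -Ss smtp                                              # -Ss now also matches provides fields
    pacman -A ftp://ftp.example.com/extra/foo-1.0-1.pkg.tar.gz   # URL target: download first, then add
    pacman -R --dbonly foo                                       # remove only the database entry, keep files on disk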
diff --git a/Makefile.in b/Makefile.in
index 404139dd..dcf070f3 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -34,7 +34,7 @@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ $(AM_INSTALL_PROGRAM_FLAGS)
INSTALL_DATA = @INSTALL_DATA@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
-PACVER = 2.7.6
+PACVER = 2.7.7
TOPDIR = @srcdir@
SRCDIR = $(TOPDIR)/src/
diff --git a/README b/README
index 2aa5057a..8bff48bb 100644
--- a/README
+++ b/README
@@ -51,7 +51,7 @@ it helps.
COPYRIGHT:
----------
-pacman is Copyright (c) 2002-2003 Judd Vinet <jvinet@zeroflux.org> and is
+pacman is Copyright (c) 2002-2004 Judd Vinet <jvinet@zeroflux.org> and is
licensed through the GNU General Public License (see COPYING).
pacman uses "libtar", a library for reading/writing tar-files. This
diff --git a/doc/pacman.8.in b/doc/pacman.8.in
index cf342923..33b43636 100644
--- a/doc/pacman.8.in
+++ b/doc/pacman.8.in
@@ -1,4 +1,4 @@
-.TH pacman 8 "April 3, 2004" "pacman #VERSION#" ""
+.TH pacman 8 "April 14, 2004" "pacman #VERSION#" ""
.SH NAME
pacman \- package manager utility
.SH SYNOPSIS
@@ -70,6 +70,9 @@ package that is about to be installed contains files that are already
installed, this option will cause all those files to be overwritten.
This option should be used with care, ideally not at all.
.TP
+.B "\-k, \-\-keep"
+Removes the database entry only. Leaves all files in place.
+.TP
.B "\-n, \-\-nosave"
(only used with \fB--remove\fP)
Instructs pacman to ignore file backup designations. Normally, when
@@ -225,6 +228,13 @@ Instructs pacman to ignore any upgrades for this package when performing a
.B "ProxyServer = <host|ip>[:port]"
If set, pacman will use this proxy server for all ftp/http transfers.
.TP
+.B "XferCommand = /path/to/command %u"
+If set, pacman will use this external program to download all remote files.
+All instances of \fB%u\fP will be replaced with the URL to be downloaded.
+This is useful for users who experience problems with pacman's built-in http/ftp
+support, or need the more advanced proxy support that comes with utilities like
+wget.
+.TP
.B "NoPassiveFtp"
Disables passive ftp connections when downloading packages. (aka Active Mode)
.TP
diff --git a/etc/pacman.conf b/etc/pacman.conf
index bdf9de0e..07ef718d 100644
--- a/etc/pacman.conf
+++ b/etc/pacman.conf
@@ -23,6 +23,7 @@ NoUpgrade = etc/fstab etc/raidtab
NoUpgrade = etc/rc.conf etc/rc.local
NoUpgrade = etc/modprobe.conf etc/modules.conf
NoUpgrade = etc/lilo.conf boot/grub/menu.lst
+#XferCommand = /usr/bin/wget --passive-ftp -c %u
#
# REPOSITORIES
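As an illustration of the new directive (the URL below is hypothetical), pacman replaces every %u in the commented-out XferCommand above with the full download URL and runs the result through the shell, so a fetch would effectively execute:

    /usr/bin/wget --passive-ftp -c ftp://ftp.example.com/current/os/i686/foo-1.0-1.pkg.tar.gz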
diff --git a/scripts/gensync b/scripts/gensync
index dd7e50a1..e7c7724c 100755
--- a/scripts/gensync
+++ b/scripts/gensync
@@ -2,7 +2,7 @@
#
# gensync
#
-# Copyright (c) 2002-2003 by Judd Vinet <jvinet@zeroflux.org>
+# Copyright (c) 2002-2004 by Judd Vinet <jvinet@zeroflux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,16 +20,19 @@
# USA.
#
-myver='2.7.5'
+myver='2.7.7'
usage() {
echo "gensync $myver"
- echo "usage: $0 <root> <destfile>"
+ echo "usage: $0 <root> <destfile> [package_directory]"
echo
echo "gensync will generate a sync database by reading all PKGBUILD files"
echo "from <root>. gensync builds the database in /tmp/.gensync and then"
echo "compresses it to <destfile>."
echo
+ echo "gensync will calculate md5sums of packages in <destdir>, unless an"
+ echo "alternate [package_directory] is specified."
+ echo
echo "note: The <destfile> name is important. It must be of the form"
echo " {treename}.db.tar.gz where {treename} is the name of the custom"
echo " package repository you configured in /etc/pacman.conf. The"
diff --git a/scripts/makepkg b/scripts/makepkg
index f28da232..d53bdd17 100755
--- a/scripts/makepkg
+++ b/scripts/makepkg
@@ -2,7 +2,7 @@
#
# makepkg
#
-# Copyright (c) 2002-2003 by Judd Vinet <jvinet@zeroflux.org>
+# Copyright (c) 2002-2004 by Judd Vinet <jvinet@zeroflux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,7 +20,7 @@
# USA.
#
-myver='2.7.5'
+myver='2.7.7'
startdir=`pwd`
USE_COLOR="n"
@@ -290,7 +290,7 @@ if [ "$CLEANCACHE" = "1" ]; then
fi
fi
-unset pkgname pkgver pkgrel pkgdesc url groups provides md5sums
+unset pkgname pkgver pkgrel pkgdesc url license groups provides md5sums
unset replaces depends conflicts backup source install build makedepends
umask 0022
@@ -607,6 +607,7 @@ echo "pkgname = $pkgname" >>.PKGINFO
echo "pkgver = $pkgver-$pkgrel" >>.PKGINFO
echo "pkgdesc = $pkgdesc" >>.PKGINFO
echo "url = $url" >>.PKGINFO
+echo "license = $license" >>.PKGINFO
echo "builddate = $builddate" >>.PKGINFO
echo "packager = $packager" >>.PKGINFO
echo "size = $size" >>.PKGINFO
diff --git a/scripts/makeworld b/scripts/makeworld
index 67c58ec2..1e6c7e6b 100755
--- a/scripts/makeworld
+++ b/scripts/makeworld
@@ -2,7 +2,7 @@
#
# makeworld
#
-# Copyright (c) 2002-2003 by Judd Vinet <jvinet@zeroflux.org>
+# Copyright (c) 2002-2004 by Judd Vinet <jvinet@zeroflux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -21,7 +21,7 @@
#
toplevel=`pwd`
-version="2.7.5"
+version="2.7.7"
usage() {
echo "makeworld version $version"
diff --git a/src/db.c b/src/db.c
index 0f8b5a9e..29135afd 100644
--- a/src/db.c
+++ b/src/db.c
@@ -216,6 +216,12 @@ pkginfo_t* db_read(pacdb_t *db, struct dirent *ent, unsigned int inforeq)
return(NULL);
}
trim(info->url);
+ } else if(!strcmp(line, "%LICENSE%")) {
+ if(fgets(info->license, sizeof(info->license), fp) == NULL) {
+ FREEPKG(info);
+ return(NULL);
+ }
+ trim(info->license);
} else if(!strcmp(line, "%BUILDDATE%")) {
if(fgets(info->builddate, sizeof(info->builddate), fp) == NULL) {
FREEPKG(info);
@@ -379,6 +385,8 @@ int db_write(pacdb_t *db, pkginfo_t *info)
fprintf(fp, "\n");
fputs("%URL%\n", fp);
fprintf(fp, "%s\n\n", info->url);
+ fputs("%LICENSE%\n", fp);
+ fprintf(fp, "%s\n\n", info->license);
fputs("%BUILDDATE%\n", fp);
fprintf(fp, "%s\n\n", info->builddate);
fputs("%INSTALLDATE%\n", fp);
diff --git a/src/package.c b/src/package.c
index 1c7359bd..04153fb1 100644
--- a/src/package.c
+++ b/src/package.c
@@ -189,6 +189,8 @@ int parse_descfile(char *descfile, pkginfo_t *info, PMList **backup, int output)
info->groups = list_add(info->groups, strdup(ptr));
} else if(!strcmp(key, "URL")) {
strncpy(info->url, ptr, sizeof(info->url));
+ } else if(!strcmp(key, "LICENSE")) {
+ strncpy(info->license, ptr, sizeof(info->license));
} else if(!strcmp(key, "BUILDDATE")) {
strncpy(info->builddate, ptr, sizeof(info->builddate));
} else if(!strcmp(key, "INSTALLDATE")) {
@@ -232,6 +234,7 @@ pkginfo_t* newpkg()
pkg->version[0] = '\0';
pkg->desc[0] = '\0';
pkg->url[0] = '\0';
+ pkg->license[0] = '\0';
pkg->builddate[0] = '\0';
pkg->installdate[0] = '\0';
pkg->packager[0] = '\0';
@@ -321,7 +324,8 @@ void dump_pkg_full(pkginfo_t *info)
list_display("Groups :", pm);
FREELIST(pm);
printf("Packager : %s\n", info->packager);
- printf("URL : %s\n", (info->url ? info->url : "None"));
+ printf("URL : %s\n", info->url);
+ printf("License : %s\n", info->license);
printf("Size : %ld\n", info->size);
printf("Build Date : %s %s\n", info->builddate, strlen(info->builddate) ? "UTC" : "");
printf("Install Date : %s %s\n", info->installdate, strlen(info->installdate) ? "UTC" : "");
diff --git a/src/package.h b/src/package.h
index 16678858..8d6f458c 100644
--- a/src/package.h
+++ b/src/package.h
@@ -45,7 +45,8 @@ typedef struct __pkginfo_t {
char name[256];
char version[64];
char desc[512];
- char url[255];
+ char url[256];
+ char license[128];
char builddate[32];
char installdate[32];
char packager[64];
diff --git a/src/pacman.c b/src/pacman.c
index 54b50e9e..e2db8721 100644
--- a/src/pacman.c
+++ b/src/pacman.c
@@ -69,6 +69,7 @@ unsigned short pmo_q_list = 0;
unsigned short pmo_q_orphans = 0;
unsigned short pmo_q_owns = 0;
unsigned short pmo_r_cascade = 0;
+unsigned short pmo_r_dbonly = 0;
unsigned short pmo_r_recurse = 0;
unsigned short pmo_s_upgrade = 0;
unsigned short pmo_s_downloadonly = 0;
@@ -82,6 +83,7 @@ char *pmo_dbpath = NULL;
char *pmo_logfile = NULL;
char *pmo_proxyhost = NULL;
unsigned short pmo_proxyport = 0;
+char *pmo_xfercommand = NULL;
PMList *pmo_noupgrade = NULL;
PMList *pmo_ignorepkg = NULL;
unsigned short pmo_usesyslog = 0;
@@ -255,12 +257,11 @@ int pacman_deptest(pacdb_t *db, PMList *targets)
sprintf(dummy->version, "1.0-1");
for(lp = targets; lp; lp = lp->next) {
if(lp->data == NULL) continue;
- dummy->depends = list_add(dummy->depends, lp->data);
+ dummy->depends = list_add(dummy->depends, strdup(lp->data));
}
list = list_add(list, dummy);
deps = checkdeps(db, PM_ADD, list);
- FREELIST(list);
- FREEPKG(dummy);
+ FREELISTPKGS(list);
if(deps) {
/* return 126 = deps were missing, but successfully resolved
@@ -381,25 +382,42 @@ int pacman_sync(pacdb_t *db, PMList *targets)
for(k = dbs->pkgcache; k; k = k->next) {
pkginfo_t *pkg = (pkginfo_t*)k->data;
char *haystack;
+ PMList *m;
+ int match = 0;
/* check name */
haystack = strdup(pkg->name);
strtoupper(haystack);
if(strstr(haystack, targ)) {
+ match = 1;
+ }
+ FREE(haystack);
+
+ /* check description */
+ haystack = strdup(pkg->desc);
+ strtoupper(haystack);
+ if(strstr(haystack, targ)) {
+ match = 1;
+ }
+ FREE(haystack);
+
+ if(!match) {
+ pkg = db_scan(dbs->db, pkg->name, INFRQ_DESC | INFRQ_DEPENDS);
+ /* check provides */
+ for(m = pkg->provides; m; m = m->next) {
+ haystack = strdup(m->data);
+ strtoupper(haystack);
+ if(strstr(haystack, targ)) {
+ match = 1;
+ }
+ FREE(haystack);
+ }
+ }
+
+ if(match) {
printf("%s/%s %s\n ", dbs->sync->treename, pkg->name, pkg->version);
indentprint(pkg->desc, 4);
printf("\n");
- } else {
- /* check description */
- FREE(haystack);
- haystack = strdup(pkg->desc);
- strtoupper(haystack);
- if(strstr(haystack, targ)) {
- printf("%s/%s %s\n ", dbs->sync->treename, pkg->name, pkg->version);
- indentprint(pkg->desc, 4);
- printf("\n");
- }
}
- FREE(haystack);
}
}
FREE(targ);
@@ -1320,6 +1338,43 @@ int pacman_add(pacdb_t *db, PMList *targets)
if(targets == NULL) {
return(0);
}
+ for(targ = targets; targ; targ = targ->next) {
+ if(strstr(targ->data, "://")) {
+ /* this target looks like an URL. download it and then
+ * strip the URL portion from the target.
+ */
+ char url[PATH_MAX];
+ server_t server;
+ PMList *servers = NULL;
+ PMList *files = NULL;
+ char *host, *path, *fn;
+ strncpy(url, targ->data, PATH_MAX);
+ host = strstr(url, "://");
+ *host = '\0';
+ host += 3;
+ path = strchr(host, '/');
+ *path = '\0';
+ path++;
+ fn = strrchr(path, '/');
+ *fn = '\0';
+ fn++;
+ server.protocol = url;
+ server.server = host;
+ server.path = path;
+ servers = list_add(servers, &server);
+ files = list_add(files, fn);
+ if(downloadfiles(servers, ".", files)) {
+ fprintf(stderr, "error: failed to download %s\n", (char*)targ->data);
+ return(1);
+ }
+ FREELIST(servers);
+ files->data = NULL;
+ FREELIST(files);
+ /* replace this target with the raw filename, no URL */
+ free(targ->data);
+ targ->data = strndup(fn, PATH_MAX);
+ }
+ }
printf("loading package data... ");
fflush(stdout);
@@ -1942,50 +1997,52 @@ int pacman_remove(pacdb_t *db, PMList *targets)
}
}
- /* iterate through the list backwards, unlinking files */
- for(lp = list_last(info->files); lp; lp = lp->prev) {
- int nb = 0;
- if(needbackup((char*)lp->data, info->backup)) {
- nb = 1;
- }
- if(!nb && pmo_upgrade) {
- /* check pmo_noupgrade */
- if(is_in((char*)lp->data, pmo_noupgrade)) {
+ if(!pmo_r_dbonly) {
+ /* iterate through the list backwards, unlinking files */
+ for(lp = list_last(info->files); lp; lp = lp->prev) {
+ int nb = 0;
+ if(needbackup((char*)lp->data, info->backup)) {
nb = 1;
}
- }
- snprintf(line, PATH_MAX, "%s%s", pmo_root, (char*)lp->data);
- if(lstat(line, &buf)) {
- vprint("file %s does not exist\n", line);
- continue;
- }
- if(S_ISDIR(buf.st_mode)) {
- /*vprint(" removing directory %s\n", line);*/
- if(rmdir(line)) {
- /* this is okay, other packages are probably using it. */
+ if(!nb && pmo_upgrade) {
+ /* check pmo_noupgrade */
+ if(is_in((char*)lp->data, pmo_noupgrade)) {
+ nb = 1;
+ }
}
- } else {
- /* if the file is flagged, back it up to .pacsave */
- if(nb) {
- if(pmo_upgrade) {
- /* we're upgrading so just leave the file as is. pacman_add() will handle it */
- } else {
- if(!pmo_nosave) {
- newpath = (char*)realloc(newpath, strlen(line)+strlen(".pacsave")+1);
- sprintf(newpath, "%s.pacsave", line);
- rename(line, newpath);
- logaction(stderr, "warning: %s saved as %s", line, newpath);
+ snprintf(line, PATH_MAX, "%s%s", pmo_root, (char*)lp->data);
+ if(lstat(line, &buf)) {
+ vprint("file %s does not exist\n", line);
+ continue;
+ }
+ if(S_ISDIR(buf.st_mode)) {
+ /*vprint(" removing directory %s\n", line);*/
+ if(rmdir(line)) {
+ /* this is okay, other packages are probably using it. */
+ }
+ } else {
+ /* if the file is flagged, back it up to .pacsave */
+ if(nb) {
+ if(pmo_upgrade) {
+ /* we're upgrading so just leave the file as is. pacman_add() will handle it */
} else {
- /*vprint(" unlinking %s\n", line);*/
- if(unlink(line)) {
- perror("cannot remove file");
+ if(!pmo_nosave) {
+ newpath = (char*)realloc(newpath, strlen(line)+strlen(".pacsave")+1);
+ sprintf(newpath, "%s.pacsave", line);
+ rename(line, newpath);
+ logaction(stderr, "warning: %s saved as %s", line, newpath);
+ } else {
+ /*vprint(" unlinking %s\n", line);*/
+ if(unlink(line)) {
+ perror("cannot remove file");
+ }
}
}
- }
- } else {
- /*vprint(" unlinking %s\n", line);*/
- if(unlink(line)) {
- perror("cannot remove file");
+ } else {
+ /*vprint(" unlinking %s\n", line);*/
+ if(unlink(line)) {
+ perror("cannot remove file");
+ }
}
}
}
@@ -2933,13 +2990,14 @@ int parseargs(int op, int argc, char **argv)
{"downloadonly", no_argument, 0, 'w'},
{"print-uris", no_argument, 0, 'p'},
{"refresh", no_argument, 0, 'y'},
+ {"dbonly", no_argument, 0, 'k'},
{"cascade", no_argument, 0, 'c'},
{"recursive", no_argument, 0, 's'},
{"groups", no_argument, 0, 'g'},
{0, 0, 0, 0}
};
- while((opt = getopt_long(argc, argv, "ARUFQSTDYr:b:vhscVfnoldepiuwyg", opts, &option_index))) {
+ while((opt = getopt_long(argc, argv, "ARUFQSTDYr:b:vkhscVfnoldepiuwyg", opts, &option_index))) {
if(opt < 0) {
break;
}
@@ -2963,6 +3021,7 @@ int parseargs(int op, int argc, char **argv)
case 'f': pmo_force = 1; break;
case 'g': pmo_group = 1; break;
case 'i': pmo_q_info++; break;
+ case 'k': pmo_r_dbonly = 1; break;
case 'l': pmo_q_list = 1; break;
case 'n': pmo_nosave = 1; break;
case 'p': pmo_q_isfile = 1; pmo_s_printuris = 1; break;
@@ -3114,6 +3173,10 @@ int parseconfig(char *configfile)
}
pmo_logfile = strndup(ptr, PATH_MAX);
vprint("config: log file: %s\n", pmo_logfile);
+ } else if (!strcmp(key, "XFERCOMMAND")) {
+ FREE(pmo_xfercommand);
+ pmo_xfercommand = strndup(ptr, PATH_MAX);
+ vprint("config: xfercommand: %s\n", pmo_xfercommand);
} else if (!strcmp(key, "PROXYSERVER")) {
char *p;
if(pmo_proxyhost) {
@@ -3233,6 +3296,7 @@ void usage(int op, char *myname)
printf("options:\n");
printf(" -c, --cascade remove packages and all packages that depend on them\n");
printf(" -d, --nodeps skip dependency checks\n");
+ printf(" -k, --dbonly only remove database entry, do not remove files\n");
printf(" -n, --nosave remove configuration files as well\n");
printf(" -s, --recursive remove dependencies also (that won't break packages)\n");
} else if(op == PM_UPGRADE) {
@@ -3247,7 +3311,7 @@ void usage(int op, char *myname)
} else if(op == PM_QUERY) {
printf("usage: %s {-Q --query} [options] [package]\n", myname);
printf("options:\n");
- printf(" -i, --info view package information\n");
+ printf(" -i, --info view package information (use -ii for more)\n");
printf(" -g, --groups view all members of a package group\n");
printf(" -l, --list list the contents of the queried package\n");
printf(" -o, --owns <file> query the package that owns <file>\n");
@@ -3412,6 +3476,7 @@ void cleanup(int signum)
FREE(pmo_dbpath);
FREE(pmo_logfile);
FREE(pmo_proxyhost);
+ FREE(pmo_xfercommand);
FREELIST(pm_targets);
diff --git a/src/pacman.h b/src/pacman.h
index 90694be7..3bc7eb3b 100644
--- a/src/pacman.h
+++ b/src/pacman.h
@@ -22,7 +22,7 @@
#define _PAC_PACMAN_H
#ifndef PACVER
-#define PACVER "2.7.6"
+#define PACVER "2.7.7"
#endif
#ifndef PKGDIR
diff --git a/src/pacsync.c b/src/pacsync.c
index 54366cca..0c41f543 100644
--- a/src/pacsync.c
+++ b/src/pacsync.c
@@ -48,6 +48,7 @@ static struct timeval t0, t;
extern char *pmo_root;
extern char *pmo_dbpath;
extern char *pmo_proxyhost;
+extern char *pmo_xfercommand;
extern unsigned short pmo_proxyport;
extern unsigned short pmo_nopassiveftp;
@@ -105,7 +106,7 @@ int sync_synctree()
return(!success);
}
-int downloadfiles(PMList *servers, char *localpath, PMList *files)
+int downloadfiles(PMList *servers, const char *localpath, PMList *files)
{
int fsz;
netbuf *control = NULL;
@@ -121,197 +122,244 @@ int downloadfiles(PMList *servers, char *localpath, PMList *files)
for(i = servers; i && !done; i = i->next) {
server_t *server = (server_t*)i->data;
- if(!strcmp(server->protocol, "ftp") && !pmo_proxyhost) {
- FtpInit();
- vprint("Connecting to %s:21\n", server->server);
- if(!FtpConnect(server->server, &control)) {
- fprintf(stderr, "error: cannot connect to %s\n", server->server);
- continue;
- }
- if(!FtpLogin("anonymous", "arch@guest", control)) {
- fprintf(stderr, "error: anonymous login failed\n");
- FtpQuit(control);
- continue;
- }
- if(!FtpChdir(server->path, control)) {
- fprintf(stderr, "error: could not cwd to %s: %s\n", server->path,
- FtpLastResponse(control));
- continue;
- }
- if(!pmo_nopassiveftp) {
- if(!FtpOptions(FTPLIB_CONNMODE, FTPLIB_PASSIVE, control)) {
- fprintf(stderr, "warning: failed to set passive mode\n");
+ if(!pmo_xfercommand) {
+ if(!strcmp(server->protocol, "ftp") && !pmo_proxyhost) {
+ FtpInit();
+ vprint("Connecting to %s:21\n", server->server);
+ if(!FtpConnect(server->server, &control)) {
+ fprintf(stderr, "error: cannot connect to %s\n", server->server);
+ continue;
+ }
+ if(!FtpLogin("anonymous", "arch@guest", control)) {
+ fprintf(stderr, "error: anonymous login failed\n");
+ FtpQuit(control);
+ continue;
+ }
+ if(!FtpChdir(server->path, control)) {
+ fprintf(stderr, "error: could not cwd to %s: %s\n", server->path,
+ FtpLastResponse(control));
+ continue;
+ }
+ if(!pmo_nopassiveftp) {
+ if(!FtpOptions(FTPLIB_CONNMODE, FTPLIB_PASSIVE, control)) {
+ fprintf(stderr, "warning: failed to set passive mode\n");
+ }
+ } else {
+ vprint("FTP passive mode not set\n");
+ }
+ } else if(pmo_proxyhost) {
+ char *host;
+ unsigned port;
+ host = (pmo_proxyhost) ? pmo_proxyhost : server->server;
+ port = (pmo_proxyhost) ? pmo_proxyport : 80;
+ if(strchr(host, ':')) {
+ vprint("Connecting to %s\n", host);
+ } else {
+ vprint("Connecting to %s:%u\n", host, port);
+ }
+ if(!HttpConnect(host, port, &control)) {
+ fprintf(stderr, "error: cannot connect to %s\n", host);
+ continue;
}
- } else {
- vprint("FTP passive mode not set\n");
- }
- } else if(pmo_proxyhost) {
- char *host;
- unsigned port;
- host = (pmo_proxyhost) ? pmo_proxyhost : server->server;
- port = (pmo_proxyhost) ? pmo_proxyport : 80;
- if(strchr(host, ':')) {
- vprint("Connecting to %s\n", host);
- } else {
- vprint("Connecting to %s:%u\n", host, port);
- }
- if(!HttpConnect(host, port, &control)) {
- fprintf(stderr, "error: cannot connect to %s\n", host);
- continue;
}
- }
- /* set up our progress bar's callback (and idle timeout) */
- if(strcmp(server->protocol, "file") && control) {
- FtpOptions(FTPLIB_CALLBACK, (long)log_progress, control);
- FtpOptions(FTPLIB_IDLETIME, (long)1000, control);
- FtpOptions(FTPLIB_CALLBACKARG, (long)&fsz, control);
- FtpOptions(FTPLIB_CALLBACKBYTES, (10*1024), control);
+ /* set up our progress bar's callback (and idle timeout) */
+ if(strcmp(server->protocol, "file") && control) {
+ FtpOptions(FTPLIB_CALLBACK, (long)log_progress, control);
+ FtpOptions(FTPLIB_IDLETIME, (long)1000, control);
+ FtpOptions(FTPLIB_CALLBACKARG, (long)&fsz, control);
+ FtpOptions(FTPLIB_CALLBACKBYTES, (10*1024), control);
+ }
}
/* get each file in the list */
for(lp = files; lp; lp = lp->next) {
- char output[PATH_MAX];
- int j, filedone = 0;
char *fn = (char*)lp->data;
- char *ptr;
- struct stat st;
if(is_in(fn, complete)) {
continue;
}
- snprintf(output, PATH_MAX, "%s/%s.part", localpath, fn);
- strncpy(sync_fnm, fn, 24);
- /* drop filename extension */
- ptr = strstr(fn, ".db.tar.gz");
- if(ptr && (ptr-fn) < 24) {
- sync_fnm[ptr-fn] = '\0';
- }
- ptr = strstr(fn, ".pkg.tar.gz");
- if(ptr && (ptr-fn) < 24) {
- sync_fnm[ptr-fn] = '\0';
- }
- for(j = strlen(sync_fnm); j < 24; j++) {
- sync_fnm[j] = ' ';
- }
- sync_fnm[24] = '\0';
- offset = 0;
-
- /* ETA setup */
- gettimeofday(&t0, NULL);
- t = t0;
- rate = 0;
- xfered1 = 0;
- eta_h = 0;
- eta_m = 0;
- eta_s = 0;
-
- if(!strcmp(server->protocol, "ftp") && !pmo_proxyhost) {
- if(!FtpSize(fn, &fsz, FTPLIB_IMAGE, control)) {
- fprintf(stderr, "warning: failed to get filesize for %s\n", fn);
+ if(pmo_xfercommand) {
+ int ret;
+ char *ptr1, *ptr2;
+ char origCmd[PATH_MAX];
+ char parsedCmd[PATH_MAX] = "";
+ char url[PATH_MAX];
+ char cwd[PATH_MAX];
+ /* build the full download url */
+ snprintf(url, PATH_MAX, "%s://%s%s%s", server->protocol, server->server,
+ server->path, fn);
+ /* replace all occurrences of %u with the download URL */
+ strncpy(origCmd, pmo_xfercommand, sizeof(origCmd));
+ ptr1 = origCmd;
+ while((ptr2 = strstr(ptr1, "%u"))) {
+ ptr2[0] = '\0';
+ strcat(parsedCmd, ptr1);
+ strcat(parsedCmd, url);
+ ptr1 = ptr2 + 2;
}
- if(!stat(output, &st)) {
- offset = (int)st.st_size;
- if(!FtpRestart(offset, control)) {
- fprintf(stderr, "warning: failed to resume download -- restarting\n");
- /* can't resume: */
- /* unlink the file in order to restart download from scratch */
- unlink(output);
- }
+ strcat(parsedCmd, ptr1);
+ /* cwd to the download directory */
+ getcwd(cwd, PATH_MAX);
+ if(chdir(localpath)) {
+ fprintf(stderr, "error: could not chdir to %s\n", localpath);
+ return(1);
}
- if(!FtpGet(output, fn, FTPLIB_IMAGE, control)) {
- fprintf(stderr, "\nfailed downloading %s from %s: %s\n",
- fn, server->server, FtpLastResponse(control));
- /* we leave the partially downloaded file in place so it can be resumed later */
+ /* execute the parsed command via /bin/sh -c */
+ vprint("running command: %s\n", parsedCmd);
+ ret = system(parsedCmd);
+ if(ret == -1) {
+ fprintf(stderr, "error running XferCommand: fork failed!\n");
+ return(1);
+ } else if(ret != 0) {
+ /* download failed */
+ vprint("XferCommand command returned non-zero status code (%d)\n",
+ WEXITSTATUS(ret));
} else {
- filedone = 1;
+ /* download was successful */
+ complete = list_add(complete, fn);
}
- } else if(!strcmp(server->protocol, "http") || pmo_proxyhost) {
- char src[PATH_MAX];
- char *host;
- unsigned port;
- if(!strcmp(server->protocol, "http") && !pmo_proxyhost) {
- /* HTTP servers hang up after each request (but not proxies), so
- * we have to re-connect for each files.
- */
- host = (pmo_proxyhost) ? pmo_proxyhost : server->server;
- port = (pmo_proxyhost) ? pmo_proxyport : 80;
- if(strchr(host, ':')) {
- vprint("Connecting to %s\n", host);
- } else {
- vprint("Connecting to %s:%u\n", host, port);
+ chdir(cwd);
+ } else {
+ char output[PATH_MAX];
+ int j, filedone = 0;
+ char *ptr;
+ struct stat st;
+ snprintf(output, PATH_MAX, "%s/%s.part", localpath, fn);
+ strncpy(sync_fnm, fn, 24);
+ /* drop filename extension */
+ ptr = strstr(fn, ".db.tar.gz");
+ if(ptr && (ptr-fn) < 24) {
+ sync_fnm[ptr-fn] = '\0';
+ }
+ ptr = strstr(fn, ".pkg.tar.gz");
+ if(ptr && (ptr-fn) < 24) {
+ sync_fnm[ptr-fn] = '\0';
+ }
+ for(j = strlen(sync_fnm); j < 24; j++) {
+ sync_fnm[j] = ' ';
+ }
+ sync_fnm[24] = '\0';
+ offset = 0;
+
+ /* ETA setup */
+ gettimeofday(&t0, NULL);
+ t = t0;
+ rate = 0;
+ xfered1 = 0;
+ eta_h = 0;
+ eta_m = 0;
+ eta_s = 0;
+
+ if(!strcmp(server->protocol, "ftp") && !pmo_proxyhost) {
+ if(!FtpSize(fn, &fsz, FTPLIB_IMAGE, control)) {
+ fprintf(stderr, "warning: failed to get filesize for %s\n", fn);
+ }
+ if(!stat(output, &st)) {
+ offset = (int)st.st_size;
+ if(!FtpRestart(offset, control)) {
+ fprintf(stderr, "warning: failed to resume download -- restarting\n");
+ /* can't resume: */
+ /* unlink the file in order to restart download from scratch */
+ unlink(output);
+ }
}
- if(!HttpConnect(host, port, &control)) {
- fprintf(stderr, "error: cannot connect to %s\n", host);
- continue;
+ if(!FtpGet(output, fn, FTPLIB_IMAGE, control)) {
+ fprintf(stderr, "\nfailed downloading %s from %s: %s\n",
+ fn, server->server, FtpLastResponse(control));
+ /* we leave the partially downloaded file in place so it can be resumed later */
+ } else {
+ filedone = 1;
}
- /* set up our progress bar's callback (and idle timeout) */
- if(strcmp(server->protocol, "file") && control) {
- FtpOptions(FTPLIB_CALLBACK, (long)log_progress, control);
- FtpOptions(FTPLIB_IDLETIME, (long)1000, control);
- FtpOptions(FTPLIB_CALLBACKARG, (long)&fsz, control);
- FtpOptions(FTPLIB_CALLBACKBYTES, (10*1024), control);
+ } else if(!strcmp(server->protocol, "http") || pmo_proxyhost) {
+ char src[PATH_MAX];
+ char *host;
+ unsigned port;
+ if(!strcmp(server->protocol, "http") && !pmo_proxyhost) {
+ /* HTTP servers hang up after each request (but not proxies), so
+ * we have to re-connect for each file.
+ */
+ host = (pmo_proxyhost) ? pmo_proxyhost : server->server;
+ port = (pmo_proxyhost) ? pmo_proxyport : 80;
+ if(strchr(host, ':')) {
+ vprint("Connecting to %s\n", host);
+ } else {
+ vprint("Connecting to %s:%u\n", host, port);
+ }
+ if(!HttpConnect(host, port, &control)) {
+ fprintf(stderr, "error: cannot connect to %s\n", host);
+ continue;
+ }
+ /* set up our progress bar's callback (and idle timeout) */
+ if(strcmp(server->protocol, "file") && control) {
+ FtpOptions(FTPLIB_CALLBACK, (long)log_progress, control);
+ FtpOptions(FTPLIB_IDLETIME, (long)1000, control);
+ FtpOptions(FTPLIB_CALLBACKARG, (long)&fsz, control);
+ FtpOptions(FTPLIB_CALLBACKBYTES, (10*1024), control);
+ }
}
- }
- if(!stat(output, &st)) {
- offset = (int)st.st_size;
- }
- if(!pmo_proxyhost) {
+ if(!stat(output, &st)) {
+ offset = (int)st.st_size;
+ }
+ if(!pmo_proxyhost) {
+ snprintf(src, PATH_MAX, "%s%s", server->path, fn);
+ } else {
+ snprintf(src, PATH_MAX, "%s://%s%s%s", server->protocol, server->server, server->path, fn);
+ }
+ if(!HttpGet(server->server, output, src, &fsz, control, offset)) {
+ fprintf(stderr, "\nfailed downloading %s from %s: %s\n",
+ fn, server->server, FtpLastResponse(control));
+ /* we leave the partially downloaded file in place so it can be resumed later */
+ } else {
+ filedone = 1;
+ }
+ } else if(!strcmp(server->protocol, "file")) {
+ char src[PATH_MAX];
snprintf(src, PATH_MAX, "%s%s", server->path, fn);
- } else {
- snprintf(src, PATH_MAX, "%s://%s%s%s", server->protocol, server->server, server->path, fn);
- }
- if(!HttpGet(server->server, output, src, &fsz, control, offset)) {
- fprintf(stderr, "\nfailed downloading %s from %s: %s\n",
- fn, server->server, FtpLastResponse(control));
- /* we leave the partially downloaded file in place so it can be resumed later */
- } else {
- filedone = 1;
- }
- } else if(!strcmp(server->protocol, "file")) {
- char src[PATH_MAX];
- snprintf(src, PATH_MAX, "%s%s", server->path, fn);
- vprint("copying %s to %s/%s\n", src, localpath, fn);
- /* local repository, just copy the file */
- if(copyfile(src, output)) {
- fprintf(stderr, "failed copying %s\n", src);
- } else {
- filedone = 1;
+ vprint("copying %s to %s/%s\n", src, localpath, fn);
+ /* local repository, just copy the file */
+ if(copyfile(src, output)) {
+ fprintf(stderr, "failed copying %s\n", src);
+ } else {
+ filedone = 1;
+ }
}
- }
- if(filedone) {
- char completefile[PATH_MAX];
- if(!strcmp(server->protocol, "file")) {
- char out[56];
- printf(" %s [", sync_fnm);
- strncpy(out, server->path, 33);
- printf("%s", out);
- for(j = strlen(out); j < maxcols-64; j++) {
- printf(" ");
+ if(filedone) {
+ char completefile[PATH_MAX];
+ if(!strcmp(server->protocol, "file")) {
+ char out[56];
+ printf(" %s [", sync_fnm);
+ strncpy(out, server->path, 33);
+ printf("%s", out);
+ for(j = strlen(out); j < maxcols-64; j++) {
+ printf(" ");
+ }
+ fputs("] 100% | LOCAL |", stdout);
+ } else {
+ log_progress(control, fsz-offset, &fsz);
}
- fputs("] 100% | LOCAL |", stdout);
- } else {
- log_progress(control, fsz-offset, &fsz);
+ complete = list_add(complete, fn);
+ /* rename "output.part" file to "output" file */
+ snprintf(completefile, PATH_MAX, "%s/%s", localpath, fn);
+ rename(output, completefile);
}
- complete = list_add(complete, fn);
- /* rename "output.part" file to "output" file */
- snprintf(completefile, PATH_MAX, "%s/%s", localpath, fn);
- rename(output, completefile);
+ printf("\n");
+ fflush(stdout);
}
- printf("\n");
- fflush(stdout);
}
- if(list_count(complete) == list_count(files)) {
- done = 1;
+ if(!pmo_xfercommand) {
+ if(!strcmp(server->protocol, "ftp")) {
+ FtpQuit(control);
+ } else if(!strcmp(server->protocol, "http")) {
+ HttpQuit(control);
+ }
}
- if(!strcmp(server->protocol, "ftp")) {
- FtpQuit(control);
- } else if(!strcmp(server->protocol, "http")) {
- HttpQuit(control);
+ if(list_count(complete) == list_count(files)) {
+ done = 1;
}
}
diff --git a/src/pacsync.h b/src/pacsync.h
index 9a4846e2..775fa658 100644
--- a/src/pacsync.h
+++ b/src/pacsync.h
@@ -48,7 +48,7 @@ typedef struct __syncpkg_t {
} syncpkg_t;
int sync_synctree();
-int downloadfiles(PMList *servers, char *localpath, PMList *files);
+int downloadfiles(PMList *servers, const char *localpath, PMList *files);
syncpkg_t* find_pkginsync(char *needle, PMList *haystack);
#endif