author     Aaron Griffin <aaronmgriffin@gmail.com>    2008-04-08 05:52:55 +0200
committer  Aaron Griffin <aaronmgriffin@gmail.com>    2008-04-08 05:52:55 +0200
commit     4bc106f1a60a6172b707bfaee5aff5030234466e
tree       95e3ff179162665274e0ca212b9b4c8f258201c1
parent     29a77254307e42ba0959fe7660aa3d0d76285e6a
Giant SVN changes
Some other fun stuff snuck in there, like indentation changes,
but this should get us mostly working under SVN and, at the very
least, allow us to build DB files
Signed-off-by: Aaron Griffin <aaronmgriffin@gmail.com>
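
After this change each per-repository entry point collapses to a few variable
assignments plus a source of db-inc. A minimal sketch, reconstructed from the
db-core hunk in the diff below (db-inc now derives ftppath, svnrepo and
stagedir from $reponame and $arch):

    #!/bin/bash
    # Shape of a per-repo wrapper (db-core) after this commit; the heavy
    # lifting lives in db-inc, which builds ftppath, svnrepo and stagedir
    # from $reponame and $arch.
    repoid=5
    reponame="core"
    arch="i686"
    . $(dirname $0)/db-inc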
-rwxr-xr-x  db-core            12
-rwxr-xr-x  db-core64          15
-rwxr-xr-x  db-extra           12
-rwxr-xr-x  db-extra64         15
-rw-r--r--  db-inc            198
-rwxr-xr-x  db-testing         50
-rwxr-xr-x  db-testing64       53
-rwxr-xr-x  db-unstable        11
-rwxr-xr-x  db-unstable64      14
-rwxr-xr-x  pkgdb1            101
-rw-r--r--  pkgdb2-add.c      464
-rwxr-xr-x  updatesync-many    16
12 files changed, 401 insertions, 560 deletions
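
The core of the change in db-inc, pkgdb1 and updatesync-many is swapping the
per-module CVS exports for a single svn export of the packages repository,
with each PKGBUILD then found at a fixed path instead of via find. A minimal
sketch of that lookup, using illustrative values taken from the diff below
(the package name is hypothetical):

    #!/bin/bash
    # Sketch of the new SVN checkout and PKGBUILD lookup; svnpath and the
    # <pkgname>/repos/<reponame>-<arch> layout come from the diff below,
    # the package name "foo" is only an example.
    svnpath="/home/svn-packages"
    reponame="core"
    arch="i686"
    svnrepo="$reponame-$arch"

    tmpdir=$(mktemp -d) || exit 1
    svn export "file://$svnpath" "$tmpdir/checkout" || exit 1

    pkgname="foo"
    pkgbuild="$tmpdir/checkout/$pkgname/repos/$svnrepo/PKGBUILD"
    [ -f "$pkgbuild" ] && source "$pkgbuild"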
@@ -4,14 +4,4 @@ repoid=5 reponame="core" arch="i686" -ftppath="/home/ftp/core/os/i686" - -cvspath="/home/cvs-core" -cvsmod="core" -cvstag="CURRENT" - -stagedir="$HOME/staging/core" - -# call the real script -. `dirname $0`/db-inc - +. $(dirname $0)/db-inc @@ -1,20 +1,7 @@ #!/bin/bash -export CARCH=x86_64 - repoid=5 reponame="core" arch="x86_64" -ftppath="/home/ftp/core/os/x86_64" - -cvspath="/home/cvs-core" -cvsmod="core" -cvstag="CURRENT-64" - -#stagedir="/home/arch64/staging/extra" -stagedir="$HOME/staging/core64" - -# call the real script -. `dirname $0`/db-inc - +. $(dirname $0)/db-inc @@ -4,14 +4,4 @@ repoid=2 reponame="extra" arch="i686" -ftppath="/home/ftp/extra/os/i686" - -cvspath="/home/cvs-extra" -cvsmod="extra" -cvstag="CURRENT" - -stagedir="$HOME/staging/extra" - -# call the real script -. `dirname $0`/db-inc - +. $(dirname $0)/db-inc @@ -1,20 +1,7 @@ #!/bin/bash -export CARCH=x86_64 - repoid=2 reponame="extra" arch="x86_64" -ftppath="/home/ftp/extra/os/x86_64" - -cvspath="/home/cvs-extra" -cvsmod="extra" -cvstag="CURRENT-64" - -#stagedir="/home/arch64/staging/extra" -stagedir="$HOME/staging/extra64" - -# call the real script -. `dirname $0`/db-inc - +. $(dirname $0)/db-inc @@ -1,53 +1,69 @@ -[ "$UID" = "" ] && UID=`uid` -TMPDIR="/tmp/archpkg.$arch.$repoid.$UID" +#!/bin/bash # where are the arch scripts located? ARCHDIR="/arch" +#All this fun stuff used to be in the db-(whatever) files +# Let's make it cleaner +export CARCH="$arch" +ftppath="/home/ftp/$reponame/os/$arch/" +svnpath="/home/svn-packages" +svnrepo="$reponame-$arch" + +#Hacky for now +if [ "$arch" = "x86_64" ]; then + stagedir="$HOME/staging/$reponame-64" +else + stagedir="$HOME/staging/$reponame" +fi + +[ "$UID" = "" ] && UID=$(uid) +TMPDIR="/tmp/archpkg.$arch.$repoid.$UID" + if [ ! `type -p fakeroot` ]; then - echo "error: fakeroot is missing" >&2 - exit 1 + echo "error: fakeroot is missing" >&2 + exit 1 fi if [ ! -d $stagedir ]; then - echo "error: staging directory missing: $stagedir" >&2 - exit 1 + echo "error: staging directory missing: $stagedir" >&2 + exit 1 fi # Get the package name from the filename # hackish, but should work for now getpkgname() { - local tmp + local tmp - tmp=${1##*/} - tmp=${tmp%.pkg.tar.gz} - tmp=${tmp%-i686} - tmp=${tmp%-x86_64} - echo ${tmp%-*-*} + tmp=${1##*/} + tmp=${tmp%.pkg.tar.gz} + tmp=${tmp%-i686} + tmp=${tmp%-x86_64} + echo ${tmp%-*-*} } cleanup() { - rm -rf $TMPDIR - # unlock - rm -f /tmp/.repolck.$arch.$repoid - [ "$1" ] && exit $1 + rm -rf $TMPDIR + # unlock + rm -f /tmp/.repolck.$arch.$repoid + [ "$1" ] && exit $1 } ctrl_c() { - echo "Interrupted" >&2 - cleanup 0 + echo "Interrupted" >&2 + cleanup 0 } die() { - echo "$*" >&2 - cleanup 1 + echo "$*" >&2 + cleanup 1 } # check for locks if [ -f /tmp/.repolck.$arch.$repoid ]; then - owner=`/bin/ls -l /tmp/.repolck.$arch.$repoid | awk '{print $3}'` - echo "error: db generation is already in progress (started by $owner)" - exit 1 + owner=`/bin/ls -l /tmp/.repolck.$arch.$repoid | awk '{print $3}'` + echo "error: db generation is already in progress (started by $owner)" + exit 1 fi # catch ^C breaks @@ -57,30 +73,30 @@ touch /tmp/.repolck.$arch.$repoid # RedHat's mktemp is broken... if [ -d $TMPDIR ]; then - echo "==> Removing old temp dir..." >&2 - rm -rf $TMPDIR || exit 1 + echo "==> Removing old temp dir..." >&2 + rm -rf $TMPDIR || exit 1 fi mkdir $TMPDIR; [ $? 
-gt 0 ] && exit 1 cd $TMPDIR -# Checkout the CVS module if we need to +# Checkout the SVN module if we need to updatelists= if [ "`ls $stagedir/add 2>/dev/null`" -o "`ls $stagedir/del 2>/dev/null`" ]; then - # if $cvsdir is set, then use that instead of doing our own cvs checkout - if [ "$cvsdir" ]; then - mv $cvsdir $TMPDIR/$cvsmod - else - echo "==> Checking out module: $cvsmod ($cvstag)" - CVS_RSH=ssh CVSROOT=:ext:cvs.archlinux.org:$cvspath cvs -q export -r $cvstag $cvsmod - if [ $? -gt 0 ]; then - die "==> CVS export failed!" + # if $svndir is set, then use that instead of doing our own cvs checkout + if [ "$svndir" ]; then + mv $svndir $TMPDIR/checkout + else + echo "==> Checking out repo: $svnrepo ($arch)" + svn export file://$svnpath $TMPDIR/checkout + if [ $? -gt 0 ]; then + die "==> SVN export failed!" + fi fi - fi - updatelists=1 + updatelists=1 else - echo "No files to process" - cleanup 0 + echo "No files to process" + cleanup 0 fi # Right-O, now we look through the "add" and "del" subdirectories of @@ -92,83 +108,75 @@ fi # one db-* invocation, but it's not a huge performance hit. if [ -d $stagedir/add -a "`ls $stagedir/add`" ]; then - cd $TMPDIR - echo "==> Processing new/updated packages for repository '$reponame'..." >&2 + cd $TMPDIR + echo "==> Processing new/updated packages for repository '$reponame'..." >&2 - # copy the db file into our working area - cp $ftppath/$reponame.db.tar.gz . + # copy the db file into our working area + cp $ftppath/$reponame.db.tar.gz . - cd $stagedir/add - # run it thru fakeroot make sure everything is owned by root.root - echo "$ARCHDIR/updatesync-many add $TMPDIR/$reponame.db.tar.gz $TMPDIR/$cvsmod" \ + cd $stagedir/add + # run it thru fakeroot make sure everything is owned by root.root + echo "$ARCHDIR/updatesync-many add $TMPDIR/$reponame.db.tar.gz $TMPDIR/checkout $svnrepo" \ | fakeroot - if [ $? -ne 0 ]; then - die "==> Error returned from updatesync-many" - fi + if [ $? -ne 0 ]; then + die "==> Error returned from updatesync-many" + fi - cp $TMPDIR/$reponame.db.tar.gz $ftppath + cp $TMPDIR/$reponame.db.tar.gz $ftppath - # only for i686 (for now) - if [ "$arch" = "i686" ]; then - echo "==> Scanning for New/Updated packages..." >&2 - cd $stagedir/add - $ARCHDIR/pkgdb1 $TMPDIR/$cvsmod | $ARCHDIR/pkgdb2-add $repoid $stagedir/add - fi + # only for i686 (for now) + if [ "$arch" = "i686" ]; then + echo "==> Scanning for New/Updated packages..." >&2 + cd $stagedir/add + $ARCHDIR/pkgdb1 $TMPDIR/checkout $svnrepo | $ARCHDIR/pkgdb2-add $repoid $stagedir/add + fi - # move the package files into the ftp directory - mv -f $stagedir/add/*.pkg.tar.gz $ftppath + # move the package files into the ftp directory + mv -f $stagedir/add/*.pkg.tar.gz $ftppath fi - + if [ -d $stagedir/del -a "`ls $stagedir/del`" ]; then - cd $TMPDIR - echo "==> Processing deleted packages for repository '$reponame'..." >&2 + cd $TMPDIR + echo "==> Processing deleted packages for repository '$reponame'..." >&2 - # copy the db file into our working area - cp $ftppath/$reponame.db.tar.gz . + # copy the db file into our working area + cp $ftppath/$reponame.db.tar.gz . - cd $stagedir/del - # run it thru fakeroot make sure everything is owned by root.root - echo "$ARCHDIR/updatesync-many del $TMPDIR/$reponame.db.tar.gz NOT-USED" \ + cd $stagedir/del + # run it thru fakeroot make sure everything is owned by root.root + echo "$ARCHDIR/updatesync-many del $TMPDIR/$reponame.db.tar.gz NOT-USED ZOMGWOO" \ | fakeroot - if [ $? 
-ne 0 ]; then - die "==> Error returned from updatesync-many" - fi + if [ $? -ne 0 ]; then + die "==> Error returned from updatesync-many" + fi - cp $TMPDIR/$reponame.db.tar.gz $ftppath + cp $TMPDIR/$reponame.db.tar.gz $ftppath + + # only for i686 (for now) + if [ "$arch" = "i686" ]; then + echo "==> Scanning for Deleted packages..." >&2 + cd $stagedir/del + ( + for i in *.pkg.tar.gz; do + pkgname=$(getpkgname $i) + echo $pkgname + done + ) | $ARCHDIR/pkgdb2-del $repoid $stagedir/del + fi - # only for i686 (for now) - if [ "$arch" = "i686" ]; then - echo "==> Scanning for Deleted packages..." >&2 - cd $stagedir/del - ( - for i in *.pkg.tar.gz; do - pkgname=$(getpkgname $i) - echo $pkgname - done - ) | $ARCHDIR/pkgdb2-del $repoid $stagedir/del - fi - - # remove the package files - rm -f $stagedir/del/*.pkg.tar.gz + # remove the package files + rm -f $stagedir/del/*.pkg.tar.gz fi if [ "$updatelists" ]; then - echo "==> Generating Text Package List..." >&2 - cd $TMPDIR/$cvsmod - $ARCHDIR/genpkglist $ftppath $arch - - # hack -- only Current's packages.txt goes in a "setup" subdir - if [ "$reponame" = "current" ]; then - mv packages.txt $ftppath/setup/packages.txt - else + echo "==> Generating Text Package List..." >&2 + cd $TMPDIR/checkout + $ARCHDIR/genpkglist $ftppath $arch mv packages.txt $ftppath/packages.txt - fi fi -$ARCHDIR/ftpdir-cleanup $reponame $arch - cleanup -# vim: set ts=2 noet ft=sh: +# vim: set ts=4 sw=4 noet ft=sh: @@ -4,52 +4,4 @@ repoid=4 reponame="testing" arch="i686" -ftppath="/home/ftp/testing/os/i686" - -cvspath="/home/cvs-arch" -cvsmod="testing" -cvstag="TESTING" - -stagedir="$HOME/staging/testing" - -testing_cleanup() { - rm -rf $testingtmp -} - -# catch ^C -trap testing_cleanup SIGINT - -if [ -f /tmp/.repolck.$arch.$repoid ]; then - owner=`/bin/ls -l /tmp/.repolck.$arch.$repoid | awk '{print $3}'` - echo "error: db generation is already in progress (started by $owner)" - exit 1 -fi -# lock -touch /tmp/.repolck.$arch.$repoid - -# testing is special, so we have to do the CVS checkout stuff here -testingtmp=$(mktemp -d /tmp/testtmp-XXXXXX) || exit 1 -cvsdir=$testingtmp/testing -mkdir $cvsdir -cd $cvsdir - -if [ "`ls $stagedir/add`" -o "`ls $stagedir/del`" ]; then - echo "==> Checking out TESTING branches..." >&2 - echo "--- CORE ---" - CVS_RSH=ssh CVSROOT=:ext:cvs.archlinux.org:/home/cvs-core cvs -q export -r TESTING core - cp -a core/* . && rm -rf core - echo "--- EXTRA ---" - CVS_RSH=ssh CVSROOT=:ext:cvs.archlinux.org:/home/cvs-extra cvs -q export -r TESTING extra - cp -a extra/* . && rm -rf extra - echo "--- UNSTABLE ---" - CVS_RSH=ssh CVSROOT=:ext:cvs.archlinux.org:/home/cvs-unstable cvs -q export -r TESTING unstable - cp -a unstable/* . && rm -rf unstable -fi - -# unlock -rm -f /tmp/.repolck.$arch.$repoid - -# call the real script -. `dirname $0`/db-inc - -testing_cleanup +. 
$(dirname $0)/db-inc diff --git a/db-testing64 b/db-testing64 index 8b306ac..c34355b 100755 --- a/db-testing64 +++ b/db-testing64 @@ -1,58 +1,7 @@ #!/bin/bash -export CARCH=x86_64 - repoid=4 reponame="testing" arch="x86_64" -ftppath="/home/ftp/testing/os/x86_64" - -cvspath="/home/cvs-arch" -cvsmod="testing" -cvstag="TESTING-64" - -#stagedir="/home/arch64/staging/testing" -stagedir="$HOME/staging/testing64" - -testing_cleanup() { - rm -rf $testingtmp -} - -# catch ^C -trap testing_cleanup SIGINT - -if [ -f /tmp/.repolck.$arch.$repoid ]; then - owner=`/bin/ls -l /tmp/.repolck.$arch.$repoid | awk '{print $3}'` - echo "error: db generation is already in progress (started by $owner)" - exit 1 -fi -# lock -touch /tmp/.repolck.$arch.$repoid - -# testing is special, so we have to do the CVS checkout stuff here -testingtmp=$(mktemp -d /tmp/testtmp-XXXXXX) || exit 1 -cvsdir=$testingtmp/testing -mkdir $cvsdir -cd $cvsdir - -if [ "`ls $stagedir/add`" -o "`ls $stagedir/del`" ]; then - echo "==> Checking out TESTING-64 branches..." >&2 - echo "--- CORE ---" - CVS_RSH=ssh CVSROOT=:ext:cvs.archlinux.org:/home/cvs-core cvs -q export -r TESTING-64 core - cp -a core/* . && rm -rf core - echo "--- EXTRA ---" - CVS_RSH=ssh CVSROOT=:ext:cvs.archlinux.org:/home/cvs-extra cvs -q export -r TESTING-64 extra - cp -a extra/* . && rm -rf extra - echo "--- UNSTABLE ---" - CVS_RSH=ssh CVSROOT=:ext:cvs.archlinux.org:/home/cvs-unstable cvs -q export -r TESTING-64 unstable - cp -a unstable/* . && rm -rf unstable -fi - -# unlock -rm -f /tmp/.repolck.$arch.$repoid - -# call the real script -. `dirname $0`/db-inc - -testing_cleanup +. $(dirname $0)/db-inc diff --git a/db-unstable b/db-unstable index 0b4c524..b0231b4 100755 --- a/db-unstable +++ b/db-unstable @@ -4,14 +4,5 @@ repoid=3 reponame="unstable" arch="i686" -ftppath="/home/ftp/unstable/os/i686" - -cvspath="/home/cvs-unstable" -cvsmod="unstable" -cvstag="CURRENT" - -stagedir="$HOME/staging/unstable" - -# call the real script -. `dirname $0`/db-inc +. $(dirname $0)/db-inc diff --git a/db-unstable64 b/db-unstable64 index dece93a..225cf97 100755 --- a/db-unstable64 +++ b/db-unstable64 @@ -1,20 +1,8 @@ #!/bin/bash -export CARCH=x86_64 - repoid=3 reponame="unstable" arch="x86_64" -ftppath="/home/ftp/unstable/os/x86_64" - -cvspath="/home/cvs-unstable" -cvsmod="unstable" -cvstag="CURRENT-64" - -#stagedir="/home/arch64/staging/unstable" -stagedir="$HOME/staging/unstable64" - -# call the real script -. `dirname $0`/db-inc +. $(dirname $0)/db-inc @@ -3,66 +3,67 @@ # Get the package name from the filename # hackish, but should work for now getpkgname() { - local tmp + local tmp - tmp=${1##*/} - tmp=${tmp%.pkg.tar.gz} - tmp=${tmp%-i686} - tmp=${tmp%-x86_64} - echo ${tmp%-*-*} + tmp=${1##*/} + tmp=${tmp%.pkg.tar.gz} + tmp=${tmp%-i686} + tmp=${tmp%-x86_64} + echo ${tmp%-*-*} } -STAGEDIR=`pwd` +STAGEDIR=$(pwd) -ABSDIR=$1 -if [ "$ABSDIR" = "" ]; then - me=`basename $0` - echo "usage: $me <abs_dir>" >&2 - exit 1 +#This is our "unknown" category. We will have to do something about this later. +# It is a stop-gap +CATEGORY=25 + +SVNCO=$1 +REPOTAG=$2 +if [ "$SVNCO" = "" ]; then + me=$(basename $0) + echo "usage: $me <abs_dir> <repo_tag>" >&2 + exit 1 fi -if [ ! "`ls $STAGEDIR/*.pkg.tar.gz 2>/dev/null`" ]; then - exit +if [ ! 
"$(ls $STAGEDIR/*.pkg.tar.gz 2>/dev/null)" ]; then + exit fi cd $STAGEDIR -for pkgfile in `ls $STAGEDIR/*.pkg.tar.gz`; do - pkgname=$(getpkgname $pkgfile); - fullname=$(basename $pkgfile) - # find the matching PKGBUILD - tmpf=$(mktemp /tmp/pkgdb1.XXXXXXXXXX) || exit 1 - find $ABSDIR -type d -name "$pkgname" >$tmpf - if [ "`cat $tmpf | wc -l`" != "1" ]; then - echo "WARNING: could not find PKGBUILD for $pkgname, cannot update this entry" >&2 - rm $tmpf - continue - fi - pkgbuild="`cat $tmpf`/PKGBUILD" - rm $tmpf - if [ ! -f $pkgbuild ]; then - echo "WARNING: could not find PKGBUILD for $fullname, cannot update this entry" >&2 - continue - fi - # pick out the category from the pathname - catpath=$(cd `dirname $pkgbuild`/.. && pwd) - category=${catpath##*/} - # now read the PKGBUILD and output the data for pkgdb2 - unset pkgname pkgver pkgrel pkgdesc license groups provides md5sums force - unset replaces depends conflicts backup source install build makedepends - unset options - source $pkgbuild || continue +for pkgfile in $STAGEDIR/*.pkg.tar.gz; do + pkgname=$(getpkgname $pkgfile); + fullname=$(basename $pkgfile) + pkgpath="$SVNCO/$pkgname/repos/$REPOTAG" + + # find the matching PKGBUILD + if [ ! -d "$pkgpath" ]; then + msg "WARNING: could not find PKGBUILD for $pkgname, cannot update this entry" + return + fi + pkgbuild="${pkgpath}/PKGBUILD" + if [ ! -f $pkgbuild ]; then + msg "WARNING: could not find PKGBUILD for $fullname, cannot update this entry" + return + fi + + # pick out the category from the pathname + unset pkgname pkgver pkgrel pkgdesc license groups provides md5sums force + unset replaces depends conflicts backup source install build makedepends + unset options + source $pkgbuild || continue - deplist=${depends[@]} - sources=${source[@]} - echo $fullname - echo $pkgname - echo $pkgver - echo $pkgrel - echo $pkgdesc - echo $category - echo $url - echo $sources - echo $deplist + deplist=${depends[@]} + sources=${source[@]} + echo $fullname + echo $pkgname + echo $pkgver + echo $pkgrel + echo $pkgdesc + echo $CATEGORY + echo $url + echo $sources + echo $deplist done exit 0 diff --git a/pkgdb2-add.c b/pkgdb2-add.c index 292d0dc..32f2d83 100644 --- a/pkgdb2-add.c +++ b/pkgdb2-add.c @@ -1,6 +1,6 @@ #include <stdio.h> #include <stdlib.h> -#include <mysql.h> +#include <mysql/mysql.h> #include <string.h> #include <limits.h> @@ -9,60 +9,60 @@ #define DB_PASS "passwords-are-cool" typedef struct pkg { - unsigned int id; - char *name; - char *ver; - char *rel; - struct pkg *next; + unsigned int id; + char *name; + char *ver; + char *rel; + struct pkg *next; } pkg_t; MYSQL_RES *doquery(MYSQL *m, const char* q) { - MYSQL_RES *res; - if(mysql_query(m, q)) { - fprintf(stderr, "mysql_query: %s\n", mysql_error(m)); - return(NULL); - } - res = mysql_store_result(m); - return(res); + MYSQL_RES *res; + if(mysql_query(m, q)) { + fprintf(stderr, "mysql_query: %s\n", mysql_error(m)); + return(NULL); + } + res = mysql_store_result(m); + return(res); } /* this function is ugly -- it malloc's for each string it * returns, and they probably won't be freed by the caller. 
*/ char* addslashes(const char *s) { - char slashed[8192]; - char *p; - - slashed[0] = '\0'; - p = slashed; - while(*s) { - if(*s == '\'' || *s == '"' || *s == '\\') { - *p++ = '\\'; - } - *p++ = *s++; - } - *p = '\0'; - return(strdup(slashed)); + char slashed[8192]; + char *p; + + slashed[0] = '\0'; + p = slashed; + while(*s) { + if(*s == '\'' || *s == '"' || *s == '\\') { + *p++ = '\\'; + } + *p++ = *s++; + } + *p = '\0'; + return(strdup(slashed)); } char* trim(char *str) { - char *pch = str; - while(isspace(*pch)) { - pch++; - } - if(pch != str) { - memmove(str, pch, (strlen(pch) + 1)); - } - - pch = (char*)(str + (strlen(str) - 1)); - while(isspace(*pch)) { - pch--; - } - *++pch = '\0'; + char *pch = str; + while(isspace(*pch)) { + pch++; + } + if(pch != str) { + memmove(str, pch, (strlen(pch) + 1)); + } + + pch = (char*)(str + (strlen(str) - 1)); + while(isspace(*pch)) { + pch--; + } + *++pch = '\0'; - return str; + return str; } /* scan a .pkg.tar.gz file and put all files listed into the database. @@ -72,205 +72,205 @@ char* trim(char *str) */ void updatefilelist(MYSQL *db, unsigned long id, char *fn) { - FILE *fp; - char *tmp; - char cmd[PATH_MAX]; - char line[PATH_MAX]; - char query[PATH_MAX]; + FILE *fp; + char *tmp; + char cmd[PATH_MAX]; + char line[PATH_MAX]; + char query[PATH_MAX]; - tmp = tempnam("/tmp", "pkgdb"); - snprintf(cmd, PATH_MAX-1, "/bin/tar tzvf %s | awk '{print $6}' >%s", fn, tmp); - system(cmd); - fp = fopen(tmp, "r"); - if(fp == NULL) { - fprintf(stderr, "pkgdb2-add: could not open tempfile: %s\n", tmp); - return; - } - snprintf(query, sizeof(query), "DELETE FROM packages_files WHERE pkg_id='%lu'", id); - doquery(db, query); - while(fgets(line, sizeof(line)-1, fp)) { - char *fixedfn = addslashes(trim(line)); - if(!strcmp(fixedfn, ".FILELIST") || !strcmp(fixedfn, ".PKGINFO") || !strcmp(fixedfn, ".INSTALL")) { - free(fixedfn); - continue; - } - /* varchars aren't case-sensitive but filesystems are, so we use REPLACE INTO */ - snprintf(query, sizeof(query), "REPLACE INTO packages_files (pkg_id,path) VALUES " - "('%lu', '%s')", id, fixedfn); - free(fixedfn); - doquery(db, query); - } - fclose(fp); - unlink(tmp); + tmp = tempnam("/tmp", "pkgdb"); + snprintf(cmd, PATH_MAX-1, "/bin/tar tzvf %s | awk '{print $6}' >%s", fn, tmp); + system(cmd); + fp = fopen(tmp, "r"); + if(fp == NULL) { + fprintf(stderr, "pkgdb2-add: could not open tempfile: %s\n", tmp); + return; + } + snprintf(query, sizeof(query), "DELETE FROM packages_files WHERE pkg_id='%lu'", id); + doquery(db, query); + while(fgets(line, sizeof(line)-1, fp)) { + char *fixedfn = addslashes(trim(line)); + if(!strcmp(fixedfn, ".FILELIST") || !strcmp(fixedfn, ".PKGINFO") || !strcmp(fixedfn, ".INSTALL")) { + free(fixedfn); + continue; + } + /* varchars aren't case-sensitive but filesystems are, so we use REPLACE INTO */ + snprintf(query, sizeof(query), "REPLACE INTO packages_files (pkg_id,path) VALUES " + "('%lu', '%s')", id, fixedfn); + free(fixedfn); + doquery(db, query); + } + fclose(fp); + unlink(tmp); } int main(int argc, char **argv) { - MYSQL db; - MYSQL_RES *result; - MYSQL_ROW row; - char query[4096]; - char fn[PATH_MAX]; - char ftppath[PATH_MAX]; - int repoid; - pkg_t *dblist = NULL; - pkg_t *pkglist = NULL; - pkg_t *pkgptr, *ptr; + MYSQL db; + MYSQL_RES *result; + MYSQL_ROW row; + char query[4096]; + char fn[PATH_MAX]; + char ftppath[PATH_MAX]; + int repoid; + pkg_t *dblist = NULL; + pkg_t *pkglist = NULL; + pkg_t *pkgptr, *ptr; - if(argc < 3) { - printf("usage: pkgdb2-add <repoid> <ftp_repo_root>\n"); - 
printf("\nWARNING: Do not run this manually! It is intended to be run from\n" - "the Arch db-generation scripts.\n\n"); - return(1); - } - repoid = atoi(argv[1]); - strncpy(ftppath, argv[2], PATH_MAX-1); + if(argc < 3) { + printf("usage: pkgdb2-add <repoid> <ftp_repo_root>\n"); + printf("\nWARNING: Do not run this manually! It is intended to be run from\n" + "the Arch db-generation scripts.\n\n"); + return(1); + } + repoid = atoi(argv[1]); + strncpy(ftppath, argv[2], PATH_MAX-1); - if(mysql_init(&db) == NULL) { - fprintf(stderr, "could not initialize\n"); - return(1); - } - if(mysql_real_connect(&db, "localhost", DB_USER, DB_PASS, DB_NAME, - 0, NULL, 0) == NULL) { - fprintf(stderr, "failed to connect to database: %s\n", mysql_error(&db)); - return(1); - } - snprintf(query, sizeof(query), "SELECT id,pkgname,pkgver,pkgrel FROM packages " - "WHERE repo_id='%d'", repoid); - result = doquery(&db, query); - while(row = mysql_fetch_row(result)) { - int i; - /*unsigned long *lengths; - lengths = mysql_fetch_lengths(result);*/ - /* add the node to the list */ - if(dblist == NULL) { - dblist = (pkg_t*)malloc(sizeof(pkg_t)); - if(dblist == NULL) { - fprintf(stderr, "error: out of memory!\n"); - return(1); - } - ptr = dblist; - } else { - ptr->next = (pkg_t*)malloc(sizeof(pkg_t)); - if(ptr->next == NULL) { - fprintf(stderr, "error: out of memory!\n"); - return(1); - } - ptr = ptr->next; - } - ptr->next = NULL; - /* pick out the fields */ - ptr->id = atoi(row[0]); - ptr->name = strdup(row[1]); - ptr->ver = strdup(row[2]); - ptr->rel = strdup(row[3]); - } - mysql_free_result(result); + if(mysql_init(&db) == NULL) { + fprintf(stderr, "could not initialize\n"); + return(1); + } + if(mysql_real_connect(&db, "localhost", DB_USER, DB_PASS, DB_NAME, + 0, NULL, 0) == NULL) { + fprintf(stderr, "failed to connect to database: %s\n", mysql_error(&db)); + return(1); + } + snprintf(query, sizeof(query), "SELECT id,pkgname,pkgver,pkgrel FROM packages " + "WHERE repo_id='%d'", repoid); + result = doquery(&db, query); + while(row = mysql_fetch_row(result)) { + int i; + /*unsigned long *lengths; + lengths = mysql_fetch_lengths(result);*/ + /* add the node to the list */ + if(dblist == NULL) { + dblist = (pkg_t*)malloc(sizeof(pkg_t)); + if(dblist == NULL) { + fprintf(stderr, "error: out of memory!\n"); + return(1); + } + ptr = dblist; + } else { + ptr->next = (pkg_t*)malloc(sizeof(pkg_t)); + if(ptr->next == NULL) { + fprintf(stderr, "error: out of memory!\n"); + return(1); + } + ptr = ptr->next; + } + ptr->next = NULL; + /* pick out the fields */ + ptr->id = atoi(row[0]); + ptr->name = strdup(row[1]); + ptr->ver = strdup(row[2]); + ptr->rel = strdup(row[3]); + } + mysql_free_result(result); - while(!feof(stdin)) { - int found = 0; - unsigned int catid = 0; - char name[256], ver[256], rel[256], desc[4096]; - char cat[256], url[256], sources[4096], deplist[4096]; - char pkgfile[4096]; - /* get package data from stdin */ - fgets(pkgfile, 4096, stdin); trim(pkgfile); if(feof(stdin)) continue; - fgets(name, 256, stdin); trim(name); if(feof(stdin)) continue; - fgets(ver, 256, stdin); trim(ver); if(feof(stdin)) continue; - fgets(rel, 256, stdin); trim(rel); if(feof(stdin)) continue; - fgets(desc, 4096, stdin); trim(desc); if(feof(stdin)) continue; - fgets(cat, 256, stdin); trim(cat); if(feof(stdin)) continue; - fgets(url, 256, stdin); trim(url); if(feof(stdin)) continue; - fgets(sources, 4096, stdin); trim(sources); if(feof(stdin)) continue; - fgets(deplist, 4096, stdin); trim(deplist); if(feof(stdin)) continue; - /* check for 
overruns */ - if(strlen(name) > 254 || strlen(ver) >= 254 || strlen(rel) > 254 || - strlen(desc) > 4094 || strlen(cat) >= 254 || strlen(url) > 254 || - strlen(sources) > 4094 || strlen(deplist) > 4094 || strlen(pkgfile) > 4094) { - fprintf(stderr, "pkgdb2-add: one or more fields are too long in package '%s'\n", name); - fprintf(stderr, "pkgdb2-add: check the lengths of your strings, most are limited " - "to 255 chars, some are 4095\n"); - return(1); - } - /* add the node to the list */ - if(pkglist == NULL) { - pkglist = (pkg_t*)malloc(sizeof(pkg_t)); - if(pkglist == NULL) { - fprintf(stderr, "error: out of memory!\n"); - return(1); - } - pkgptr = pkglist; - } else { - pkgptr->next = (pkg_t*)malloc(sizeof(pkg_t)); - if(pkgptr->next == NULL) { - fprintf(stderr, "error: out of memory!\n"); - return(1); - } - pkgptr = pkgptr->next; - } - pkgptr->next = NULL; - pkgptr->name = strdup(name); - /* look it up in our cache */ - for(ptr = dblist; ptr; ptr = ptr->next) { - if(!strcmp(name, ptr->name)) { - found = 1; - break; - } - } - /* get the category */ - snprintf(query, sizeof(query), - "SELECT id FROM categories WHERE category='%s'", cat); - result = doquery(&db, query); - if(mysql_num_rows(result) == 0) { - fprintf(stderr, "pkgdb2-add: no db category found for '%s'\n", cat); - /* - snprintf(query, sizeof(query), "INSERT INTO categories (id,category) " - " VALUES (NULL,'%s')", addslashes(cat)); - doquery(&db, query); - catid = (unsigned int)mysql_insert_id(&db); - */ - } else { - row = mysql_fetch_row(result); - catid = (unsigned int)atoi(row[0]); - } - if(!found) { - /* Insert... */ - unsigned long id; - fprintf(stderr, "pkgdb2-add: inserting %s\n", name); - snprintf(query, sizeof(query), "INSERT INTO packages (id,repo_id," - "category_id,pkgname,pkgver,pkgrel,pkgdesc,url,sources,depends," - "last_update) VALUES (NULL,'%d','%d','%s','%s','%s','%s'," - "'%s','%s','%s',NOW())", - repoid, catid, addslashes(name), addslashes(ver), addslashes(rel), - addslashes(desc), addslashes(url), addslashes(sources), - addslashes(deplist)); - doquery(&db, query); - id = mysql_insert_id(&db); - snprintf(fn, PATH_MAX-1, "%s/%s", ftppath, pkgfile); - updatefilelist(&db, id, fn); - continue; - } else if(strcmp(ptr->ver, ver) || strcmp(ptr->rel, rel)) { - /* ...or Update */ - fprintf(stderr, "pkgdb2-add: updating %s (%s-%s ==> %s-%s)\n", - ptr->name, ptr->ver, ptr->rel, ver, rel); - snprintf(query, sizeof(query), "UPDATE packages SET category_id='%d'," - "pkgname='%s',pkgver='%s',pkgrel='%s',pkgdesc='%s',url='%s'," - "sources='%s',depends='%s',needupdate=0,last_update=NOW() " - "WHERE id='%d'", - catid, addslashes(name), addslashes(ver), addslashes(rel), - addslashes(desc), addslashes(url), addslashes(sources), - addslashes(deplist), ptr->id); - doquery(&db, query); - snprintf(fn, PATH_MAX-1, "%s/%s", ftppath, pkgfile); - updatefilelist(&db, ptr->id, fn); - /* - snprintf(query, sizeof(query), "UPDATE todolist_pkgs SET complete=1 " - "WHERE pkgid='%d'", ptr->id); - doquery(&db, query); - */ - } - } + while(!feof(stdin)) { + int found = 0; + unsigned int catid = 0; + char name[256], ver[256], rel[256], desc[4096]; + char cat[256], url[256], sources[4096], deplist[4096]; + char pkgfile[4096]; + /* get package data from stdin */ + fgets(pkgfile, 4096, stdin); trim(pkgfile); if(feof(stdin)) continue; + fgets(name, 256, stdin); trim(name); if(feof(stdin)) continue; + fgets(ver, 256, stdin); trim(ver); if(feof(stdin)) continue; + fgets(rel, 256, stdin); trim(rel); if(feof(stdin)) continue; + fgets(desc, 4096, stdin); 
trim(desc); if(feof(stdin)) continue; + fgets(cat, 256, stdin); trim(cat); if(feof(stdin)) continue; + fgets(url, 256, stdin); trim(url); if(feof(stdin)) continue; + fgets(sources, 4096, stdin); trim(sources); if(feof(stdin)) continue; + fgets(deplist, 4096, stdin); trim(deplist); if(feof(stdin)) continue; + /* check for overruns */ + if(strlen(name) > 254 || strlen(ver) >= 254 || strlen(rel) > 254 || + strlen(desc) > 4094 || strlen(cat) >= 254 || strlen(url) > 254 || + strlen(sources) > 4094 || strlen(deplist) > 4094 || strlen(pkgfile) > 4094) { + fprintf(stderr, "pkgdb2-add: one or more fields are too long in package '%s'\n", name); + fprintf(stderr, "pkgdb2-add: check the lengths of your strings, most are limited " + "to 255 chars, some are 4095\n"); + return(1); + } + /* add the node to the list */ + if(pkglist == NULL) { + pkglist = (pkg_t*)malloc(sizeof(pkg_t)); + if(pkglist == NULL) { + fprintf(stderr, "error: out of memory!\n"); + return(1); + } + pkgptr = pkglist; + } else { + pkgptr->next = (pkg_t*)malloc(sizeof(pkg_t)); + if(pkgptr->next == NULL) { + fprintf(stderr, "error: out of memory!\n"); + return(1); + } + pkgptr = pkgptr->next; + } + pkgptr->next = NULL; + pkgptr->name = strdup(name); + /* look it up in our cache */ + for(ptr = dblist; ptr; ptr = ptr->next) { + if(!strcmp(name, ptr->name)) { + found = 1; + break; + } + } + /* get the category */ + snprintf(query, sizeof(query), + "SELECT id FROM categories WHERE category='%s'", cat); + result = doquery(&db, query); + if(mysql_num_rows(result) == 0) { + fprintf(stderr, "pkgdb2-add: no db category found for '%s'\n", cat); + /* + snprintf(query, sizeof(query), "INSERT INTO categories (id,category) " + " VALUES (NULL,'%s')", addslashes(cat)); + doquery(&db, query); + catid = (unsigned int)mysql_insert_id(&db); + */ + } else { + row = mysql_fetch_row(result); + catid = (unsigned int)atoi(row[0]); + } + if(!found) { + /* Insert... 
*/ + unsigned long id; + fprintf(stderr, "pkgdb2-add: inserting %s\n", name); + snprintf(query, sizeof(query), "INSERT INTO packages (id,repo_id," + "category_id,pkgname,pkgver,pkgrel,pkgdesc,url,sources,depends," + "last_update) VALUES (NULL,'%d','%d','%s','%s','%s','%s'," + "'%s','%s','%s',NOW())", + repoid, catid, addslashes(name), addslashes(ver), addslashes(rel), + addslashes(desc), addslashes(url), addslashes(sources), + addslashes(deplist)); + doquery(&db, query); + id = mysql_insert_id(&db); + snprintf(fn, PATH_MAX-1, "%s/%s", ftppath, pkgfile); + updatefilelist(&db, id, fn); + continue; + } else if(strcmp(ptr->ver, ver) || strcmp(ptr->rel, rel)) { + /* ...or Update */ + fprintf(stderr, "pkgdb2-add: updating %s (%s-%s ==> %s-%s)\n", + ptr->name, ptr->ver, ptr->rel, ver, rel); + snprintf(query, sizeof(query), "UPDATE packages SET " + "pkgname='%s',pkgver='%s',pkgrel='%s',pkgdesc='%s',url='%s'," + "sources='%s',depends='%s',needupdate=0,last_update=NOW() " + "WHERE id='%d'", + addslashes(name), addslashes(ver), addslashes(rel), + addslashes(desc), addslashes(url), addslashes(sources), + addslashes(deplist), ptr->id); + doquery(&db, query); + snprintf(fn, PATH_MAX-1, "%s/%s", ftppath, pkgfile); + updatefilelist(&db, ptr->id, fn); + /* + snprintf(query, sizeof(query), "UPDATE todolist_pkgs SET complete=1 " + "WHERE pkgid='%d'", ptr->id); + doquery(&db, query); + */ + } + } - mysql_close(&db); - return(0); + mysql_close(&db); + return(0); } diff --git a/updatesync-many b/updatesync-many index 40c5e4d..8060585 100755 --- a/updatesync-many +++ b/updatesync-many @@ -23,13 +23,13 @@ usage() { echo "updatesync-many" - echo "usage: $0 <action> <dbfile> <abs_dir>" + echo "usage: $0 <action> <dbfile> <svn_checkout> <repo_tag>" echo echo "This should probably only be run from the Arch db-generation scripts" echo echo "Caveats:" echo " - Make sure you run it from the staging directory" - echo " - Use absolute pathnames for dbfile and abs_dir" + echo " - Use absolute pathnames for dbfile and svn_checkout" echo exit 0 } @@ -167,17 +167,14 @@ update_entry() pkgfile=$1 pkgname=$(getpkgname ${pkgfile}) fullname=$(basename ${pkgfile}) + pkgpath="$SVNCO/$pkgname/repos/$REPOTAG" # find the matching PKGBUILD - tmpf=$(mktemp /tmp/updatesync-many.XXXXXXXXXX) || exit 1 - find $ABSDIR -type d -name "$pkgname" >$tmpf - if [ "`cat $tmpf | wc -l`" != "1" ]; then + if [ ! -d "$pkgpath" ]; then msg "WARNING: could not find PKGBUILD for $pkgname, cannot update this entry" - rm $tmpf return fi - pkgbuild="`cat $tmpf`/PKGBUILD" - rm $tmpf + pkgbuild="${pkgpath}/PKGBUILD" if [ ! -f $pkgbuild ]; then msg "WARNING: could not find PKGBUILD for $fullname, cannot update this entry" return @@ -218,7 +215,8 @@ fi ACTION=$1 PKGDB=$2 -ABSDIR=$3 +SVNCO=$3 +REPOTAG=$4 STAGEDIR="`pwd`" PKGDIR="`dirname $PKGDB`" if [ "$PKGDIR" = "." ]; then |
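
For reference, pkgdb1 hands pkgdb2-add one nine-line record per package on
stdin, in the order the main() loop above reads it. A sketch with
hypothetical values (the package, repoid and ftp path are examples only):

    #!/bin/bash
    # Feed one example record into pkgdb2-add; fields in the order read
    # above: pkgfile, name, ver, rel, desc, category (pkgdb1 now always
    # emits the stop-gap id 25), url, sources, deplist.
    printf '%s\n' \
        "foo-1.0-1-i686.pkg.tar.gz" \
        "foo" \
        "1.0" \
        "1" \
        "An example package" \
        "25" \
        "http://example.org/" \
        "http://example.org/foo-1.0.tar.gz" \
        "bar baz" | /arch/pkgdb2-add 2 /home/ftp/extra/os/i686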